ext | sha | content |
---|---|---|
py | 1a4126db7179c575fccd81bacd776f774365dcc3 | #!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from marshmallow import fields, validate
import polyaxon_sdk
from polyaxon.polyflow.early_stopping import EarlyStoppingSchema
from polyaxon.polyflow.matrix.base import BaseSearchConfig
from polyaxon.polyflow.matrix.kinds import V1MatrixKind
from polyaxon.polyflow.matrix.params import HpParamSchema
from polyaxon.polyflow.matrix.tuner import TunerSchema
from polyaxon.schemas.base import BaseCamelSchema
from polyaxon.schemas.fields.ref_or_obj import RefOrObject
class IterativeSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal(V1MatrixKind.ITERATIVE))
max_iterations = RefOrObject(
fields.Int(required=True, validate=validate.Range(min=1)), required=True
)
concurrency = RefOrObject(fields.Int(allow_none=True))
params = fields.Dict(
keys=fields.Str(), values=fields.Nested(HpParamSchema), allow_none=True
)
seed = RefOrObject(fields.Int(allow_none=True))
tuner = fields.Nested(TunerSchema, allow_none=True)
early_stopping = fields.List(fields.Nested(EarlyStoppingSchema), allow_none=True)
@staticmethod
def schema_config():
return V1Iterative
class V1Iterative(BaseSearchConfig, polyaxon_sdk.V1Iterative):
"""To build a custom optimization algorithm, this interface lets you create an iterative
process for creating suggestions and training your model based on those suggestions
The iterative process expect a user defined a tuner that will generate the suggestions for
running the component.
Args:
kind: str, should be equal `iterative`
max_iterations: int
params: Dict[str, [params](/docs/automation/optimization-engine/params/)]
concurrency: int, optional
seed: int, optional
tuner: [V1Tuner](/docs/automation/optimization-engine/tuner/), optional
early_stopping: List[[EarlyStopping](/docs/automation/helpers/early-stopping)], optional
## YAML usage
```yaml
>>> matrix:
>>> kind: iterative
>>> concurrency:
>>> params:
>>> maxIterations:
>>> seed:
>>> tuner:
>>> earlyStopping:
```
## Python usage
```python
>>> from polyaxon.k8s import k8s_schemas
>>> from polyaxon.polyflow import (
>>> V1Iterative,
>>> V1HpLogSpace,
>>> V1HpUniform,
>>> V1FailureEarlyStopping,
>>> V1MetricEarlyStopping,
>>> V1Tuner,
>>> )
>>> matrix = V1Iterative(
>>> max_iterations=20,
>>> concurrency=2,
>>> seed=23,
>>> params={"param1": V1HpLogSpace(...), "param2": V1HpUniform(...), ... },
>>> early_stopping=[V1FailureEarlyStopping(...), V1MetricEarlyStopping(...)],
>>> tuner=V1Tuner(hub_ref="org/my-suggestion-component")
>>> )
```
## Fields
### kind
The kind signals to the CLI, client, and other tools that this matrix is an iterative process.
If you are using the python client to create the iterative process,
this field is not required and is set by default.
```yaml
>>> matrix:
>>> kind: iterative
```
### concurrency
An optional value to set the number of concurrent operations.
<blockquote class="light">
This value only makes sense if it is less than or equal to the total number of possible runs.
</blockquote>
```yaml
>>> matrix:
>>> kind: iterative
>>> concurrency: 2
```
For more details about concurrency management,
please check the [concurrency section](/docs/automation/helpers/concurrency/).
### params
A dictionary of `key -> value generator`
to generate the parameters.
To learn about all possible generators, check the
[params generators](/docs/automation/optimization-engine/params/) section.
> The parameters generated will be validated against
> the component's inputs/outputs definition to check that the values
> can be passed and have valid types.
```yaml
>>> matrix:
>>> kind: iterative
>>> params:
>>> param1:
>>> kind: ...
>>> value: ...
>>> param2:
>>> kind: ...
>>> value: ...
```
### maxIterations
Maximum number of iterations to run the `suggestions -> training` loop.
```yaml
>>> matrix:
>>> kind: iterative
>>> maxIterations: 5
```
### seed
Since this algorithm uses random generators,
if you want to control the seed for the random generator, you can pass a seed.
```yaml
>>> matrix:
>>> kind: iterative
>>> seed: 523
```
### earlyStopping
A list of early stopping conditions to check for terminating
all operations managed by the pipeline.
If one of the early stopping conditions is met,
a signal will be sent to terminate all running and pending operations.
```yaml
>>> matrix:
>>> kind: iterative
>>> earlyStopping: ...
```
For more details please check the
[early stopping section](/docs/automation/helpers/early-stopping/).
### tuner
The tuner reference definition (with a component hub reference) to use.
The component contains the logic for creating new suggestions.
```yaml
>>> matrix:
>>> kind: iterative
>>> tuner:
>>> hubRef: acme/suggestion-logic:v1
```
## Example
In this example the iterative process will run 5 iterations, generating new experiments
based on the search space defined in the params subsection.
```yaml
>>> version: 1.1
>>> kind: operation
>>> matrix:
>>> kind: iterative
>>> concurrency: 10
>>> maxIterations: 5
>>> tuner:
>>> hubRef: my-suggestion-component
>>> params:
>>> lr:
>>> kind: logspace
>>> value: 0.01:0.1:5
>>> dropout:
>>> kind: choice
>>> value: [0.2, 0.5]
>>> activation:
>>> kind: pchoice
>>> value: [[elu, 0.1], [relu, 0.2], [sigmoid, 0.7]]
>>> early_stopping:
>>> - metric: accuracy
>>> value: 0.9
>>> optimization: maximize
>>> - metric: loss
>>> value: 0.05
>>> optimization: minimize
>>> component:
>>> inputs:
>>> - name: batch_size
>>> type: int
>>> isOptional: true
>>> value: 128
>>> - name: lr
>>> type: float
>>> - name: dropout
>>> type: float
>>> - name: activation
>>> type: str
>>> container:
>>> image: image:latest
>>> command: [python3, train.py]
>>> args: [
>>> "--batch-size={{ batch_size }}",
>>> "--lr={{ lr }}",
>>> "--dropout={{ dropout }}",
>>> "--activation={{ activation }}"
>>> ]
```
"""
IDENTIFIER = V1MatrixKind.ITERATIVE
SCHEMA = IterativeSchema
REDUCED_ATTRIBUTES = [
"maxIterations",
"params",
"seed",
"tuner",
"earlyStopping",
"concurrency",
]
def create_iteration(self, iteration: int = None) -> int:
if iteration is None:
return 0
return iteration + 1
def should_reschedule(self, iteration):
"""Return a boolean to indicate if we need to reschedule another iteration."""
return iteration < self.max_iterations
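# Illustrative sketch (not part of the original file): a pipeline controller
# could drive the loop with these helpers; `run_tuner_and_train` below is a
# hypothetical placeholder for the suggestion + training step.
#
#   config = V1Iterative(max_iterations=3, tuner=V1Tuner(hub_ref="org/tuner"))
#   iteration = config.create_iteration()            # starts at 0
#   while config.should_reschedule(iteration):
#       run_tuner_and_train(config, iteration)       # hypothetical step
#       iteration = config.create_iteration(iteration)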
|
py | 1a412740560634d982b59098920d588dc40928a6 | # Import models
from mmic_md.models.input import MDInput
from mmic_md_gmx.models import ComputeGmxInput
from cmselemental.util.files import random_file
# Import components
from mmic_cmd.components import CmdComponent
from mmic.components.blueprints import GenericComponent
from typing import Any, Dict, List, Tuple, Optional
import os
import shutil
__all__ = ["PrepGmxComponent"]
_supported_solvents = ("spc", "tip3p", "tip4p") # This line may be deleted later
class PrepGmxComponent(GenericComponent):
"""
Prepares input for running molecular dynamics simulations using GMX engine.
The Molecule object from MMIC schema will be
converted to a .pdb file here then converted to a .gro file.
.mdp and .top files will also be constructed
according to the info in MMIC schema.
"""
@classmethod
def input(cls):
return MDInput
@classmethod
def output(cls):
return ComputeGmxInput
def execute(
self,
inputs: MDInput,
extra_outfiles: Optional[List[str]] = None,
extra_commands: Optional[List[str]] = None,
scratch_name: Optional[str] = None,
timeout: Optional[int] = None,
) -> Tuple[bool, ComputeGmxInput]:
if isinstance(inputs, dict):
inputs = self.input()(**inputs)
# Start to build mdp file dict
mdp_inputs = {
"integrator": inputs.method,
"dt": inputs.step_size,
"nsteps": inputs.max_steps,
"coulombtype": inputs.long_forces.method,
"vdw-type": inputs.short_forces.method,
"pbc": inputs.boundary,
}
# Extract output setup from freq_write dict
for key, val in inputs.freq_write.items():
mdp_inputs[key] = val
# Extract T couple and P couple setup
for key, val in inputs.Tcoupl_arg.items():
mdp_inputs[key] = val
for key, val in inputs.Pcoupl_arg.items():
mdp_inputs[key] = val
# Translate boundary str tuple (periodic, periodic, periodic) to a string, e.g. xyz
pbc_dict = dict(zip(["x", "y", "z"], list(mdp_inputs["pbc"])))
pbc = ""
for dim in list(pbc_dict.keys()):
if pbc_dict[dim] != "periodic":
continue
else:
pbc = pbc + dim # pbc is a str, may need to be initialized elsewhere
mdp_inputs["pbc"] = pbc
# Write .mdp file
mdp_file = random_file(suffix=".mdp")
with open(mdp_file, "w") as inp:
for key, val in mdp_inputs.items():
inp.write(f"{key} = {val}\n")
fs = inputs.forcefield
mols = inputs.molecule
ff_name, ff = list(
fs.items()
).pop() # Here ff_name actually gets the related mol name, but it will not be used
mol_name, mol = list(mols.items()).pop()
gro_file = random_file(suffix=".gro") # output gro
top_file = random_file(suffix=".top")
boxed_gro_file = random_file(suffix=".gro")
mol.to_file(gro_file, translator="mmic_parmed")
ff.to_file(top_file, translator="mmic_parmed")
input_model = {
"gro_file": gro_file,
"proc_input": inputs,
"boxed_gro_file": boxed_gro_file,
}
clean_files, cmd_input = self.build_input(input_model)
rvalue = CmdComponent.compute(cmd_input)
boxed_gro_file = str(rvalue.outfiles[boxed_gro_file])
scratch_dir = str(rvalue.scratch_directory)
self.cleanup(clean_files) # Del the gro in the working dir
gmx_compute = ComputeGmxInput(
proc_input=inputs,
mdp_file=mdp_file,
forcefield=top_file,
molecule=boxed_gro_file,
scratch_dir=scratch_dir,
schema_name=inputs.schema_name,
schema_version=inputs.schema_version,
)
return True, gmx_compute
@staticmethod
def cleanup(remove: List[str]):
for item in remove:
if os.path.isdir(item):
shutil.rmtree(item)
elif os.path.isfile(item):
os.remove(item)
def build_input(
self,
inputs: Dict[str, Any],
config: Optional["TaskConfig"] = None,
template: Optional[str] = None,
) -> Dict[str, Any]:
assert inputs["proc_input"].engine == "gmx", "Engine must be gmx (Gromacs)!"
clean_files = []
boxed_gro_file = inputs["boxed_gro_file"]
clean_files.append(inputs["gro_file"])
env = os.environ.copy()
if config:
env["MKL_NUM_THREADS"] = str(config.ncores)
env["OMP_NUM_THREADS"] = str(config.ncores)
scratch_directory = config.scratch_directory if config else None
cmd = [
inputs["proc_input"].engine,
"editconf",
"-f",
inputs["gro_file"],
"-d",
"2",
"-o",
boxed_gro_file,
]
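# For illustration (file names are random temporary paths): the assembled
# command is equivalent to running, e.g.
#   gmx editconf -f <input>.gro -d 2 -o <boxed>.gro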
outfiles = [boxed_gro_file]
return clean_files, {
"command": cmd,
"infiles": [inputs["gro_file"]],
"outfiles": outfiles,
"outfiles_track": outfiles,
"scratch_directory": scratch_directory,
"environment": env,
"scratch_messy": True,
}
|
py | 1a4128c5ecc3bb1a2bb119a7d7ce6a1e2359d4ef | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from __future__ import absolute_import, unicode_literals
import functools
import logging
from .endpoints.base import BaseEndpoint
from .endpoints.base import catch_errors_and_unavailability
from .errors import TemporarilyUnavailableError, ServerError
from .errors import FatalClientError, OAuth2Error
log = logging.getLogger(__name__)
|
py | 1a41295e3322f83c1b93aa2ead1b265fb1fafcac | # _base_ = ['../../_base_/models/csn_ig65m_pretrained.py']
# ir-CSN (interaction-reduced channel-separated network) architecture
ann_type = 'tanz_base' # * change accordingly
num_classes = 9 if ann_type == 'tanz_base' else 42
# model settings
model = dict(
type='Recognizer3D',
backbone=dict(
type='ResNet3dCSN',
pretrained2d=False, # doesn't have imagenet pre-training
# but has 3D ResNet pretraining
pretrained= # noqa: E251
'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r152_ig65m_20200807-771c4135.pth', # noqa: E501
depth=152,
with_pool2=False,
bottleneck_mode='ir',
norm_eval=True,
bn_frozen=True,
zero_init_residual=False),
cls_head=dict(
type='I3DHead',
num_classes=num_classes,
in_channels=2048,
spatial_type='avg',
dropout_ratio=0.5,
init_std=0.01),
# model training and testing settings
train_cfg=None,
test_cfg=dict(average_clips='prob'))
# dataset settings
dataset_type = 'RawframeDataset'
data_root = ''
data_root_val = data_root
data_root_test = data_root
ann_file_train = '/datasets/write/rawframes/rawframes_train.txt'
ann_file_val = '/datasets/write/rawframes/rawframes_val.txt'
ann_file_test = '/datasets/write/rawframes/rawframes_test.txt'
img_norm_cfg = dict(
mean=[110.2008, 100.63983, 95.99475],
std=[58.14765, 56.46975, 55.332195],
to_bgr=False)
train_pipeline = [
dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='RandomResizedCrop'),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(
type='SampleFrames',
clip_len=32,
frame_interval=2,
num_clips=1,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(
type='SampleFrames',
clip_len=32,
frame_interval=2,
num_clips=10,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='ThreeCrop', crop_size=256),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=3,
workers_per_gpu=3,
test_dataloader=dict(videos_per_gpu=1, workers_per_gpu=1),
val_dataloader=dict(videos_per_gpu=1, workers_per_gpu=1),
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.05625, # for 4 gpus
momentum=0.9,
weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
step=[32, 48],
warmup='linear',
warmup_ratio=0.1,
warmup_by_epoch=True,
warmup_iters=16)
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(
interval=5,
metric_options=dict(top_k_accuracy=dict(topk=(1, 2, 3, 4, 5))),
)
eval_config = dict(
metric_options=dict(top_k_accuracy=dict(topk=(1, 2, 3, 4, 5))))
log_config = dict(
interval=20,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = ('https://download.openmmlab.com/mmaction/recognition/csn/'
'vmz/vmz_ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-e63ee1bd.pth')
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = True
|
py | 1a412b3563d120a741004515df5c83a989a6cd46 | import graphene
from django.core.exceptions import ValidationError
from ...account import models as account_models
from ...core.error_codes import ShopErrorCode
from ...core.permissions import SitePermissions
from ...core.utils.url import validate_storefront_url
from ...site import models as site_models
from ..account.i18n import I18nMixin
from ..account.types import AddressInput
from ..core.enums import WeightUnitsEnum
from ..core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation
from ..core.types.common import ShopError
from ..product.types import Collection
from .types import AuthorizationKey, AuthorizationKeyType, Shop
class ShopSettingsInput(graphene.InputObjectType):
header_text = graphene.String(description="Header text.")
description = graphene.String(description="SEO description.")
include_taxes_in_prices = graphene.Boolean(description="Include taxes in prices.")
display_gross_prices = graphene.Boolean(
description="Display prices with tax in store."
)
charge_taxes_on_shipping = graphene.Boolean(description="Charge taxes on shipping.")
track_inventory_by_default = graphene.Boolean(
description="Enable inventory tracking."
)
default_weight_unit = WeightUnitsEnum(description="Default weight unit.")
automatic_fulfillment_digital_products = graphene.Boolean(
description="Enable automatic fulfillment for all digital products."
)
default_digital_max_downloads = graphene.Int(
description="Default number of max downloads per digital content URL."
)
default_digital_url_valid_days = graphene.Int(
description="Default number of days which digital content URL will be valid."
)
default_mail_sender_name = graphene.String(
description="Default email sender's name."
)
default_mail_sender_address = graphene.String(
description="Default email sender's address."
)
customer_set_password_url = graphene.String(
description="URL of a view where customers can set their password."
)
class SiteDomainInput(graphene.InputObjectType):
domain = graphene.String(description="Domain name for shop.")
name = graphene.String(description="Shop site name.")
class ShopSettingsUpdate(BaseMutation):
shop = graphene.Field(Shop, description="Updated shop.")
class Arguments:
input = ShopSettingsInput(
description="Fields required to update shop settings.", required=True
)
class Meta:
description = "Updates shop settings."
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
@classmethod
def clean_input(cls, _info, _instance, data):
if data.get("customer_set_password_url"):
try:
validate_storefront_url(data["customer_set_password_url"])
except ValidationError as error:
raise ValidationError(
{"customer_set_password_url": error}, code=ShopErrorCode.INVALID
)
return data
@classmethod
def construct_instance(cls, instance, cleaned_data):
for field_name, desired_value in cleaned_data.items():
current_value = getattr(instance, field_name)
if current_value != desired_value:
setattr(instance, field_name, desired_value)
return instance
@classmethod
def perform_mutation(cls, _root, info, **data):
instance = info.context.site.settings
data = data.get("input")
cleaned_input = cls.clean_input(info, instance, data)
instance = cls.construct_instance(instance, cleaned_input)
cls.clean_instance(info, instance)
instance.save()
return ShopSettingsUpdate(shop=Shop())
class ShopAddressUpdate(BaseMutation, I18nMixin):
shop = graphene.Field(Shop, description="Updated shop.")
class Arguments:
input = AddressInput(description="Fields required to update shop address.")
class Meta:
description = (
"Update the shop's address. If the `null` value is passed, the currently "
"selected address will be deleted."
)
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
@classmethod
def perform_mutation(cls, _root, info, **data):
site_settings = info.context.site.settings
data = data.get("input")
if data:
if not site_settings.company_address:
company_address = account_models.Address()
else:
company_address = site_settings.company_address
company_address = cls.validate_address(data, company_address, info=info)
company_address.save()
site_settings.company_address = company_address
site_settings.save(update_fields=["company_address"])
else:
if site_settings.company_address:
site_settings.company_address.delete()
return ShopAddressUpdate(shop=Shop())
class ShopDomainUpdate(BaseMutation):
shop = graphene.Field(Shop, description="Updated shop.")
class Arguments:
input = SiteDomainInput(description="Fields required to update site.")
class Meta:
description = "Updates site domain of the shop."
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
@classmethod
def perform_mutation(cls, _root, info, **data):
site = info.context.site
data = data.get("input")
domain = data.get("domain")
name = data.get("name")
if domain is not None:
site.domain = domain
if name is not None:
site.name = name
cls.clean_instance(info, site)
site.save()
return ShopDomainUpdate(shop=Shop())
class ShopFetchTaxRates(BaseMutation):
shop = graphene.Field(Shop, description="Updated shop.")
class Meta:
description = "Fetch tax rates."
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
@classmethod
def perform_mutation(cls, _root, info):
if not info.context.plugins.fetch_taxes_data():
raise ValidationError(
"Could not fetch tax rates. Make sure you have supplied a "
"valid credential for your tax plugin.",
code=ShopErrorCode.CANNOT_FETCH_TAX_RATES.value,
)
return ShopFetchTaxRates(shop=Shop())
class HomepageCollectionUpdate(BaseMutation):
shop = graphene.Field(Shop, description="Updated shop.")
class Arguments:
collection = graphene.ID(description="Collection displayed on homepage.")
class Meta:
description = "Updates homepage collection of the shop."
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
@classmethod
def perform_mutation(cls, _root, info, collection=None):
new_collection = cls.get_node_or_error(
info, collection, field="collection", only_type=Collection
)
site_settings = info.context.site.settings
site_settings.homepage_collection = new_collection
cls.clean_instance(info, site_settings)
site_settings.save(update_fields=["homepage_collection"])
return HomepageCollectionUpdate(shop=Shop())
class AuthorizationKeyInput(graphene.InputObjectType):
key = graphene.String(
required=True, description="Client authorization key (client ID)."
)
password = graphene.String(required=True, description="Client secret.")
class AuthorizationKeyAdd(BaseMutation):
authorization_key = graphene.Field(
AuthorizationKey, description="Newly added authorization key."
)
shop = graphene.Field(Shop, description="Updated shop.")
class Meta:
description = "Adds an authorization key."
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
class Arguments:
key_type = AuthorizationKeyType(
required=True, description="Type of an authorization key to add."
)
input = AuthorizationKeyInput(
required=True, description="Fields required to create an authorization key."
)
@classmethod
def perform_mutation(cls, _root, info, key_type, **data):
if site_models.AuthorizationKey.objects.filter(name=key_type).exists():
raise ValidationError(
{
"key_type": ValidationError(
"Authorization key already exists.",
code=ShopErrorCode.ALREADY_EXISTS,
)
}
)
site_settings = info.context.site.settings
instance = site_models.AuthorizationKey(
name=key_type, site_settings=site_settings, **data.get("input")
)
cls.clean_instance(info, instance)
instance.save()
return AuthorizationKeyAdd(authorization_key=instance, shop=Shop())
class AuthorizationKeyDelete(BaseMutation):
authorization_key = graphene.Field(
AuthorizationKey, description="Authorization key that was deleted."
)
shop = graphene.Field(Shop, description="Updated shop.")
class Arguments:
key_type = AuthorizationKeyType(
required=True, description="Type of a key to delete."
)
class Meta:
description = "Deletes an authorization key."
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
@classmethod
def perform_mutation(cls, _root, info, key_type):
try:
site_settings = info.context.site.settings
instance = site_models.AuthorizationKey.objects.get(
name=key_type, site_settings=site_settings
)
except site_models.AuthorizationKey.DoesNotExist:
raise ValidationError(
{
"key_type": ValidationError(
"Couldn't resolve authorization key",
code=ShopErrorCode.NOT_FOUND,
)
}
)
instance.delete()
return AuthorizationKeyDelete(authorization_key=instance, shop=Shop())
class StaffNotificationRecipientInput(graphene.InputObjectType):
user = graphene.ID(
required=False,
description="The ID of the user subscribed to email notifications..",
)
email = graphene.String(
required=False,
description="Email address of a user subscribed to email notifications.",
)
active = graphene.Boolean(
required=False, description="Determines if a notification is active."
)
class StaffNotificationRecipientCreate(ModelMutation):
class Arguments:
input = StaffNotificationRecipientInput(
required=True,
description="Fields required to create a staff notification recipient.",
)
class Meta:
description = "Creates a new staff notification recipient."
model = account_models.StaffNotificationRecipient
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
@classmethod
def clean_input(cls, info, instance, data):
cleaned_input = super().clean_input(info, instance, data)
cls.validate_input(instance, cleaned_input)
email = cleaned_input.pop("email", None)
if email:
staff_user = account_models.User.objects.filter(email=email).first()
if staff_user:
cleaned_input["user"] = staff_user
else:
cleaned_input["staff_email"] = email
return cleaned_input
@staticmethod
def validate_input(instance, cleaned_input):
email = cleaned_input.get("email")
user = cleaned_input.get("user")
if not email and not user:
if instance.id and "user" in cleaned_input or "email" in cleaned_input:
raise ValidationError(
{
"staff_notification": ValidationError(
"User and email cannot be set empty",
code=ShopErrorCode.INVALID,
)
}
)
if not instance.id:
raise ValidationError(
{
"staff_notification": ValidationError(
"User or email is required", code=ShopErrorCode.REQUIRED
)
}
)
if user and not user.is_staff:
raise ValidationError(
{
"user": ValidationError(
"User has to be staff user", code=ShopErrorCode.INVALID
)
}
)
class StaffNotificationRecipientUpdate(StaffNotificationRecipientCreate):
class Arguments:
id = graphene.ID(
required=True, description="ID of a staff notification recipient to update."
)
input = StaffNotificationRecipientInput(
required=True,
description="Fields required to update a staff notification recipient.",
)
class Meta:
description = "Updates a staff notification recipient."
model = account_models.StaffNotificationRecipient
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
class StaffNotificationRecipientDelete(ModelDeleteMutation):
class Arguments:
id = graphene.ID(
required=True, description="ID of a staff notification recipient to delete."
)
class Meta:
description = "Delete staff notification recipient."
model = account_models.StaffNotificationRecipient
permissions = (SitePermissions.MANAGE_SETTINGS,)
error_type_class = ShopError
error_type_field = "shop_errors"
|
py | 1a412b9dce90de14d26dd857e22c6f246e973f86 | # Lint as: python3
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Anchor definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import utils
from object_detection import argmax_matcher
from object_detection import box_list
from object_detection import faster_rcnn_box_coder
from object_detection import region_similarity_calculator
from object_detection import target_assigner
# The minimum score to consider a logit for identifying detections.
MIN_CLASS_SCORE = -5.0
# The score for a dummy detection
_DUMMY_DETECTION_SCORE = -1e5
# The maximum number of (anchor,class) pairs to keep for non-max suppression.
MAX_DETECTION_POINTS = 5000
# The maximum number of detections per image.
MAX_DETECTIONS_PER_IMAGE = 100
# The minimal score threshold.
MIN_SCORE_THRESH = 0.4
def sigmoid(x):
"""Sigmoid function for use with Numpy for CPU evaluation."""
return 1 / (1 + np.exp(-x))
def decode_box_outputs(rel_codes, anchors):
"""Transforms relative regression coordinates to absolute positions.
Network predictions are normalized and relative to a given anchor; this
reverses the transformation and outputs absolute coordinates for the input
image.
Args:
rel_codes: box regression targets.
anchors: anchors on all feature levels.
Returns:
outputs: bounding boxes.
"""
ycenter_a = (anchors[0] + anchors[2]) / 2
xcenter_a = (anchors[1] + anchors[3]) / 2
ha = anchors[2] - anchors[0]
wa = anchors[3] - anchors[1]
ty, tx, th, tw = rel_codes
w = np.exp(tw) * wa
h = np.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return np.column_stack([ymin, xmin, ymax, xmax])
def decode_box_outputs_tf(rel_codes, anchors):
"""Transforms relative regression coordinates to absolute positions.
Network predictions are normalized and relative to a given anchor; this
reverses the transformation and outputs absolute coordinates for the input
image.
Args:
rel_codes: box regression targets.
anchors: anchors on all feature levels.
Returns:
outputs: bounding boxes.
"""
ycenter_a = (anchors[..., 0] + anchors[..., 2]) / 2
xcenter_a = (anchors[..., 1] + anchors[..., 3]) / 2
ha = anchors[..., 2] - anchors[..., 0]
wa = anchors[..., 3] - anchors[..., 1]
ty, tx, th, tw = tf.unstack(rel_codes, num=4, axis=-1)
w = tf.math.exp(tw) * wa
h = tf.math.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
def diou_nms(dets, iou_thresh=None):
"""DIOU non-maximum suppression.
diou = iou - square of euclidean distance of box centers
/ square of diagonal of smallest enclosing bounding box
Reference: https://arxiv.org/pdf/1911.08287.pdf
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
iou_thresh: IOU threshold,
Returns:
numpy.array: Retained boxes.
"""
iou_thresh = iou_thresh or 0.5
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
center_x = (x1 + x2) / 2
center_y = (y1 + y2) / 2
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
iou = intersection / (areas[i] + areas[order[1:]] - intersection)
smallest_enclosing_box_x1 = np.minimum(x1[i], x1[order[1:]])
smallest_enclosing_box_x2 = np.maximum(x2[i], x2[order[1:]])
smallest_enclosing_box_y1 = np.minimum(y1[i], y1[order[1:]])
smallest_enclosing_box_y2 = np.maximum(y2[i], y2[order[1:]])
square_of_the_diagonal = (
(smallest_enclosing_box_x2 - smallest_enclosing_box_x1)**2 +
(smallest_enclosing_box_y2 - smallest_enclosing_box_y1)**2)
square_of_center_distance = ((center_x[i] - center_x[order[1:]])**2 +
(center_y[i] - center_y[order[1:]])**2)
# Add 1e-10 for numerical stability.
diou = iou - square_of_center_distance / (square_of_the_diagonal + 1e-10)
inds = np.where(diou <= iou_thresh)[0]
order = order[inds + 1]
return dets[keep]
def hard_nms(dets, iou_thresh=None):
"""The basic hard non-maximum suppression.
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
iou_thresh: IOU threshold,
Returns:
numpy.array: Retained boxes.
"""
iou_thresh = iou_thresh or 0.5
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
overlap = intersection / (areas[i] + areas[order[1:]] - intersection)
inds = np.where(overlap <= iou_thresh)[0]
order = order[inds + 1]
return dets[keep]
def soft_nms(dets, nms_configs):
"""Soft non-maximum suppression.
[1] Soft-NMS -- Improving Object Detection With One Line of Code.
https://arxiv.org/abs/1704.04503
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
nms_configs: a dict config that may contain the following members
* method: one of {`linear`, `gaussian`, 'hard'}. Use `gaussian` if None.
* iou_thresh (float): IOU threshold, only for `linear`, `hard`.
* sigma: Gaussian parameter, only for method 'gaussian'.
* score_thresh (float): Box score threshold for final boxes.
Returns:
numpy.array: Retained boxes.
"""
method = nms_configs.get('method', 'gaussian')
# Default sigma and iou_thresh are from the original soft-nms paper.
sigma = nms_configs.get('sigma', 0.5)
iou_thresh = nms_configs.get('iou_thresh', 0.3)
score_thresh = nms_configs.get('score_thresh', 0.001)
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
# expand dets with areas, and the second dimension is
# x1, y1, x2, y2, score, area
dets = np.concatenate((dets, areas[:, None]), axis=1)
retained_box = []
while dets.size > 0:
max_idx = np.argmax(dets[:, 4], axis=0)
dets[[0, max_idx], :] = dets[[max_idx, 0], :]
retained_box.append(dets[0, :-1])
xx1 = np.maximum(dets[0, 0], dets[1:, 0])
yy1 = np.maximum(dets[0, 1], dets[1:, 1])
xx2 = np.minimum(dets[0, 2], dets[1:, 2])
yy2 = np.minimum(dets[0, 3], dets[1:, 3])
w = np.maximum(xx2 - xx1 + 1, 0.0)
h = np.maximum(yy2 - yy1 + 1, 0.0)
inter = w * h
iou = inter / (dets[0, 5] + dets[1:, 5] - inter)
if method == 'linear':
weight = np.ones_like(iou)
weight[iou > iou_thresh] -= iou[iou > iou_thresh]
elif method == 'gaussian':
weight = np.exp(-(iou * iou) / sigma)
else: # traditional nms
weight = np.ones_like(iou)
weight[iou > iou_thresh] = 0
dets[1:, 4] *= weight
retained_idx = np.where(dets[1:, 4] >= score_thresh)[0]
dets = dets[retained_idx + 1, :]
return np.vstack(retained_box)
def nms(dets, nms_configs):
"""Non-maximum suppression.
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
nms_configs: a dict config that may contain parameters.
Returns:
numpy.array: Retained boxes.
"""
nms_configs = nms_configs or {}
method = nms_configs.get('method', None)
if method == 'hard' or not method:
return hard_nms(dets, nms_configs.get('iou_thresh', None))
if method == 'diou':
return diou_nms(dets, nms_configs.get('iou_thresh', None))
if method in ('linear', 'gaussian'):
return soft_nms(dets, nms_configs)
raise ValueError('Unknown NMS method: {}'.format(method))
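# Usage sketch (illustrative configs; `dets` is an (N, 5) array of
# [x1, y1, x2, y2, score] rows):
#   nms(dets, {'method': 'hard', 'iou_thresh': 0.5})
#   nms(dets, {'method': 'gaussian', 'sigma': 0.5, 'score_thresh': 0.001})
#   nms(dets, None)   # falls back to hard NMS with the default IOU threshold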
def _generate_anchor_configs(feat_sizes, min_level, max_level, num_scales,
aspect_ratios):
"""Generates mapping from output level to a list of anchor configurations.
A configuration is a tuple of (num_anchors, scale, aspect_ratio).
Args:
feat_sizes: list of dict of integer numbers of feature map sizes.
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instances, num_scales=2 adds two additional
anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of tuples representing the aspect ratio anchors added
on each level. For instances, aspect_ratios =
[(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.
Returns:
anchor_configs: a dictionary with keys as the levels of anchors and
values as a list of anchor configuration.
"""
anchor_configs = {}
for level in range(min_level, max_level + 1):
anchor_configs[level] = []
for scale_octave in range(num_scales):
for aspect in aspect_ratios:
anchor_configs[level].append(
((feat_sizes[0]['height'] / float(feat_sizes[level]['height']),
feat_sizes[0]['width'] / float(feat_sizes[level]['width'])),
scale_octave / float(num_scales), aspect))
return anchor_configs
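# Illustrative shape of the result (values assumed): with num_scales=3 and
# aspect_ratios=[(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)], anchor_configs[level]
# holds 9 entries, each of the form
#   ((stride_y, stride_x), scale_octave / num_scales, aspect)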
def _generate_anchor_boxes(image_size, anchor_scale, anchor_configs):
"""Generates multiscale anchor boxes.
Args:
image_size: tuple of integer numbers of input image size.
anchor_scale: float number representing the scale of size of the base
anchor to the feature stride 2^level.
anchor_configs: a dictionary with keys as the levels of anchors and
values as a list of anchor configuration.
Returns:
anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all
feature levels.
Raises:
ValueError: input size must be the multiple of largest feature stride.
"""
boxes_all = []
for _, configs in anchor_configs.items():
boxes_level = []
for config in configs:
stride, octave_scale, aspect = config
base_anchor_size_x = anchor_scale * stride[1] * 2**octave_scale
base_anchor_size_y = anchor_scale * stride[0] * 2**octave_scale
anchor_size_x_2 = base_anchor_size_x * aspect[0] / 2.0
anchor_size_y_2 = base_anchor_size_y * aspect[1] / 2.0
x = np.arange(stride[1] / 2, image_size[1], stride[1])
y = np.arange(stride[0] / 2, image_size[0], stride[0])
xv, yv = np.meshgrid(x, y)
xv = xv.reshape(-1)
yv = yv.reshape(-1)
boxes = np.vstack((yv - anchor_size_y_2, xv - anchor_size_x_2,
yv + anchor_size_y_2, xv + anchor_size_x_2))
boxes = np.swapaxes(boxes, 0, 1)
boxes_level.append(np.expand_dims(boxes, axis=1))
# concat anchors on the same level to the reshape NxAx4
boxes_level = np.concatenate(boxes_level, axis=1)
boxes_all.append(boxes_level.reshape([-1, 4]))
anchor_boxes = np.vstack(boxes_all)
return anchor_boxes
def _generate_detections_tf(cls_outputs,
box_outputs,
anchor_boxes,
indices,
classes,
image_id,
image_scale,
image_size,
min_score_thresh=MIN_SCORE_THRESH,
max_boxes_to_draw=MAX_DETECTIONS_PER_IMAGE,
soft_nms_sigma=0.25,
iou_threshold=0.5):
"""Generates detections with model outputs and anchors.
Args:
cls_outputs: a numpy array with shape [N, 1], which has the highest class
scores on all feature levels. The N is the number of selected
top-K total anchors on all levels. (k being MAX_DETECTION_POINTS)
box_outputs: a numpy array with shape [N, 4], which stacks box regression
outputs on all feature levels. The N is the number of selected top-k
total anchors on all levels. (k being MAX_DETECTION_POINTS)
anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all
feature levels. The N is the number of selected top-k total anchors on
all levels.
indices: a numpy array with shape [N], which is the indices from top-k
selection.
classes: a numpy array with shape [N], which represents the class
prediction on all selected anchors from top-k selection.
image_id: an integer number to specify the image id.
image_scale: a float tensor representing the scale between original image
and input image for the detector. It is used to rescale detections for
evaluating with the original groundtruth annotations.
image_size: a tuple (height, width) or an integer for image size.
min_score_thresh: A float representing the threshold for deciding when to
remove boxes based on score.
max_boxes_to_draw: Max number of boxes to draw.
soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter;
See Bodla et al, https://arxiv.org/abs/1704.04503). When
`soft_nms_sigma=0.0` (which is default), we fall back to standard (hard)
NMS.
iou_threshold: A float representing the threshold for deciding whether boxes
overlap too much with respect to IOU.
Returns:
detections: detection results in a tensor with each row representing
[image_id, ymin, xmin, ymax, xmax, score, class]
"""
if not image_size:
raise ValueError('tf version generate_detection needs non-empty image_size')
logging.info('Using tf version of post-processing.')
anchor_boxes = tf.gather(anchor_boxes, indices)
scores = tf.math.sigmoid(cls_outputs)
# apply bounding box regression to anchors
boxes = decode_box_outputs_tf(box_outputs, anchor_boxes)
# TF API is slightly different from paper, here we follow the paper value:
# https://github.com/tensorflow/tensorflow/issues/40253.
top_detection_idx, scores = tf.image.non_max_suppression_with_scores(
boxes,
scores,
max_boxes_to_draw,
iou_threshold=iou_threshold,
score_threshold=min_score_thresh,
soft_nms_sigma=soft_nms_sigma)
boxes = tf.gather(boxes, top_detection_idx)
image_size = utils.parse_image_size(image_size)
detections = tf.stack([
tf.cast(tf.tile(image_id, tf.shape(top_detection_idx)), tf.float32),
tf.clip_by_value(boxes[:, 0], 0, image_size[0]) * image_scale,
tf.clip_by_value(boxes[:, 1], 0, image_size[1]) * image_scale,
tf.clip_by_value(boxes[:, 2], 0, image_size[0]) * image_scale,
tf.clip_by_value(boxes[:, 3], 0, image_size[1]) * image_scale,
scores,
tf.cast(tf.gather(classes, top_detection_idx) + 1, tf.float32)
], axis=1)
return detections
def _generate_detections(cls_outputs, box_outputs, anchor_boxes, indices,
classes, image_id, image_scale, num_classes,
max_boxes_to_draw, nms_configs):
"""Generates detections with model outputs and anchors.
Args:
cls_outputs: a numpy array with shape [N, 1], which has the highest class
scores on all feature levels. The N is the number of selected
top-K total anchors on all levels. (k being MAX_DETECTION_POINTS)
box_outputs: a numpy array with shape [N, 4], which stacks box regression
outputs on all feature levels. The N is the number of selected top-k
total anchors on all levels. (k being MAX_DETECTION_POINTS)
anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all
feature levels. The N is the number of selected top-k total anchors on
all levels.
indices: a numpy array with shape [N], which is the indices from top-k
selection.
classes: a numpy array with shape [N], which represents the class
prediction on all selected anchors from top-k selection.
image_id: an integer number to specify the image id.
image_scale: a float tensor representing the scale between original image
and input image for the detector. It is used to rescale detections for
evaluating with the original groundtruth annotations.
num_classes: an integer that indicates the number of classes.
max_boxes_to_draw: max number of boxes to draw per image.
nms_configs: A dict of NMS configs.
Returns:
detections: detection results in a tensor with each row representing
[image_id, x, y, width, height, score, class]
"""
anchor_boxes = anchor_boxes[indices, :]
scores = sigmoid(cls_outputs)
# apply bounding box regression to anchors
boxes = decode_box_outputs(
box_outputs.swapaxes(0, 1), anchor_boxes.swapaxes(0, 1))
boxes = boxes[:, [1, 0, 3, 2]]
# run class-wise nms
detections = []
for c in range(num_classes):
indices = np.where(classes == c)[0]
if indices.shape[0] == 0:
continue
boxes_cls = boxes[indices, :]
scores_cls = scores[indices]
# Select top-scoring boxes in each class and apply non-maximum suppression
# (nms) for boxes in the same class. The selected boxes from each class are
# then concatenated for the final detection outputs.
all_detections_cls = np.column_stack((boxes_cls, scores_cls))
top_detections_cls = nms(all_detections_cls, nms_configs)
top_detections_cls[:, 2] -= top_detections_cls[:, 0]
top_detections_cls[:, 3] -= top_detections_cls[:, 1]
top_detections_cls = np.column_stack(
(np.repeat(image_id, len(top_detections_cls)),
top_detections_cls,
np.repeat(c + 1, len(top_detections_cls)))
)
detections.append(top_detections_cls)
def _generate_dummy_detections(number):
detections_dummy = np.zeros((number, 7), dtype=np.float32)
detections_dummy[:, 0] = image_id[0]
detections_dummy[:, 5] = _DUMMY_DETECTION_SCORE
return detections_dummy
if detections:
detections = np.vstack(detections)
# take final 100 detections
indices = np.argsort(-detections[:, -2])
detections = np.array(
detections[indices[0:max_boxes_to_draw]], dtype=np.float32)
# Add dummy detections to fill up to 100 detections
n = max(max_boxes_to_draw - len(detections), 0)
detections_dummy = _generate_dummy_detections(n)
detections = np.vstack([detections, detections_dummy])
else:
detections = _generate_dummy_detections(max_boxes_to_draw)
detections[:, 1:5] *= image_scale
return detections
class Anchors(object):
"""Multi-scale anchors class."""
def __init__(self, min_level, max_level, num_scales, aspect_ratios,
anchor_scale, image_size):
"""Constructs multiscale anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instances, num_scales=2 adds two additional
anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of tuples representing the aspect ratio anchors added
on each level. For instances, aspect_ratios =
[(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.
anchor_scale: float number representing the scale of size of the base
anchor to the feature stride 2^level.
image_size: integer number or tuple of integer number of input image size.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_scale = anchor_scale
self.image_size = utils.parse_image_size(image_size)
self.feat_sizes = utils.get_feat_sizes(image_size, max_level)
self.config = self._generate_configs()
self.boxes = self._generate_boxes()
def _generate_configs(self):
"""Generate configurations of anchor boxes."""
return _generate_anchor_configs(self.feat_sizes, self.min_level,
self.max_level, self.num_scales,
self.aspect_ratios)
def _generate_boxes(self):
"""Generates multiscale anchor boxes."""
boxes = _generate_anchor_boxes(self.image_size, self.anchor_scale,
self.config)
boxes = tf.convert_to_tensor(boxes, dtype=tf.float32)
return boxes
def get_anchors_per_location(self):
return self.num_scales * len(self.aspect_ratios)
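# Usage sketch (parameter values are assumptions in the style of the
# docstring examples, not taken from a specific config):
#   anchors = Anchors(min_level=3, max_level=7, num_scales=3,
#                     aspect_ratios=[(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)],
#                     anchor_scale=4.0, image_size=512)
#   anchors.boxes  # tf.float32 tensor of shape [total_anchors, 4]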
class AnchorLabeler(object):
"""Labeler for multiscale anchor boxes."""
def __init__(self, anchors, num_classes, match_threshold=0.5):
"""Constructs anchor labeler to assign labels to anchors.
Args:
anchors: an instance of class Anchors.
num_classes: integer number representing number of classes in the dataset.
match_threshold: float number between 0 and 1 representing the threshold
to assign positive labels for anchors.
"""
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(
match_threshold,
unmatched_threshold=match_threshold,
negatives_lower_than_unmatched=True,
force_match_for_each_row=True)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
self._target_assigner = target_assigner.TargetAssigner(
similarity_calc, matcher, box_coder)
self._anchors = anchors
self._match_threshold = match_threshold
self._num_classes = num_classes
def _unpack_labels(self, labels):
"""Unpacks an array of labels into multiscales labels."""
labels_unpacked = collections.OrderedDict()
anchors = self._anchors
count = 0
for level in range(anchors.min_level, anchors.max_level + 1):
feat_size = anchors.feat_sizes[level]
steps = feat_size['height'] * feat_size[
'width'] * anchors.get_anchors_per_location()
indices = tf.range(count, count + steps)
count += steps
labels_unpacked[level] = tf.reshape(
tf.gather(labels, indices),
[feat_size['height'], feat_size['width'], -1])
return labels_unpacked
def label_anchors(self, gt_boxes, gt_labels):
"""Labels anchors with ground truth inputs.
Args:
gt_boxes: A float tensor with shape [N, 4] representing groundtruth boxes.
For each row, it stores [y0, x0, y1, x1] for four corners of a box.
gt_labels: A integer tensor with shape [N, 1] representing groundtruth
classes.
Returns:
cls_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors]. The height_l and width_l
represent the dimension of class logits at l-th level.
box_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
num_positives: scalar tensor storing number of positives in an image.
"""
gt_box_list = box_list.BoxList(gt_boxes)
anchor_box_list = box_list.BoxList(self._anchors.boxes)
# cls_weights, box_weights are not used
cls_targets, _, box_targets, _, matches = self._target_assigner.assign(
anchor_box_list, gt_box_list, gt_labels)
# class labels start from 1 and the background class = -1
cls_targets -= 1
cls_targets = tf.cast(cls_targets, tf.int32)
# Unpack labels.
cls_targets_dict = self._unpack_labels(cls_targets)
box_targets_dict = self._unpack_labels(box_targets)
num_positives = tf.reduce_sum(
tf.cast(tf.not_equal(matches.match_results, -1), tf.float32))
return cls_targets_dict, box_targets_dict, num_positives
def generate_detections(self,
cls_outputs,
box_outputs,
indices,
classes,
image_id,
image_scale,
image_size=None,
min_score_thresh=MIN_SCORE_THRESH,
max_boxes_to_draw=MAX_DETECTIONS_PER_IMAGE,
disable_pyfun=None,
nms_configs=None):
"""Generate detections based on class and box predictions."""
if disable_pyfun:
return _generate_detections_tf(
cls_outputs,
box_outputs,
self._anchors.boxes,
indices,
classes,
image_id,
image_scale,
image_size,
min_score_thresh=min_score_thresh,
max_boxes_to_draw=max_boxes_to_draw)
else:
logging.info('nms_configs=%s', nms_configs)
return tf.py_func(
functools.partial(_generate_detections, nms_configs=nms_configs), [
cls_outputs,
box_outputs,
self._anchors.boxes,
indices,
classes,
image_id,
image_scale,
self._num_classes,
max_boxes_to_draw,
], tf.float32)
|
py | 1a412c23b4019aea1b6fcca53f1c924d41847872 | from models.aircraft_category import *
from models.flight import *
from models.pilot_type import *
from models.user import *
from models.airline_identifier import *
|
py | 1a412c7a950eb02e964c072814f1da4886ba2266 | from setuptools import find_packages, setup
setup(
name='predict_pv_yield_nwp',
version='0.1',
packages=find_packages())
|
py | 1a412d75e4898d0ea8dfeb8a36a4a975d17f4944 | ###############################################################################
# Convert coco annotations to a SQLite database #
# #
# #
# (c) 2020 Simon Wenkel #
# Released under the Apache 2.0 license #
# #
###############################################################################
#
# import libraries
#
import argparse
import gc
import json
import sqlite3
import time
from tqdm import tqdm
from joblib import Parallel, delayed
import numpy as np
import numba as nb
from .metrics import bb_iou
#
# functions
#
def is_SQLiteDB(db_file:str)->bool:
"""
Function to check if a file is a valid SQLite database
Inputs:
- file (str) : full path of the file/DB in question
Ouputs:
- is_SQLiteDB (bool) : file is a SQLite DB or not
"""
with open(db_file, "rb") as file:
header = file.read(100)
if header[0:16] == b'SQLite format 3\000':
is_SQLiteDB = True
else:
is_SQLiteDB = False
return is_SQLiteDB
def create_DB(db_conn:sqlite3.Connection,
db_curs:sqlite3.Cursor):
"""
Function to generate all tables required in an empty SQLite database
Inputs:
- db_conn (sqlite3.connector) : database connection
- db_curs (sqlite3.cursor) : database cursor to execute commands
Outputs:
- None
"""
db_curs.execute('''CREATE TABLE images
(`orig_id` INTEGER,
`file_name` TEXT,
`coco_url` TEXT,
`height` INTEGER,
`WIDTH` INTEGER,
`date_captured` TEXT,
`flickr_url` TEXT,
`license` INTEGER,
`subset` TEXT)''')
db_curs.execute('''CREATE TABLE annotations
(`segmentation` TEXT,
`area` REAL,
`iscrowd` INTEGER,
`image_id` INTEGER,
`bbox` TEXT,
`category_id` INTEGER,
`orig_id` INTEGER,
`subset` TEXT,
`isGT` INTEGER)''')
db_curs.execute('''CREATE TABLE supercategories
(`supercategory` TEXT)''')
db_curs.execute('''CREATE TABLE categories
(`category_id` INTEGER,
`name` TEXT,
`supercategory_id` INTEGER)''')
db_curs.execute('''CREATE TABLE licenses
(`url` TEXT,
`license_id` INTEGER,
`name` TEXT,
`subset` TEXT)''')
db_curs.execute('''CREATE TABLE predictions
(`image_id` INTEGER,
`category_id` INTEGER,
`bbox` TEXT,
`score` REAL,
`IoU` REAL,
`is_valid_class_in_img` TEXT,
`best_match_gt_annotation_id` INTEGER,
`model` TEXT,
`comments` TEXT )''')
db_curs.execute('''CREATE TABLE status\
(`model` TEXT,
`subset` TEXT,
`processed` TEXT)''')
db_conn.commit()
print("DB generated.")
def check_if_images_in_db(subset:str,
total_count:int,
db_curs)->bool:
"""
"""
if subset in db_curs.execute("SELECT DISTINCT subset\
FROM images").fetchall()[0]:
imgs_in_db = True
# check if subset is complete, throw exception otherwise
if db_curs.execute("SELECT COUNT(*)\
FROM images\
WHERE subset=?", \
[subset]).fetchall()[0][0] != total_count:
raise Exception("Subset of images is in DB but inclomplete!")
else:
imgs_in_db = False
return imgs_in_db
def check_if_annotations_in_db(subset:str,
total_count:int,
db_curs)->bool:
"""
"""
if subset in np.array(db_curs.execute("SELECT DISTINCT subset\
FROM annotations").fetchall()):
annot_in_db = True
# check if subset is complete, throw exception otherwise
if db_curs.execute("SELECT COUNT(*)\
FROM annotations\
WHERE subset=?",\
[subset]).fetchall()[0][0] != total_count:
raise Exception("Subset of annotations is in DB but inclomplete!")
else:
annot_in_db = False
return annot_in_db
def check_if_predictions_in_db(model:str,
total_count:int,
db_curs)->bool:
"""
"""
models = db_curs.execute("SELECT DISTINCT model\
FROM predictions").fetchall()
if len(models) != 0:
models = np.array(models)
if model in models:
annot_in_db = True
# check if subset is complete, throw exception otherwise
if db_curs.execute("SELECT COUNT(*)\
FROM predictions\
WHERE model=?",\
[model]).fetchall()[0][0] != total_count:
raise Exception(model," predictions is in DB but inclomplete!")
else:
annot_in_db = False
else:
annot_in_db = False
return annot_in_db
def image_data_to_list(item:dict,
subset:str)->list:
"""
Assuming the structure of each image dict:
`orig_id` INTEGER,
`file_name` TEXT,
`coco_url` TEXT,
`height` INTEGER,
`WIDTH` INTEGER,
`date_captured` TEXT,
`flickr_url` TEXT,
`license` INTEGER,
`subset` TEXT
Inputs:
- item (dict) : dict containing all data about an image
- subset (str) : the name of the particular subset the image\
in question is part of
Outputs:
- list_to_move (list) : list containing items as required for \
insert into SQLite table
"""
list_to_move = [item["id"], \
item["file_name"], \
item["coco_url"], \
item["height"], \
item["width"], \
item["date_captured"], \
item["flickr_url"], \
item["license"], \
subset]
return list_to_move
def annotations_to_list(item:dict,
subset:str,
isGT:int)->list:
"""
Assumed table structure for groundtruth annotations:
`segmentation' TEXT,
`area' REAL,
`iscrowd` INTEGER,
`image_id` INTEGER,
`bbox` TEXT,
`category_id` INTEGER,
`orig_id` INTEGER,
`subset` TEXT
`isGT` INTEGER
"""
list_to_move = [json.dumps(item["segmentation"]), \
item["area"], \
item["iscrowd"], \
item["image_id"], \
json.dumps(item["bbox"]), \
item["category_id"], \
item["id"], \
subset,\
isGT]
return list_to_move
def add_gt_annotations(gt:dict,
subset:str,
db_conn:sqlite3.Connection,
db_curs:sqlite3.Cursor,
empty_db:bool = False):
"""
Adding GroundTruth data to the database
Assuming a fully coco compliant json structure
"""
keys = gt.keys()
# min. required keys are "annotations" and "images"
if ("images" not in keys) or ("annotations" not in keys):
raise Exception("Groundtruth data lacks images or annotations.\
Please provide a valid groundtruth annotation file")
# check if images are already in DB
if empty_db or not check_if_images_in_db(subset,\
len(gt["images"]),\
db_curs):
items_to_insert = Parallel(n_jobs=-1, prefer="threads")(
delayed(image_data_to_list)(item, subset)
for item in tqdm(gt["images"])
)
db_curs.executemany("INSERT INTO images\
VALUES (?,?,?,?,?,?,?,?,?)",
items_to_insert)
db_conn.commit()
else:
print("GT images in DB already.")
# check if annotations are in DB first
if empty_db:
items_to_insert = Parallel(n_jobs=-1, prefer="threads")(
delayed(annotations_to_list)(item, subset, 1)
for item in tqdm(gt["annotations"])
)
db_curs.executemany("INSERT INTO annotations\
VALUES (?,?,?,?,?,?,?,?,?)",
items_to_insert)
db_conn.commit()
elif not check_if_annotations_in_db(subset,\
len(gt["annotations"]),\
db_curs):
items_to_insert = Parallel(n_jobs=-1, prefer="threads")(
delayed(annotations_to_list)(item, subset, 1)
for item in tqdm(gt["annotations"])
)
db_curs.executemany("INSERT INTO annotations\
VALUES (?,?,?,?,?,?,?,?,?)",
items_to_insert)
db_conn.commit()
else:
print("GT annotations in DB already.")
# licenses
if "licenses" in keys:
list_to_move = []
for lic in gt["licenses"]:
list_to_move.append([lic["url"], \
lic["id"], \
lic["name"], \
subset])
db_curs.executemany("INSERT INTO licenses \
VALUES (?,?,?,?)", list_to_move)
db_conn.commit()
# if "catgegories" in keys:
# for cat in gt["categories"]:
# a = 1
def add_predictions_to_db(predictions:list,
model:str,
db_curs:sqlite3.Cursor,
db_conn:sqlite3.Connection):
"""
Assuming the following structure for the predictions table:
`image_id` INTEGER,
`category_id` INTEGER,
`bbox` TEXT,
    `score` REAL,
    `IoU` REAL,
    `is_valid_class_in_img` TEXT,
`best_match_gt_annotation_id` INTEGER,
`model` TEXT,
`comments` TEXT
"""
def generate_prediction_list_(item:dict,\
model:str)->list:
"""
"""
prediction = [item["image_id"],
item["category_id"],
json.dumps(item["bbox"]),
item["score"],
"-0.1",
"unknown",
-999,
model,
"none"]
return prediction
if not check_if_predictions_in_db(model,\
len(predictions),\
db_curs):
print("Adding", model)
items_to_insert = Parallel(n_jobs=-1, prefer="threads")(
delayed(generate_prediction_list_)(item, model)
for item in tqdm(predictions)
)
db_curs.executemany("INSERT INTO predictions\
VALUES (?,?,?,?,?,?,?,?,?)",
items_to_insert)
db_conn.commit()
else:
print(model," results already in DB!")
def check_if_model_processed(model,
db_conn,
db_curs):
"""
"""
models_procssed = db_curs.execute("SELECT DISTINCT model\
FROM status").fetchall()
if len(models_procssed) != 0:
models_procssed = np.array(models_procssed)
if model in models_procssed:
is_processed = True
else:
is_processed = False
else:
is_processed = False
return is_processed
def get_image_ids_of_pred(model,
db_conn,
db_curs):
"""
"""
image_ids = np.array(db_curs.execute("SELECT DISTINCT image_id\
FROM predictions\
WHERE model=?",\
[model]).fetchall())
return image_ids
def process_predictions_per_image(image_id,\
subset, \
model, \
db_conn, \
db_curs):
"""
"""
# get all valid categories first
valid_categories = db_curs.execute("SELECT DISTINCT category_id\
FROM annotations\
WHERE subset=? AND image_id=?",\
[subset, image_id]).fetchall()
# returns an Array of tuples, so conversion to np.ndarray
# makes it much easier to find something in it
valid_categories = np.array(valid_categories)
# get groundtruth annotations
gt_annotations = db_curs.execute("SELECT area,bbox,category_id, orig_id\
FROM annotations\
WHERE subset=? AND image_id=?",\
[subset, image_id]).fetchall()
# get predictions
pred_annotations = db_curs.execute("SELECT rowid,bbox,category_id\
FROM predictions\
WHERE model=? AND image_id=?",\
[model, image_id]).fetchall()
correct_class_pred = []
incorrect_class_pred = []
for i in range(len(pred_annotations)):
if pred_annotations[i][2] not in valid_categories:
# append rowid of incorrect class only
incorrect_class_pred.append(pred_annotations[i][0])
else:
# append full prediction
correct_class_pred.append(pred_annotations[i])
# Set all the wrong predictions (classes) to False
for rID in incorrect_class_pred:
db_curs.execute("UPDATE predictions\
SET is_valid_class_in_img=?\
WHERE rowid=?",\
["False", rID])
    # calculate IoUs
for prediction in correct_class_pred:
# best prediction
# format [orig_id, IoU]
best_prediction = [-1, 0.0]
for annotation in gt_annotations:
# check if class is correct
if prediction[2] == annotation[2]:
iou_tmp = bb_iou(json.loads(annotation[1]),\
json.loads(prediction[1]))
if iou_tmp >= best_prediction[1]:
best_prediction = [annotation[3], iou_tmp]
db_curs.execute("UPDATE predictions\
SET (is_valid_class_in_img,\
best_match_gt_annotation_id,\
IoU)=(?,?,?)\
WHERE rowid=?",\
["True",\
best_prediction[0],\
best_prediction[1],\
prediction[0]])
db_conn.commit()
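# Illustrative usage sketch (not part of the original script): assumes the SQLite
# database and its tables were already created by the code above, and that the
# file names below are placeholders for real COCO-style annotation/prediction files.
#
#     import sqlite3, json
#     db_conn = sqlite3.connect("coco_eval.sqlite")
#     db_curs = db_conn.cursor()
#     with open("instances_val2017.json") as f:
#         gt = json.load(f)
#     add_gt_annotations(gt, "val2017", db_conn, db_curs, empty_db=True)
#     with open("model_a_predictions.json") as f:
#         preds = json.load(f)
#     add_predictions_to_db(preds, "model_a", db_curs, db_conn)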
|
py | 1a412f1beb383ce2e4a10976579722217fd3c9b1 | # Copyright 2018 United States Government as represented by the Administrator of
# the National Aeronautics and Space Administration. No copyright is claimed in
# the United States under Title 17, U.S. Code. All Other Rights Reserved.
# The Stochastic Reduced Order Models with Python (SROMPy) platform is licensed
# under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from SROMPy.target.RandomEntity import RandomEntity
"""
Abstract class defining the target random variable being matched by an SROM.
Inherited by BetaRandomVariable, GammaRandomVariable,
and NormalRandomVariable.
"""
class RandomVariable(RandomEntity):
@abc.abstractmethod
def get_variance(self, max_order):
return
@abc.abstractmethod
def compute_moments(self, x_grid):
return
@abc.abstractmethod
def compute_cdf(self):
return
@abc.abstractmethod
def compute_inv_cdf(self, sample_size):
return
@abc.abstractmethod
def compute_pdf(self):
return
@abc.abstractmethod
def draw_random_sample(self):
return
@abc.abstractmethod
def generate_moments(self):
return
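# Illustrative sketch only (not part of SROMPy): a concrete subclass is expected to
# override every abstract method above. The scipy.stats-based example below is a
# hypothetical implementation, not the library's own code.
#
#     import scipy.stats
#
#     class UniformRandomVariable(RandomVariable):
#         def __init__(self, low=0.0, high=1.0):
#             self._dist = scipy.stats.uniform(loc=low, scale=high - low)
#
#         def get_variance(self, max_order=None):
#             return self._dist.var()
#
#         def compute_moments(self, x_grid):
#             return [self._dist.moment(i + 1) for i in range(len(x_grid))]
#
#         def compute_cdf(self, x_grid=None):
#             return self._dist.cdf(x_grid)
#
#         def draw_random_sample(self, sample_size=1):
#             return self._dist.rvs(size=sample_size)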
|
py | 1a412fff6b2f24eaf0b1a1cc626c2e17af24e71d | '''
A person's body mass index (BMI, "IMC" in Portuguese) is their weight (in kilograms) divided by the square of their height (in meters). Write a PYTHON program that reads a person's weight and height and reports their status according to the criteria below:
IMC <= 18.5                --> Underweight
IMC > 18.5 and IMC <= 25.0 --> Normal
IMC > 25.0 and IMC <= 30.0 --> Overweight
IMC > 30.0                 --> Obese
'''
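# Worked example (hypothetical values): peso = 70.0 kg and altura = 1.75 m give
# imc = 70.0 / 1.75**2 ≈ 22.9, which falls in the "Normal" range (18.5 < imc <= 25.0).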
peso = float(input('Enter your current weight (kg): '))
altura = float(input('Enter your current height (m): '))
imc = peso / (altura**2)
if imc <= 18.5:
    print('UNDERWEIGHT')
elif imc > 18.5 and imc <= 25.0:
    print('NORMAL')
elif imc > 25.0 and imc <= 30.0:
    print('OVERWEIGHT')
elif imc > 30.0:
    print('OBESE')
print('Your IMC is {:.1f}'.format(imc))
|
py | 1a4130084288f03aec82d8b232cddfeb60945dfd | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-10-18 15:12
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('posts', '0003_auto_20191018_1522'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bio', models.TextField()),
('profilepicture', models.ImageField(blank=True, upload_to='images/')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
py | 1a41302e5492f4d3a9f5901de9415e721c8d44c9 | import paddle.fluid as fluid
from paddle.fluid.initializer import NumpyArrayInitializer
import paddorch as torch
def avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None):
if stride is None:
stride=kernel_size
return torch.Tensor(fluid.layers.pool2d(input,
pool_size=kernel_size, pool_type="avg", pool_stride=stride,
pool_padding=padding, global_pooling=False, use_cudnn=True,
ceil_mode=ceil_mode, name=None, exclusive=not count_include_pad, data_format="NCHW"))
def max_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None):
if stride is None:
stride=kernel_size
return torch.Tensor(fluid.layers.pool2d(input,
pool_size=kernel_size, pool_type="max", pool_stride=stride, pool_padding=padding,
global_pooling=False, use_cudnn=True, ceil_mode=ceil_mode, name=None,
exclusive=not count_include_pad, data_format="NCHW"))
def tanh(x):
return torch.Tensor(fluid.layers.tanh(x))
def dropout(input, p=0.5, training=True, inplace=False):
return torch.Tensor(fluid.layers.dropout(input,
p,
is_test=not training,
dropout_implementation='upscale_in_train'))
def softmax(input, dim=None, _stacklevel=3, dtype=None):
return torch.Tensor(fluid.layers.softmax(input,axis=dim))
def embedding(x, weight):
layer=fluid.dygraph.Embedding( size=weight.shape)
# layer.weight.set_value(weight)
fluid.layers.assign(weight,layer.weight)
out=layer(x)
return out
def batch_norm(x, running_mean, running_var, weight=None, bias=None,
training=False, momentum=0.1, eps=1e-5):
layer_object=fluid.dygraph.BatchNorm(x.shape[1],momentum=momentum,epsilon=eps,trainable_statistics=training)
fluid.layers.assign(running_mean,layer_object._mean)
fluid.layers.assign(running_var, layer_object._variance)
if weight is not None:
fluid.layers.assign(weight, layer_object.weight)
if bias is not None:
fluid.layers.assign(bias, layer_object.bias)
return torch.Tensor(layer_object(x))
#TODO: need to do unit test to confirm this function
def linear(input, weight, bias=None):
layer_obj=fluid.dygraph.Linear(input.shape[1],weight.shape[1])
fluid.layers.assign(weight,layer_obj.weight)
if bias is not None:
fluid.layers.assign(bias, layer_obj.bias)
return torch.Tensor(layer_obj(input))
def normalize(input, p=2, dim=1, eps=1e-12, out=None):
return torch.Tensor(fluid.layers.l2_normalize(input,axis=dim,epsilon=eps))
def sigmoid(x):
return torch.Tensor(fluid.layers.sigmoid(x))
def binary_cross_entropy_with_logits(logits, targets):
return fluid.layers.sigmoid_cross_entropy_with_logits(logits, targets)
def adaptive_avg_pool2d(input, output_size):
return torch.Tensor(fluid.layers.adaptive_pool2d(input,pool_size=output_size,pool_type="avg"))
def adaptive_max_pool2d(input, output_size):
return torch.Tensor(fluid.layers.adaptive_pool2d(input,pool_size=output_size,pool_type="max"))
def leaky_relu(input, negative_slope=0.01, inplace=False):
return torch.Tensor(fluid.layers.leaky_relu(input, alpha=negative_slope, name=None))
def relu(input,inplace=False):
return torch.Tensor(fluid.layers.relu(input))
def interpolate(input, size=None, scale_factor=None, mode='nearest', align_corners=False,align_mode=1,data_format='NCHW'):
if isinstance(size,int):
size=[size,size]
return torch.Tensor(fluid.layers.interpolate(input,
out_shape=size,
scale=scale_factor,
name=None,
resample=mode.upper(),
actual_shape=None,
align_corners=align_corners,
align_mode=align_mode,
data_format=data_format))
def conv2d(input, weight, bias=None, stride=1, padding=1,dilation=1, groups=1):
if bias is None:
bias_attr=False
else:
bias_attr=None
layer=fluid.dygraph.Conv2D(num_channels=weight.shape[1], num_filters=weight.shape[0],filter_size=weight.shape[-2:],stride=stride,padding=padding,dilation=dilation,groups=groups,bias_attr=bias_attr)
# layer.weight.set_value(weight)
fluid.layers.assign(weight,layer.weight)
if bias is not None:
# layer.bias.set_value(bias)
fluid.layers.assign(bias, layer.bias)
out=layer(input)
return out
def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
if bias is None:
bias_attr=False
else:
bias_attr=None
layer=fluid.dygraph.Conv2DTranspose(num_channels=weight.shape[0], num_filters=weight.shape[1],filter_size=weight.shape[-2:],stride=stride,padding=padding,dilation=dilation,groups=groups,bias_attr=bias_attr)
# layer.weight.set_value(weight)
fluid.layers.assign(weight,layer.weight)
if bias is not None:
# layer.bias.set_value(bias)
fluid.layers.assign(bias, layer.bias)
out=layer(input)
return out
# from torch.nn.functional import l1_loss,mse_loss,binary_cross_entropy_with_logits
#
# def l1_loss(input, target, size_average=None, reduce=None, reduction='mean'):
# return fluid.dygraph.L1Loss() |
py | 1a41306bf09f7c0dff4d573e6bb45701b82af2a4 | import mailbox
import quopri
import email.utils
import lxml.html.clean
import re
def read_mail(path):
mdir = mailbox.Maildir(path)
return mdir
def extract_email_headers(msg):
"""Extract headers from email"""
msg_obj = {}
msg_obj["from"] = {}
from_field = msg.getheaders('From')[0]
msg_obj["from"]["name"], msg_obj["from"]["address"] = email.utils.parseaddr(from_field)
msg_obj["to"] = email.utils.getaddresses(msg.getheaders('To'))
msg_obj["subject"] = msg.getheaders('Subject')[0]
msg_obj["date"] = msg.getheaders('Date')[0]
return msg_obj
def format_plaintext_email(message):
"""Replace \n by <br> to display as HTML"""
return message.replace('\n', '<br>')
def extract_email(msg):
"""Extract all the interesting fields from an email"""
msg_obj = extract_email_headers(msg)
    # remember the current file position, read the raw message, then restore it
    fpPos = msg.fp.tell()
    msg.fp.seek(0)
    mail = email.message_from_string(msg.fp.read())
    msg.fp.seek(fpPos)
contents = []
for part in mail.walk():
if part.get_content_type() == 'text/plain':
charset = part.get_content_charset()
if charset != None:
payload = quopri.decodestring(part.get_payload()).decode(charset)
else: # assume ascii
payload = quopri.decodestring(part.get_payload()).decode('ascii')
payload = format_plaintext_email(payload)
contents.append(payload)
content = "".join(contents)
msg_obj["contents"] = lxml.html.clean.clean_html(content).encode('utf-8')
return msg_obj
def get_emails(mdir):
l = []
for id, msg in mdir.iteritems():
msg_obj = extract_email(msg)
msg_obj["id"] = id
l.append(msg_obj)
return l
def get_email(mdir, id):
msg = mdir.get(id)
return extract_email(msg)
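if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module); the Maildir
    # path below is a placeholder and must point at an existing maildir on disk.
    mdir = read_mail("/path/to/Maildir")
    for msg_obj in get_emails(mdir):
        print(msg_obj["subject"])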
|
py | 1a41309c9858339c8781540f86f36cb238c1b670 | # program to delete a specific item from a given doubly linked list.
class Node(object):
    # Doubly linked node
def __init__(self, value=None, next=None, prev=None):
self.value = value
self.next = next
self.prev = prev
class doubly_linked_list(object):
def __init__(self):
self.head = None
self.tail = None
self.count = 0
def append_item(self, value):
# Append an item
new_item = Node(value, None, None)
if self.head is None:
self.head = new_item
self.tail = self.head
else:
new_item.prev = self.tail
self.tail.next = new_item
self.tail = new_item
self.count += 1
def iter(self):
# Iterate the list
current = self.head
while current:
item_val = current.value
current = current.next
yield item_val
def print_foward(self):
for node in self.iter():
print(node)
def search_item(self, val):
for node in self.iter():
if val == node:
return True
return False
def delete(self, value):
# Delete a specific item
current = self.head
node_deleted = False
if current is None:
node_deleted = False
        elif current.value == value:
            self.head = current.next
            if self.head is not None:
                self.head.prev = None
            else:
                # the list had only one node
                self.tail = None
            node_deleted = True
elif self.tail.value == value:
self.tail = self.tail.prev
self.tail.next = None
node_deleted = True
else:
while current:
if current.value == value:
current.prev.next = current.next
current.next.prev = current.prev
node_deleted = True
current = current.next
if node_deleted:
self.count -= 1
items = doubly_linked_list()
items.append_item('PHP')
items.append_item('Python')
items.append_item('C#')
items.append_item('C++')
items.append_item('Java')
items.append_item('SQL')
print("Original list:")
items.print_foward()
items.delete("Java")
items.delete("Python")
print("\nList after deleting two items:")
items.print_foward()
|
py | 1a41323031212524bcfdb228e43811f3a29579ac | from unittest import TestCase, main
from unittest.mock import patch, MagicMock
from monolithcaching.register import Register, RegisterError
class TestRegister(TestCase):
@patch("monolithcaching.register.StrictRedis")
def test___init__(self, mock_redis):
test = Register(host="localhost", port=12345)
self.assertEqual(mock_redis.return_value, test._connection)
mock_redis.assert_called_once_with(host="localhost", port=12345, db=0)
@patch("monolithcaching.register.Register.__init__")
def test_get_count(self, mock_init):
mock_init.return_value = None
test = Register(host="localhost", port=12345)
test._connection = MagicMock()
test._connection.hget.return_value = "3"
outcome = test.get_count(cache_path="test path")
self.assertEqual(3, outcome)
test._connection.hget.assert_called_once_with(name="CACHE_REGISTER", key="test path")
test._connection.hget.reset_mock()
test._connection.hget.return_value = None
outcome = test.get_count(cache_path="test path")
self.assertEqual(None, outcome)
test._connection.hget.assert_called_once_with(name="CACHE_REGISTER", key="test path")
@patch("monolithcaching.register.Register.get_count")
@patch("monolithcaching.register.Register.__init__")
def test_register_cache(self, mock_init, mock_get_count):
mock_init.return_value = None
test = Register(host="localhost", port=12345)
test._connection = MagicMock()
mock_get_count.return_value = 3
test.register_cache(cache_path="test path")
test._connection.hset.assert_called_once_with(name="CACHE_REGISTER", key="test path", value="4")
test._connection.hset.reset_mock()
mock_get_count.return_value = None
test.register_cache(cache_path="test path")
test._connection.hset.assert_called_once_with(name="CACHE_REGISTER", key="test path", value="1")
@patch("monolithcaching.register.Register.get_count")
@patch("monolithcaching.register.Register.__init__")
def test_deregister_cache(self, mock_init, mock_get_count):
mock_init.return_value = None
test = Register(host="localhost", port=12345)
test._connection = MagicMock()
mock_get_count.return_value = 3
test.deregister_cache(cache_path="test path", locked=False)
test._connection.hset.assert_called_once_with(name="CACHE_REGISTER", key="test path", value="2")
test._connection.hset.reset_mock()
mock_get_count.return_value = 1
test.deregister_cache(cache_path="test path", locked=False)
test._connection.hdel.assert_called_once_with("CACHE_REGISTER", "test path")
test._connection.hdel.reset_mock()
mock_get_count.return_value = 3
test.deregister_cache(cache_path="test path", locked=True)
test._connection.hset.assert_called_once_with(name="CACHE_REGISTER", key="test path", value="2")
test._connection.hset.reset_mock()
mock_get_count.return_value = 1
test.deregister_cache(cache_path="test path", locked=True)
self.assertEqual(0, len(test._connection.hdel.call_args_list))
mock_get_count.return_value = None
with self.assertRaises(RegisterError) as context:
test.deregister_cache(cache_path="test path", locked=True)
self.assertEqual("cache test path is not in cache register so it cannot be de-registered",
str(context.exception))
@patch("monolithcaching.register.Register.__init__")
def test_get_all_records(self, mock_init):
mock_init.return_value = None
test = Register(host="localhost", port=12345)
test._connection = MagicMock()
outcome = test.get_all_records()
self.assertEqual(test._connection.hgetall.return_value, outcome)
test._connection.hgetall.assert_called_once_with(name="CACHE_REGISTER")
if __name__ == "__main__":
main()
|
py | 1a413247477cbf35be735bfe6d6b66b7ad6322c2 | #impa entre 0 e 100
cn = 1
cm = 100
for cn in range(cm):
if (cn % 2) == 1: #troque o == 1 por == 0 para por o par como verdadeiro
print(f"[{cn}] Esse numero é ímpa.")
else:
print(f"[{cn}] Esse numero é par.") |
py | 1a41328c26379ddea4aa5932dd28ee7b5c9e5451 | from __future__ import absolute_import, division, print_function
import tensorflow as tf
import numpy as np
import os
import zipfile
def _parse_flat(filename, label):
image_string = tf.read_file(filename)
image_decoded = tf.image.decode_jpeg(image_string, channels=1) # the image gets decoded in the shape of height,width,channels
image_reshaped = tf.reshape(image_decoded, (-1,)) # flatten the tensor
image_casted = tf.cast(image_reshaped, tf.float32) # Convert the array to float32 as opposed to uint8
image_casted /= 255 # Convert the pixel values from integers between 0 and 255 to floats between 0 and 1
return image_casted, label
def _parse(filename, label):
image_string = tf.read_file(filename)
image_decoded = tf.image.decode_jpeg(image_string, channels=1) # the image gets decoded in the shape of height,width,channels
image_casted = tf.cast(image_decoded, tf.float32) # Convert the array to float32 as opposed to uint8
image_casted /= 255 # Convert the pixel values from integers between 0 and 255 to floats between 0 and 1
return image_casted, label
def load_data(image_dir, number_of_outputs=None, flatten=None, batch_size=None, shuffle_size=None, percent_of_test_examples=None):
subdirs = [x[1] for x in os.walk(image_dir)][0]
label_enums = []
trainFileList = []
trainLabelList = []
testFileList = []
testLabelList = []
if(percent_of_test_examples is None):
percent_of_test_examples = 0.1
for subdir in subdirs:
files = os.listdir(image_dir+"/"+subdir)
files = [image_dir+"/"+subdir+'/'+f for f in files]
if(subdir not in label_enums):
label_enums.append(subdir)
number_of_test_examples = int(percent_of_test_examples * len(files))
trainFiles = files[number_of_test_examples:]
trainFileList.extend(trainFiles)
trainLabelList.extend([label_enums.index(subdir)]*len(trainFiles))
testFiles = files[:number_of_test_examples]
testFileList.extend(testFiles)
testLabelList.extend([label_enums.index(subdir)]*len(testFiles))
trainFileList = tf.constant(trainFileList)
trainLabelList = tf.keras.utils.to_categorical(trainLabelList, number_of_outputs) # The format of the labels
trainLabelList = trainLabelList.astype(np.float32) # Cast the labels to floats
train_dataset = tf.data.Dataset.from_tensor_slices((trainFileList, trainLabelList))
testFileList = tf.constant(testFileList)
testLabelList = tf.keras.utils.to_categorical(testLabelList, number_of_outputs) # The format of the labels
testLabelList = testLabelList.astype(np.float32) # Cast the labels to floats
test_dataset = tf.data.Dataset.from_tensor_slices((testFileList, testLabelList))
if(flatten is None):
train_dataset = train_dataset.map(_parse)
test_dataset = test_dataset.map(_parse)
elif(flatten):
train_dataset = train_dataset.map(_parse_flat)
test_dataset = test_dataset.map(_parse_flat)
else:
train_dataset = train_dataset.map(_parse)
test_dataset = test_dataset.map(_parse)
# shuffle
if(shuffle_size is not None):
train_dataset = train_dataset.shuffle(shuffle_size)
# create batch
if(batch_size is not None):
train_dataset = train_dataset.batch(batch_size)
else:
        # batch() requires a size; fall back to a batch size of 1 when none is given
        train_dataset = train_dataset.batch(1)
test_dataset = test_dataset.batch(len(testLabelList))
return train_dataset, test_dataset
def load_one_data(image_dir, number_of_outputs=None, flatten=None):
image_set_dir = image_dir[:image_dir.rindex('/')]
image_set_dir = image_set_dir[:image_set_dir.rindex('/')]
subdirs = [x[1] for x in os.walk(image_set_dir)][0]
label_enums = []
testFileList = []
testLabelList = []
for subdir in subdirs:
if(subdir not in label_enums):
label_enums.append(subdir)
label = os.path.split(os.path.dirname(image_dir))[-1]
testFileList = tf.constant([image_dir])
testLabelList = tf.keras.utils.to_categorical([label_enums.index(label)], number_of_outputs) # The format of the labels
testLabelList = testLabelList.astype(np.float32) # Cast the labels to floats
test_dataset = tf.data.Dataset.from_tensor_slices((testFileList, testLabelList))
test_dataset = test_dataset.map(_parse)
test_dataset = test_dataset.batch(1)
return test_dataset
def prepare_data(image_dir):
# look for .zip files and unzip them
# returns number labels (folders) in the image_dir
subdirs = [x[1] for x in os.walk(image_dir)][0]
files = [x[2] for x in os.walk(image_dir)][0]
zip_files = list(filter(lambda file: file.endswith('.zip'), files))
dirs = set(subdirs)
for zip_file in zip_files:
if not zip_file[:-4] in dirs:
_unzip(zip_file,image_dir)
else:
print('found ' + zip_file + ' already unzipped')
labels = [x[1] for x in os.walk(image_dir)][0]
print('labels:', labels)
return labels
def _unzip(source,image_dir):
print('unzipping ' + source)
with zipfile.ZipFile(image_dir+"/"+source,"r") as zip_ref:
zip_ref.extractall(image_dir+"/"+source[:-4])
return True
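if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module). "images" is a
    # placeholder directory containing one sub-folder of JPEG files per label,
    # possibly shipped as <label>.zip archives.
    labels = prepare_data("images")
    train_ds, test_ds = load_data("images",
                                  number_of_outputs=len(labels),
                                  flatten=True,
                                  batch_size=32,
                                  shuffle_size=1000,
                                  percent_of_test_examples=0.2)
    print(train_ds)
    print(test_ds)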
|
py | 1a4132952d520b2bee1b100eaaefdf73c7a5179a | '''test pysftp.Connection.stat and .lstat - uses py.test'''
# pylint: disable = W0142
# pylint: disable=E1101
from common import *
def test_stat(psftp):
'''test stat'''
dirname = 'pub'
psftp.chdir('/home/test')
rslt = psftp.stat(dirname)
assert rslt.st_size >= 0
def test_lstat(psftp):
'''test lstat minimal'''
dirname = 'pub'
psftp.chdir('/home/test')
rslt = psftp.lstat(dirname)
assert rslt.st_size >= 0
|
py | 1a4132e9b58dd3677645f2a994b59284b0df0098 | import os
from pathlib import Path
from shutil import which
from invoke import task
PKG_NAME = "conda_hooks"
PKG_PATH = Path(f"{PKG_NAME}")
ACTIVE_VENV = os.environ.get("VIRTUAL_ENV", None)
VENV_HOME = Path(os.environ.get("WORKON_HOME", "~/.local/share/virtualenvs"))
VENV_PATH = Path(ACTIVE_VENV) if ACTIVE_VENV else (VENV_HOME / PKG_NAME)
VENV = str(VENV_PATH.expanduser())
TOOLS = ["poetry", "pre-commit"]
POETRY = which("poetry") if which("poetry") else (VENV / Path("bin") / "poetry")
PRECOMMIT = (
which("pre-commit") if which("pre-commit") else (VENV / Path("bin") / "pre-commit")
)
@task
def tests(c):
"""Run the test suite"""
c.run(f"{VENV}/bin/pytest", pty=True)
@task
def black(c, check=False, diff=False):
"""Run Black auto-formatter, optionally with --check or --diff"""
check_flag, diff_flag = "", ""
if check:
check_flag = "--check"
if diff:
diff_flag = "--diff"
c.run(f"{VENV}/bin/black {check_flag} {diff_flag} {PKG_PATH} tasks.py")
@task
def isort(c, check=False, diff=False):
check_flag, diff_flag = "", ""
if check:
check_flag = "-c"
if diff:
diff_flag = "--diff"
c.run(f"{VENV}/bin/isort {check_flag} {diff_flag} .")
@task
def flake8(c):
c.run(f"{VENV}/bin/flake8 {PKG_PATH} tasks.py")
@task
def lint(c):
isort(c, check=True)
black(c, check=True)
flake8(c)
@task
def tools(c):
"""Install tools in the virtual environment if not already on PATH"""
for tool in TOOLS:
if not which(tool):
c.run(f"{VENV}/bin/pip install {tool}")
@task
def precommit(c):
"""Install pre-commit hooks to .git/hooks/pre-commit"""
c.run(f"{PRECOMMIT} install")
@task
def setup(c):
c.run(f"{VENV}/bin/pip install -U pip")
tools(c)
c.run(f"{POETRY} install")
precommit(c)
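# Illustrative CLI usage (assumes invoke is installed and this file is the project's
# tasks.py; run from the project root):
#
#     invoke setup
#     invoke lint
#     invoke tests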
|
py | 1a41330c151ac0b36bba4fa33ab06a3b2ff46a96 | from Maix import I2S, GPIO
from fpioa_manager import fm
from modules import SpeechRecognizer
import utime, time
# register i2s(i2s0) pin
fm.register(20, fm.fpioa.I2S0_OUT_D0, force=True)
fm.register(18, fm.fpioa.I2S0_SCLK, force=True)
fm.register(19, fm.fpioa.I2S0_WS, force=True)
# close WiFi, if use M1W Core module
if True:
fm.register(8, fm.fpioa.GPIO0, force=True)
wifi_en=GPIO(GPIO.GPIO0,GPIO.OUT)
wifi_en.value(0)
sample_rate = 8000
# init i2s(i2s0)
i2s_dev = I2S(I2S.DEVICE_0)
# config i2s according to speechrecognizer
i2s_dev.channel_config(i2s_dev.CHANNEL_0,
I2S.RECEIVER,
resolution = I2S.RESOLUTION_16_BIT,
cycles = I2S.SCLK_CYCLES_32,
align_mode = I2S.RIGHT_JUSTIFYING_MODE)
i2s_dev.set_sample_rate(sample_rate)
s = SpeechRecognizer(i2s_dev)
print(type(s))
print(s)
key_word_record = False
tim2 = time.ticks_ms()
def pins_irq(pin_num):
global key_word_record
global tim2
if (time.ticks_ms() - tim2 )> 800:
key_word_record = not key_word_record
tim2 = time.ticks_ms()
fm.register(16, fm.fpioa.GPIOHS0)
key_boot = GPIO(GPIO.GPIOHS0, GPIO.IN)
key_boot.irq(pins_irq, GPIO.IRQ_FALLING, GPIO.WAKEUP_NOT_SUPPORT, 7)
#Currently supports a maximum of 10 keywords, each recording a maximum of 4 templates
for i in range(3):
# Record three keywords, three times each
for j in range(3):
print("Press the button to record the {} keyword, the {}".format(i+1, j+1))
while True:
if key_word_record == True:
break
else:
print('.', end="")
utime.sleep_ms(500)
print("---")
s.record(i, j)
key_word_record = False
print("record successful!")
while True:
# recognize
ret = s.recognize()
if ret > 0:
if ret == 1:
print("ret:{}-{}".format(ret, "red"))
elif ret == 2:
print("ret:{}-{}".format(ret, "green"))
elif ret == 3:
print("ret:{}-{}".format(ret, "blue"))
else:
print("")
|
py | 1a4134a309e0eb7d6e7e8cac28d34037d9c503f2 | from flask_restful import Resource
from api.models.unit import UnitModel
from flask import jsonify
class Unit(Resource):
def get(self, _id):
unit = None
if _id.isdigit():
unit = UnitModel.find_by_id(_id)
else:
unit = UnitModel.find_by_name(_id)
if unit:
return jsonify(unit.json())
return {'message': 'Unit not found'}, 404
class UnitList(Resource):
def get(self):
return jsonify(list(map(lambda x: x.json(),
UnitModel.query.all())))
|
py | 1a4135229b90f5c89d6eda765a9290795b553243 | from django.apps import AppConfig
class UserloginConfig(AppConfig):
name = 'userlogin'
|
py | 1a4135aff8b37b16feb9c47c2ef27e6d5f4abe2b | import logging
from copy import deepcopy
from datetime import timezone
from typing import Any, Dict, List, Optional
import pytz
import requests
from dateutil import parser
from obsei.sink.base_sink import Convertor
from obsei.sink.http_sink import HttpSink, HttpSinkConfig
from obsei.payload import TextPayload
from obsei.misc.utils import flatten_dict
logger = logging.getLogger(__name__)
TWITTER_URL_PREFIX = "https://twitter.com/"
IST_TZ = pytz.timezone("Asia/Kolkata")
class PayloadConvertor(Convertor):
def convert(
self,
analyzer_response: TextPayload,
base_payload: Optional[Dict[str, Any]] = None,
**kwargs,
) -> Dict[str, Any]:
request_payload = base_payload or {}
if analyzer_response.source_name != "Twitter":
return {**request_payload, **analyzer_response.to_dict()}
source_information = kwargs["source_information"]
user_url = ""
positive = 0.0
negative = 0.0
text = ""
tweet_id = None
created_at_str = None
classification_list: List[str] = []
flat_dict = flatten_dict(analyzer_response.to_dict())
for k, v in flat_dict.items():
if "username" in k:
user_url = TWITTER_URL_PREFIX + v
elif "text" in k:
text = str(v).replace("\n", " ")
elif "positive" in k:
positive = float(v)
elif "negative" in k:
negative = float(v)
elif "meta_id" in k:
tweet_id = v
elif "created_at" in k:
created_at_str = v
elif "segmented_data" in k and len(classification_list) < 2:
classification_list.append(k.rsplit("_", 1)[1])
if created_at_str:
created_at = parser.isoparse(created_at_str)
created_at_str = (
created_at.replace(tzinfo=timezone.utc)
.astimezone(tz=IST_TZ)
.strftime("%Y-%m-%d %H:%M:%S")
)
tweet_url = f"{user_url}/status/{tweet_id}"
# Sentiment rules
if negative > 8.0:
sentiment = "Strong Negative"
elif 0.3 < negative <= 8.0:
sentiment = "Negative"
elif positive >= 0.8:
sentiment = "Strong Positive"
elif 0.4 < positive < 0.8:
sentiment = "Positive"
else:
sentiment = "Neutral"
enquiry = {
"Source": source_information,
"FeedbackBy": user_url,
"Sentiment": sentiment,
"TweetUrl": tweet_url,
"FormattedText": text,
"PredictedCategories": ",".join(classification_list),
}
if created_at_str:
enquiry["ReportedAt"] = created_at_str
kv_str_list = [k + ": " + str(v) for k, v in enquiry.items()]
request_payload["enquiryMessage"] = "\n".join(kv_str_list)
return request_payload
class DailyGetSinkConfig(HttpSinkConfig):
TYPE: str = "DailyGet"
partner_id: str
consumer_phone_number: str
source_information: str
headers: Dict[str, Any] = {"Content-type": "application/json"}
class DailyGetSink(HttpSink):
def __init__(self, convertor: Convertor = PayloadConvertor(), **data: Any):
super().__init__(convertor=convertor, **data)
def send_data( # type: ignore[override]
self,
analyzer_responses: List[TextPayload],
config: DailyGetSinkConfig,
**kwargs,
):
headers = config.headers
payloads = []
responses = []
for analyzer_response in analyzer_responses:
payloads.append(
self.convertor.convert(
analyzer_response=analyzer_response,
base_payload=dict()
if config.base_payload is None
else deepcopy(config.base_payload),
source_information=config.source_information,
)
)
for payload in payloads:
response = requests.post(
url=config.url,
json=payload,
headers=headers,
)
logger.info(f"payload='{payload}'")
logger.info(f"response='{response.__dict__}'")
responses.append(response)
return responses
|
py | 1a41386b9ab5232aedacdcaeee6619fc90866244 | print("__name__ is", __name__)
|
py | 1a4139c3a84f345df3960f92dc13a4f39d312368 | # -*- coding: utf-8 -*-
# Example for using WebDriver object: driver = self.get_current_driver() e.g driver.current_url
from QAutoLibrary.extension import TESTDATA
from selenium.webdriver.common.by import By
from QAutoLibrary.QAutoSelenium import *
from time import sleep
class Cs_backup_restore_dlg_up_back_conf_exist(CommonUtils):
"""
"""
# Pagemodel timestamp: 20171019021218
# Pagemodel url: https://xroad-lxd-cs.lxd:4000/backup
# Pagemodel area: (593, 355, 735, 146)
# Pagemodel screen resolution: (1920, 975)
# Use project settings: True
# Used filters: id, css_selector, class_name, link_text, xpath
# Xpath type: xpath-position
# Create automated methods: False
# Depth of css path: 3
# Minimize css selector: True
# Use css pattern: False
# Allow non unique css pattern: False
# Pagemodel template: False
# Use testability: True
# testability attribute: data-name
# Use contains text in xpath: False
# Exclude dynamic table filter: True
# Row count: 5
# Element count: 20
# Big element filter width: 55
# Big element filter height: 40
# Not filtered elements: button, strong, select
# Canvas modeling: False
# Pagemodel type: normal
# Links found: 0
# Page model constants:
DATA_NAME_FILE_UPLOAD_UI_RESIZABLE_W = (By.CSS_SELECTOR, u'div[data-name="file_upload_dialog"]>.ui-resizable-w') # x: 605 y: 312 width: 7 height: 230, tag: div, type: , name: None, form_id: , checkbox: , table_id: , href:
DATA_NAME_FILE_UPLOAD_UI_RESIZABLE_E = (By.CSS_SELECTOR, u'div[data-name="file_upload_dialog"]>.ui-resizable-e') # x: 1308 y: 312 width: 7 height: 230, tag: div, type: , name: None, form_id: , checkbox: , table_id: , href:
SUBMIT_0 = (By.XPATH, u'//div[8]/div[1]/div[1]/button[1]') # x: 1228 y: 352 width: 51 height: 49, tag: button, type: submit, name: None, form_id: upload_new, checkbox: , table_id: 2, href:
SUBMIT = (By.XPATH, u'//div[8]/div[1]/div[1]/button[2]') # x: 1279 y: 352 width: 51 height: 49, tag: button, type: submit, name: None, form_id: upload_new, checkbox: , table_id: 2, href:
ID_UI_ID_4 = (By.ID, u'ui-id-4') # x: 601 y: 366 width: 167 height: 21, tag: span, type: , name: None, form_id: , checkbox: , table_id: , href: None
UNKNOWN = (By.XPATH, u'//div[8]/div[1]/div[1]/button[1]/i[1]') # x: 1243 y: 366 width: 21 height: 21, tag: i, type: , name: None, form_id: upload_new, checkbox: , table_id: 2, href:
UNKNOWN_0 = (By.XPATH, u'//div[8]/div[1]/div[1]/button[2]/i[1]') # x: 1299 y: 369 width: 12 height: 15, tag: i, type: , name: None, form_id: upload_new, checkbox: , table_id: 2, href:
SELECTED_FILE_C_FAKEPATH_CONF_BACKUP_20171018_230834_TAR_TEXT = (By.CSS_SELECTOR, u'.selected_file') # x: 630 y: 382 width: 569 height: 32, tag: input, type: text, name: None, form_id: , checkbox: , table_id: 2, href:
UI_WIDGET_CONTENT_CORNER_ALL_FRONT_BUTTONS_DRAGGABLE_RESIZABLE_CONFIRM = (By.CSS_SELECTOR, u'div.ui-dialog.ui-widget.ui-widget-content.ui-corner-all.ui-front.ui-dialog-buttons.ui-draggable.ui-resizable>#confirm') # x: 591 y: 403 width: 740 height: 53, tag: div, type: , name: None, form_id: , checkbox: , table_id: , href:
CANCEL_0 = (By.XPATH, u'//div[11]/div[1]/button[2]') # x: 1155 y: 461 width: 75 height: 36, tag: button, type: button, name: None, form_id: upload_new, checkbox: , table_id: 2, href:
UI_BUTTONSET_CONFIRM = (By.CSS_SELECTOR, u'div.ui-dialog-buttonset>#confirm') # x: 1240 y: 461 width: 85 height: 36, tag: button, type: button, name: None, form_id: , checkbox: , table_id: , href:
CANCEL = (By.XPATH, u'//div[11]/div[1]/button[2]/span[1]') # x: 1168 y: 470 width: 49 height: 18, tag: span, type: , name: None, form_id: upload_new, checkbox: , table_id: 2, href: None
CONFIRM_UI_TEXT = (By.CSS_SELECTOR, u'#confirm>.ui-button-text') # x: 1253 y: 470 width: 59 height: 18, tag: span, type: , name: None, form_id: , checkbox: , table_id: , href: None
def click_button_confirm(self, parameters=None):
"""
Click button confirm
:param parameters: Test data section dictionary
"""
self.click_element(self.UI_BUTTONSET_CONFIRM)
|
py | 1a413a0db977ef7077095145b69022a324aee684 | from pynput.mouse import *
import random
from time import sleep
import subprocess
subprocess.call("pip install pynput",shell=True)
mouse = Controller()
def randomMousePosition():
random_x = random.randint(1,10000)
random_y = random.randint(1,10000)
moveMouse(random_x,random_y)
def moveMouse(x,y):
mouse.move(x,y)
print('[+] Mouse moved')
while True:
randomMousePosition()
sleep(1)
mouse.click(Button.left,1)
mouse.click(Button.right,1) |
py | 1a413ba9104a475dc0f57081bbdaa0ecf6cdd894 |
from constants import *
from mobject.mobject import Mobject
from utils.bezier import interpolate
from utils.color import color_gradient
from utils.color import color_to_rgba
from utils.color import rgba_to_color
from utils.config_ops import digest_config
from utils.iterables import stretch_array_to_length
from utils.space_ops import get_norm
class PMobject(Mobject):
CONFIG = {
"stroke_width": DEFAULT_STROKE_WIDTH,
}
def reset_points(self):
self.rgbas = np.zeros((0, 4))
self.points = np.zeros((0, 3))
return self
def get_array_attrs(self):
return Mobject.get_array_attrs(self) + ["rgbas"]
def add_points(self, points, rgbas=None, color=None, alpha=1):
"""
points must be a Nx3 numpy array, as must rgbas if it is not None
"""
if not isinstance(points, np.ndarray):
points = np.array(points)
num_new_points = len(points)
self.points = np.append(self.points, points, axis=0)
if rgbas is None:
color = Color(color) if color else self.color
rgbas = np.repeat(
[color_to_rgba(color, alpha)],
num_new_points,
axis=0
)
elif len(rgbas) != len(points):
raise Exception("points and rgbas must have same shape")
self.rgbas = np.append(self.rgbas, rgbas, axis=0)
return self
def set_color(self, color=YELLOW_C, family=True):
rgba = color_to_rgba(color)
mobs = self.family_members_with_points() if family else [self]
for mob in mobs:
mob.rgbas[:, :] = rgba
self.color = color
return self
# def set_color_by_gradient(self, start_color, end_color):
def set_color_by_gradient(self, *colors):
self.rgbas = np.array(list(map(
color_to_rgba,
color_gradient(colors, len(self.points))
)))
return self
def set_colors_by_radial_gradient(self, center=None, radius=1, inner_color=WHITE, outer_color=BLACK):
        start_rgba, end_rgba = list(map(color_to_rgba, [inner_color, outer_color]))
if center is None:
center = self.get_center()
for mob in self.family_members_with_points():
num_points = mob.get_num_points()
t = min(1, np.abs(mob.get_center() - center) / radius)
mob.rgbas = np.array(
[interpolate(start_rgba, end_rgba, t)] * num_points
)
return self
def match_colors(self, mobject):
Mobject.align_data(self, mobject)
self.rgbas = np.array(mobject.rgbas)
return self
def filter_out(self, condition):
for mob in self.family_members_with_points():
to_eliminate = ~np.apply_along_axis(condition, 1, mob.points)
mob.points = mob.points[to_eliminate]
mob.rgbas = mob.rgbas[to_eliminate]
return self
def thin_out(self, factor=5):
"""
Removes all but every nth point for n = factor
"""
for mob in self.family_members_with_points():
            num_points = mob.get_num_points()
mob.apply_over_attr_arrays(
lambda arr: arr[
np.arange(0, num_points, factor)
]
)
return self
def sort_points(self, function=lambda p: p[0]):
"""
function is any map from R^3 to R
"""
for mob in self.family_members_with_points():
indices = np.argsort(
np.apply_along_axis(function, 1, mob.points)
)
mob.apply_over_attr_arrays(lambda arr: arr[indices])
return self
def fade_to(self, color, alpha):
self.rgbas = interpolate(self.rgbas, color_to_rgba(color), alpha)
for mob in self.submobjects:
mob.fade_to(color, alpha)
return self
def get_all_rgbas(self):
return self.get_merged_array("rgbas")
def ingest_submobjects(self):
attrs = self.get_array_attrs()
arrays = list(map(self.get_merged_array, attrs))
for attr, array in zip(attrs, arrays):
setattr(self, attr, array)
self.submobjects = []
return self
def get_color(self):
return rgba_to_color(self.rgbas[0, :])
def point_from_proportion(self, alpha):
        index = int(alpha * (self.get_num_points() - 1))
        return self.points[index]
# Alignment
def align_points_with_larger(self, larger_mobject):
assert(isinstance(larger_mobject, PMobject))
self.apply_over_attr_arrays(
lambda a: stretch_array_to_length(
a, larger_mobject.get_num_points()
)
)
def get_point_mobject(self, center=None):
if center is None:
center = self.get_center()
return Point(center)
def interpolate_color(self, mobject1, mobject2, alpha):
self.rgbas = interpolate(
mobject1.rgbas, mobject2.rgbas, alpha
)
def pointwise_become_partial(self, mobject, a, b):
lower_index, upper_index = [
int(x * mobject.get_num_points())
for x in (a, b)
]
for attr in self.get_array_attrs():
full_array = getattr(mobject, attr)
partial_array = full_array[lower_index:upper_index]
setattr(self, attr, partial_array)
# TODO, Make the two implementations bellow non-redundant
class Mobject1D(PMobject):
CONFIG = {
"density": DEFAULT_POINT_DENSITY_1D,
}
def __init__(self, **kwargs):
digest_config(self, kwargs)
self.epsilon = 1.0 / self.density
Mobject.__init__(self, **kwargs)
def add_line(self, start, end, color=None):
start, end = list(map(np.array, [start, end]))
length = get_norm(end - start)
if length == 0:
points = [start]
else:
epsilon = self.epsilon / length
points = [
interpolate(start, end, t)
for t in np.arange(0, 1, epsilon)
]
self.add_points(points, color=color)
class Mobject2D(PMobject):
CONFIG = {
"density": DEFAULT_POINT_DENSITY_2D,
}
def __init__(self, **kwargs):
digest_config(self, kwargs)
self.epsilon = 1.0 / self.density
Mobject.__init__(self, **kwargs)
class PointCloudDot(Mobject1D):
CONFIG = {
"radius": 0.075,
"stroke_width": 2,
"density": DEFAULT_POINT_DENSITY_1D,
"color": YELLOW,
}
def __init__(self, center=ORIGIN, **kwargs):
Mobject1D.__init__(self, **kwargs)
self.shift(center)
def generate_points(self):
self.add_points([
r * (np.cos(theta) * RIGHT + np.sin(theta) * UP)
for r in np.arange(0, self.radius, self.epsilon)
for theta in np.arange(0, 2 * np.pi, self.epsilon / r)
])
class Point(PMobject):
CONFIG = {
"color": BLACK,
}
def __init__(self, location=ORIGIN, **kwargs):
PMobject.__init__(self, **kwargs)
self.add_points([location])
|
py | 1a413db6593623fe78a4e3984c0e619f87000b82 | from pandas import Series
obj = Series([4, 7, -5, 3], dtype=int)
# 0 4
# 1 7
# 2 -5
# 3 3
# dtype: int32
print(obj)
# [ 4 7 -5 3]
print(obj.values)
# RangeIndex(start=0, stop=4, step=1)
print(obj.index)
obj = Series([4, 7, -5, 3], index=['a', 'b', 'c', 'd'])
# a 4
# b 7
# c -5
# d 3
# dtype: int64
print(obj)
# -5
print(obj['c'])
# a 4
# b 7
print(obj[:2])
# a True
# b True
# c False
# d True
# dtype: bool
print(obj > 0)
# a 4
# b 7
# d 3
# dtype: int64
print(obj[obj > 0])
# True
print('b' in obj)
# Create a Series from a dict
# For a Series built from a dict, the dict keys become the index
obj = Series({'id': 128, 'name': 'Jane', 'age': 17})
# id 128
# name Jane
# age 17
# dtype: object
print(obj)
# If a specified index is missing from the dict, the value is filled with NaN ("not a number");
# the order of the Series index is given by the index parameter
obj = Series({'id': 128, 'name': 'Jane', 'age': 17}, index=['id', 'age', 'name', 'mobile'])
# id 128
# age 17
# name Jane
# mobile NaN
# dtype: object
print(obj)
# id False
# age False
# name False
# mobile True
# dtype: bool
print(obj.isnull())
# Both the Series object and its index have a name attribute, which can be set by assignment
obj.name = 'my-series'
obj.index.name = 'my-series-state'
# my-series-state
# id 128
# age 17
# name Jane
# mobile NaN
# Name: my-series, dtype: object
print(obj)
|
py | 1a413e18e6f34bd17d0faffb960d081d9e6ff5fc | """Image loaders."""
from .common import SDLError
from .compat import UnsupportedError, byteify
from .. import endian, surface, pixels
_HASPIL = True
try:
from PIL import Image
except ImportError:
_HASPIL = False
_HASSDLIMAGE = True
try:
from .. import sdlimage
except ImportError:
_HASSDLIMAGE = False
__all__ = ["get_image_formats", "load_image"]
def get_image_formats():
"""Gets the formats supported in the default installation."""
if not _HASPIL and not _HASSDLIMAGE:
return ("bmp", )
return ("bmp", "cur", "gif", "ico", "jpg", "lbm", "pbm", "pcx", "pgm",
"png", "pnm", "ppm", "svg", "tga", "tif", "webp", "xcf", "xpm")
def load_image(fname, enforce=None):
"""Creates a SDL_Surface from an image file.
This function makes use of the Python Imaging Library, if it is available
on the target execution environment. The function will try to load the
file via sdl2 first. If the file could not be loaded, it will try
to load it via sdl2.sdlimage and PIL.
You can force the function to use only one of them, by passing the enforce
as either "PIL" or "SDL".
Note: This will call sdl2.sdlimage.init() implicitly with the default
arguments, if the module is available and if sdl2.SDL_LoadBMP() failed to
load the image.
"""
if enforce is not None and enforce not in ("PIL", "SDL"):
raise ValueError("enforce must be either 'PIL' or 'SDL', if set")
if fname is None:
raise ValueError("fname must be a string")
name = fname
if hasattr(fname, 'encode'):
name = byteify(fname, "utf-8")
if not _HASPIL and not _HASSDLIMAGE:
imgsurface = surface.SDL_LoadBMP(name)
if not imgsurface:
raise UnsupportedError(load_image,
"cannot use PIL or SDL for image loading")
return imgsurface.contents
if enforce == "PIL" and not _HASPIL:
raise UnsupportedError(load_image, "cannot use PIL (not found)")
if enforce == "SDL" and not _HASSDLIMAGE:
imgsurface = surface.SDL_LoadBMP(name)
if not imgsurface:
raise UnsupportedError(load_image,
"cannot use SDL_image (not found)")
return imgsurface.contents
imgsurface = None
if enforce != "PIL" and _HASSDLIMAGE:
sdlimage.IMG_Init(sdlimage.IMG_INIT_JPG | sdlimage.IMG_INIT_PNG |
sdlimage.IMG_INIT_TIF | sdlimage.IMG_INIT_WEBP)
imgsurface = sdlimage.IMG_Load(name)
if not imgsurface:
# An error occured - if we do not try PIL, break out now
if not _HASPIL or enforce == "SDL":
raise SDLError(sdlimage.IMG_GetError())
else:
imgsurface = imgsurface.contents
if enforce != "SDL" and _HASPIL and not imgsurface:
image = Image.open(fname)
mode = image.mode
width, height = image.size
rmask = gmask = bmask = amask = 0
if mode in ("1", "L", "P"):
# 1 = B/W, 1 bit per byte
# "L" = greyscale, 8-bit
# "P" = palette-based, 8-bit
pitch = width
depth = 8
elif mode == "RGB":
# 3x8-bit, 24bpp
if endian.SDL_BYTEORDER == endian.SDL_LIL_ENDIAN:
rmask = 0x0000FF
gmask = 0x00FF00
bmask = 0xFF0000
else:
rmask = 0xFF0000
gmask = 0x00FF00
bmask = 0x0000FF
depth = 24
pitch = width * 3
elif mode in ("RGBA", "RGBX"):
# RGBX: 4x8-bit, no alpha
# RGBA: 4x8-bit, alpha
if endian.SDL_BYTEORDER == endian.SDL_LIL_ENDIAN:
rmask = 0x000000FF
gmask = 0x0000FF00
bmask = 0x00FF0000
if mode == "RGBA":
amask = 0xFF000000
else:
rmask = 0xFF000000
gmask = 0x00FF0000
bmask = 0x0000FF00
if mode == "RGBA":
amask = 0x000000FF
depth = 32
pitch = width * 4
else:
# We do not support CMYK or YCbCr for now
raise TypeError("unsupported image format")
pxbuf = image.tobytes()
imgsurface = surface.SDL_CreateRGBSurfaceFrom(pxbuf, width, height,
depth, pitch, rmask,
gmask, bmask, amask)
if not imgsurface:
raise SDLError()
imgsurface = imgsurface.contents
# the pixel buffer must not be freed for the lifetime of the surface
imgsurface._pxbuf = pxbuf
if mode == "P":
# Create a SDL_Palette for the SDL_Surface
def _chunk(seq, size):
for x in range(0, len(seq), size):
yield seq[x:x + size]
rgbcolors = image.getpalette()
sdlpalette = pixels.SDL_AllocPalette(len(rgbcolors) // 3)
if not sdlpalette:
raise SDLError()
SDL_Color = pixels.SDL_Color
for idx, (r, g, b) in enumerate(_chunk(rgbcolors, 3)):
sdlpalette.contents.colors[idx] = SDL_Color(r, g, b)
ret = surface.SDL_SetSurfacePalette(imgsurface, sdlpalette)
# This will decrease the refcount on the palette, so it gets
# freed properly on releasing the SDL_Surface.
pixels.SDL_FreePalette(sdlpalette)
if ret != 0:
raise SDLError()
# If the image has a single transparent palette index, set
# that index as the color key to make blitting correct.
if 'transparency' in image.info and isinstance(image.info['transparency'], int):
                surface.SDL_SetColorKey(imgsurface, True, image.info['transparency'])
return imgsurface
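# Illustrative usage sketch (not part of the original module; the file path is a
# placeholder):
#
#     sprite = load_image("/path/to/picture.png")
#     print(sprite.w, sprite.h)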
|
py | 1a413e485c4102fb6f67bb7283b2fe018387f0ec | """
Contains website related routes and views.
"""
import json
from operator import itemgetter
import os
from urllib import parse as urlparse
import boto3
from boto3.exceptions import Boto3Error
from botocore.exceptions import BotoCoreError
from pyramid.decorator import reify
from pyramid.events import NewResponse
from pyramid.events import subscriber
from pyramid.renderers import get_renderer
from pyramid.response import FileResponse
from pyramid.response import Response
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPNotFound
from ichnaea.conf import settings
from ichnaea.content.stats import global_stats, histogram, regions
from ichnaea.models.content import StatKey
from ichnaea import util
HERE = os.path.dirname(__file__)
IMAGE_PATH = os.path.join(HERE, "static", "images")
FAVICON_PATH = os.path.join(IMAGE_PATH, "favicon.ico")
TOUCHICON_PATH = os.path.join(IMAGE_PATH, "apple-touch-icon.png")
CSP_BASE = "'self'"
# See https://docs.mapbox.com/mapbox-gl-js/api/#csp-directives
CSP_POLICY = """\
default-src 'self';
connect-src {base} {tiles} *.tiles.mapbox.com api.mapbox.com events.mapbox.com;
font-src {base};
img-src {base} {tiles} api.mapbox.com data: blob:;
script-src {base} data: 'unsafe-eval';
style-src {base};
child-src blob:;
worker-src blob:;
"""
CSP_POLICY = CSP_POLICY.replace("\n", " ").strip()
TILES_PATTERN = "{z}/{x}/{y}.png"
HOMEPAGE_MAP_IMAGE = (
"https://api.mapbox.com/styles/v1/mapbox/dark-v10/tiles"
"/256/0/0/0@2x?access_token={token}"
)
def get_map_tiles_url(asset_url):
"""Compute tiles url for maps based on the asset_url.
:arg str asset_url: the url to static assets or ''
:returns: tiles_url
"""
asset_url = asset_url if asset_url else "/static/datamap/"
if not asset_url.endswith("/"):
asset_url = asset_url + "/"
return urlparse.urljoin(asset_url, "tiles/" + TILES_PATTERN)
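# For example, with no asset_url configured, get_map_tiles_url("") returns
# "/static/datamap/tiles/{z}/{x}/{y}.png".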
def get_csp_policy(asset_url):
"""Return value for Content-Security-Policy HTTP header.
:arg str asset_url: the url to static assets or ''
:returns: CSP policy string
"""
result = urlparse.urlsplit(asset_url)
map_tiles_src = urlparse.urlunparse((result.scheme, result.netloc, "", "", "", ""))
return CSP_POLICY.format(base=CSP_BASE, tiles=map_tiles_src)
def configure_content(config):
config.add_view(
favicon_view, name="favicon.ico", http_cache=(86400, {"public": True})
)
config.registry.skip_logging.add("/favicon.ico")
config.add_view(
robotstxt_view, name="robots.txt", http_cache=(86400, {"public": True})
)
config.registry.skip_logging.add("/robots.txt")
config.add_view(
touchicon_view,
name="apple-touch-icon-precomposed.png",
http_cache=(86400, {"public": True}),
)
config.registry.skip_logging.add("/apple-touch-icon-precomposed.png")
config.add_static_view(
name="static", path="ichnaea.content:static", cache_max_age=86400
)
config.add_route("stats_regions", "/stats/regions")
config.add_route("stats", "/stats")
config.scan("ichnaea.content.views")
@subscriber(NewResponse)
def security_headers(event):
response = event.response
# Headers for all responses.
response.headers.add(
"Strict-Transport-Security", "max-age=31536000; includeSubDomains"
)
response.headers.add("X-Content-Type-Options", "nosniff")
# Headers for HTML responses.
if response.content_type == "text/html":
response.headers.add(
"Content-Security-Policy", get_csp_policy(settings("asset_url"))
)
response.headers.add("X-Frame-Options", "DENY")
response.headers.add("X-XSS-Protection", "1; mode=block")
def s3_list_downloads(raven_client):
files = {"full": [], "diff1": [], "diff2": []}
if not settings("asset_bucket"):
return files
asset_url = settings("asset_url")
if not asset_url.endswith("/"):
asset_url = asset_url + "/"
diff = []
full = []
try:
s3 = boto3.resource("s3")
bucket = s3.Bucket(settings("asset_bucket"))
for obj in bucket.objects.filter(Prefix="export/"):
name = obj.key.split("/")[-1]
path = urlparse.urljoin(asset_url, obj.key)
# round to kilobyte
size = int(round(obj.size / 1024.0, 0))
file = dict(name=name, path=path, size=size)
if "diff-" in name:
diff.append(file)
elif "full-" in name:
full.append(file)
except (Boto3Error, BotoCoreError):
raven_client.captureException()
return files
half = len(diff) // 2 + len(diff) % 2
diff = list(sorted(diff, key=itemgetter("name"), reverse=True))
files["diff1"] = diff[:half]
files["diff2"] = diff[half:]
files["full"] = list(sorted(full, key=itemgetter("name"), reverse=True))
return files
class ContentViews(object):
def __init__(self, request):
self.request = request
self.session = request.db_session
self.redis_client = request.registry.redis_client
@reify
def base_template(self):
renderer = get_renderer("templates/base.pt")
return renderer.implementation().macros["layout"]
@property
def this_year(self):
return "%s" % util.utcnow().year
def _get_cache(self, cache_key):
cache_key = self.redis_client.cache_keys[cache_key]
cached = self.redis_client.get(cache_key)
if cached:
return json.loads(cached)
return None
def _set_cache(self, cache_key, data, ex=3600):
cache_key = self.redis_client.cache_keys[cache_key]
self.redis_client.set(cache_key, json.dumps(data), ex=ex)
def is_map_enabled(self):
"""Return whether maps are enabled.
Enable maps if and only if there's a mapbox token and a url for the
tiles location. Otherwise it's disabled.
"""
return bool(settings("mapbox_token"))
@view_config(renderer="templates/homepage.pt", http_cache=3600)
def homepage_view(self):
map_tiles_url = get_map_tiles_url(settings("asset_url"))
image_base_url = HOMEPAGE_MAP_IMAGE.format(token=settings("mapbox_token"))
image_url = map_tiles_url.format(z=0, x=0, y="0@2x")
return {
"page_title": "Overview",
"map_enabled": self.is_map_enabled(),
"map_image_base_url": image_base_url,
"map_image_url": image_url,
}
@view_config(renderer="templates/api.pt", name="api", http_cache=3600)
def api_view(self):
return {"page_title": "API"}
@view_config(renderer="templates/contact.pt", name="contact", http_cache=3600)
def contact_view(self):
return {"page_title": "Contact Us"}
@view_config(renderer="templates/downloads.pt", name="downloads", http_cache=3600)
def downloads_view(self):
data = self._get_cache("downloads")
if data is None:
data = s3_list_downloads(self.request.registry.raven_client)
self._set_cache("downloads", data, ex=1800)
return {"page_title": "Downloads", "files": data}
@view_config(renderer="templates/optout.pt", name="optout", http_cache=3600)
def optout_view(self):
return {"page_title": "Opt-Out"}
@view_config(renderer="templates/privacy.pt", name="privacy", http_cache=3600)
def privacy_view(self):
return {"page_title": "Privacy Notice"}
@view_config(renderer="templates/map.pt", name="map", http_cache=3600)
def map_view(self):
map_tiles_url = get_map_tiles_url(settings("asset_url"))
return {
"page_title": "Map",
"map_enabled": self.is_map_enabled(),
"map_tiles_url": map_tiles_url,
"map_token": settings("mapbox_token"),
}
@view_config(renderer="json", name="map.json", http_cache=3600)
def map_json(self):
map_tiles_url = get_map_tiles_url(settings("asset_url"))
offset = map_tiles_url.find(TILES_PATTERN)
base_url = map_tiles_url[:offset]
return {"tiles_url": base_url}
@view_config(renderer="json", name="stats_blue.json", http_cache=3600)
def stats_blue_json(self):
data = self._get_cache("stats_blue_json")
if data is None:
data = histogram(self.session, StatKey.unique_blue)
self._set_cache("stats_blue_json", data)
return {"series": [{"title": "MLS Bluetooth", "data": data[0]}]}
@view_config(renderer="json", name="stats_cell.json", http_cache=3600)
def stats_cell_json(self):
data = self._get_cache("stats_cell_json")
if data is None:
data = histogram(self.session, StatKey.unique_cell)
self._set_cache("stats_cell_json", data)
return {"series": [{"title": "MLS Cells", "data": data[0]}]}
@view_config(renderer="json", name="stats_wifi.json", http_cache=3600)
def stats_wifi_json(self):
data = self._get_cache("stats_wifi_json")
if data is None:
data = histogram(self.session, StatKey.unique_wifi)
self._set_cache("stats_wifi_json", data)
return {"series": [{"title": "MLS WiFi", "data": data[0]}]}
@view_config(renderer="templates/stats.pt", route_name="stats", http_cache=3600)
def stats_view(self):
data = self._get_cache("stats")
if data is None:
data = {"metrics1": [], "metrics2": []}
metric_names = [
("1", StatKey.unique_blue.name, "Bluetooth Networks"),
("1", StatKey.blue.name, "Bluetooth Observations"),
("1", StatKey.unique_wifi.name, "Wifi Networks"),
("1", StatKey.wifi.name, "Wifi Observations"),
("2", StatKey.unique_cell.name, "MLS Cells"),
("2", StatKey.cell.name, "MLS Cell Observations"),
]
metrics = global_stats(self.session)
for i, mid, name in metric_names:
data["metrics" + i].append({"name": name, "value": metrics[mid]})
self._set_cache("stats", data)
result = {"page_title": "Statistics"}
result.update(data)
return result
@view_config(
renderer="templates/stats_regions.pt",
route_name="stats_regions",
http_cache=3600,
)
def stats_regions_view(self):
data = self._get_cache("stats_regions")
if data is None:
data = regions(self.session)
self._set_cache("stats_regions", data)
return {"page_title": "Regions", "metrics": data}
@view_config(renderer="templates/terms.pt", name="terms", http_cache=3600)
def terms_of_service(self):
return {
"page_title": (
"Developer Terms of Service:" " Mozilla Location Service Query API"
)
}
@view_config(context=HTTPNotFound)
def default_not_found(exc):
response = Response("<h1>404 Not Found</h1>")
response.status_int = 404
return response
def favicon_view(request):
return FileResponse(FAVICON_PATH, request=request)
def touchicon_view(request):
return FileResponse(TOUCHICON_PATH, request=request)
_ROBOTS_RESPONSE = """\
User-agent: *
Disallow: /downloads
Disallow: /static/
Disallow: /v1/
Disallow: /v2/
Disallow: /__heartbeat__
Disallow: /__lbheartbeat__
Disallow: /__version__
"""
def robotstxt_view(context, request):
return Response(content_type="text/plain", body=_ROBOTS_RESPONSE)
|
py | 1a413f40dd75841a7dcdc088ac21f951740a382c | """Testing v0x05 error message class."""
from pyof.v0x05.asynchronous.error_msg import ErrorMsg
from tests.test_struct import TestStruct
class TestErrorMsg(TestStruct):
"""ErroMsg message tests (also those in :class:`.TestDump`)."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
super().setUpClass()
super().set_raw_dump_file('v0x05', 'ofpt_error_msg')
super().set_raw_dump_object(ErrorMsg, xid=1, error_type=1, code=1)
super().set_minimum_size(12)
|
py | 1a413f5b67370774db10a74b7cac4a2f191afa59 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Ikc",
"color": "grey",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("Ikc")
}
]
|
py | 1a41403040d7c18a0db54ec836b696ad150bc75d | """
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.softwarerepository_file_server import SoftwarerepositoryFileServer
from intersight.model.softwarerepository_local_machine_all_of import SoftwarerepositoryLocalMachineAllOf
globals()['SoftwarerepositoryFileServer'] = SoftwarerepositoryFileServer
globals()['SoftwarerepositoryLocalMachineAllOf'] = SoftwarerepositoryLocalMachineAllOf
class SoftwarerepositoryLocalMachine(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'SOFTWAREREPOSITORY.LOCALMACHINE': "softwarerepository.LocalMachine",
},
('object_type',): {
'SOFTWAREREPOSITORY.LOCALMACHINE': "softwarerepository.LocalMachine",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
'download_url': (str,), # noqa: E501
'part_size': (int,), # noqa: E501
'upload_id': (str,), # noqa: E501
'upload_url': (str,), # noqa: E501
'upload_urls': ([str], none_type,), # noqa: E501
}
@cached_property
def discriminator():
val = {
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
'download_url': 'DownloadUrl', # noqa: E501
'part_size': 'PartSize', # noqa: E501
'upload_id': 'UploadId', # noqa: E501
'upload_url': 'UploadUrl', # noqa: E501
'upload_urls': 'UploadUrls', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""SoftwarerepositoryLocalMachine - a model defined in OpenAPI
Args:
Keyword Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "softwarerepository.LocalMachine", must be one of ["softwarerepository.LocalMachine", ] # noqa: E501
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "softwarerepository.LocalMachine", must be one of ["softwarerepository.LocalMachine", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
download_url (str): When the import action in the file MO is updated with 'GeneratePreSignedDownloadUrl', Intersight returns a pre-signed URL in this property as part of the patch response. The user is expected to subsequently download the file using this URL.. [optional] # noqa: E501
part_size (int): The chunk size (in bytes) for each part of the file to be uploaded.. [optional] # noqa: E501
upload_id (str): When the import action in file MO is updated with 'GeneratePreSignedUploadUrl', Intersight shall return a upload Id in this property as part of the PATCH response.. [optional] # noqa: E501
upload_url (str): When a file MO is created with 'LocalMachine' as the source, Intersight returns a pre-signed URL in this property as part of the POST response. The user is expected to subsequently upload the file content using this URL. Once the upload is completed, the user is expected to patch the uploader object's transfer state to success.. [optional] # noqa: E501
upload_urls ([str], none_type): [optional] # noqa: E501
"""
class_id = kwargs.get('class_id', "softwarerepository.LocalMachine")
object_type = kwargs.get('object_type', "softwarerepository.LocalMachine")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'class_id': class_id,
'object_type': object_type,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error beause the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
SoftwarerepositoryFileServer,
SoftwarerepositoryLocalMachineAllOf,
],
'oneOf': [
],
}
|
py | 1a414051502ab28fe40960da15e8925350a0b08b | from kivymd.app import MDApp
from kivy.lang import Builder
kv = '''
#:import toast kivymd.toast.toast
MDBoxLayout :
id : box
orientation : 'vertical'
padding : '20dp'
spacing : '20dp'
MDLabel :
text : 'Enter some text'
halign : 'center'
font_size : 100
MDTextField :
id : field
hint_text : 'enter some text'
color_active : [0,1,0,1]
font_size : 30
size_hint_x : None
width : box.width - 10
pos_hint : {'center_x': .5}
MDRoundFlatButton :
text : 'See your inputted text'
md_bg_color : [0,0,1,1]
on_release : toast(field.text)
size_hint : None, None
height : '100dp'
width : box.width - 10
pos_hint : {'center_x': .5}
Widget :
'''
class TextFieldLabel(MDApp):
def build(self):
return Builder.load_string(kv)
TextFieldLabel().run() |
py | 1a41407c1c06bfb59002d0fe93c35bcd7e00fdc6 | # MIT License
#
# Copyright (c) 2020 Jonathan Zernik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import threading
from typing import Dict
from typing import List
from typing import Optional
from squeaknode.core.peer_address import PeerAddress
from squeaknode.network.peer import Peer
from squeaknode.node.listener_subscription_client import EventListener
MIN_PEERS = 5
MAX_PEERS = 10
UPDATE_THREAD_SLEEP_TIME = 10
logger = logging.getLogger(__name__)
class ConnectionManager(object):
"""Maintains connections to other peers in the network.
"""
def __init__(self):
self._peers: Dict[PeerAddress, Peer] = {}
self.peers_lock = threading.Lock()
self.peer_changed_listener = EventListener()
self.single_peer_changed_listener = EventListener()
self.accept_connections = True
@property
def peers(self) -> List[Peer]:
return list(self._peers.values())
def has_connection(self, address):
"""Return True if the address is already connected."""
return address in self._peers
def on_peers_changed(self):
peers = self.peers
logger.info('Current number of peers {}'.format(len(peers)))
logger.info('Current peers:--------')
for peer in peers:
logger.info(peer)
logger.info('--------------')
self.peer_changed_listener.handle_new_item(peers)
def _is_duplicate_nonce(self, peer):
for other_peer in self.peers:
if other_peer.local_version:
if peer.remote_version == other_peer.local_version.nNonce:
return True
return False
def add_peer(self, peer: Peer):
"""Add a peer.
"""
with self.peers_lock:
if not self.accept_connections:
raise NotAcceptingConnectionsError()
if self._is_duplicate_nonce(peer):
logger.debug('Failed to add peer {}'.format(peer))
raise DuplicateNonceError()
if self.has_connection(peer.remote_address):
logger.debug('Failed to add peer {}'.format(peer))
raise DuplicatePeerError()
self._peers[peer.remote_address] = peer
logger.debug('Added peer {}'.format(peer))
self.on_peers_changed()
def remove_peer(self, peer: Peer):
"""Add a peer.
"""
with self.peers_lock:
if not self.has_connection(peer.remote_address):
logger.debug('Failed to remove peer {}'.format(peer))
raise MissingPeerError()
del self._peers[peer.remote_address]
logger.debug('Removed peer {}'.format(peer))
self.on_peers_changed()
def get_peer(self, address) -> Optional[Peer]:
"""Get a peer info by address.
"""
return self._peers.get(address)
def stop_connection(self, address):
"""Stop peer connections for address.
"""
with self.peers_lock:
peer = self.get_peer(address)
if peer is not None:
peer.stop()
def stop_all_connections(self):
"""Stop all peer connections.
"""
self.accept_connections = False
with self.peers_lock:
for peer in self.peers:
peer.stop()
def yield_peers_changed(self, stopped: threading.Event):
yield from self.peer_changed_listener.yield_items(stopped)
def yield_single_peer_changed(self, peer_address: PeerAddress, stopped: threading.Event):
for peer in self.single_peer_changed_listener.yield_items(stopped):
logger.debug('yield_single_peer_changed: {}'.format(peer))
if peer.remote_address == peer_address:
if peer.connect_time is None:
yield None
else:
yield peer
class DuplicatePeerError(Exception):
pass
class DuplicateNonceError(Exception):
pass
class MissingPeerError(Exception):
pass
class NotAcceptingConnectionsError(Exception):
pass
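# Minimal usage sketch (hypothetical, assuming an already-connected `Peer`):
#   manager = ConnectionManager()
#   manager.add_peer(peer)                  # raises DuplicatePeerError on a repeat add
#   manager.get_peer(peer.remote_address)   # -> peer
#   manager.stop_all_connections()          # also stops accepting new connections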
|
py | 1a4141a91a62bfc6cd06d89bb0c20b4e61512346 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Windows-1255 language model
# Character Mapping Table:
WIN1255_CHAR_TO_ORDER_MAP = (
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
254,
255,
255,
254,
255,
255, # 00
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255, # 10
253,
253,
253,
253,
253,
253,
253,
253,
253,
253,
253,
253,
253,
253,
253,
253, # 20
252,
252,
252,
252,
252,
252,
252,
252,
252,
252,
253,
253,
253,
253,
253,
253, # 30
253,
69,
91,
79,
80,
92,
89,
97,
90,
68,
111,
112,
82,
73,
95,
85, # 40
78,
121,
86,
71,
67,
102,
107,
84,
114,
103,
115,
253,
253,
253,
253,
253, # 50
253,
50,
74,
60,
61,
42,
76,
70,
64,
53,
105,
93,
56,
65,
54,
49, # 60
66,
110,
51,
43,
44,
63,
81,
77,
98,
75,
108,
253,
253,
253,
253,
253, # 70
124,
202,
203,
204,
205,
40,
58,
206,
207,
208,
209,
210,
211,
212,
213,
214,
215,
83,
52,
47,
46,
72,
32,
94,
216,
113,
217,
109,
218,
219,
220,
221,
34,
116,
222,
118,
100,
223,
224,
117,
119,
104,
125,
225,
226,
87,
99,
227,
106,
122,
123,
228,
55,
229,
230,
101,
231,
232,
120,
233,
48,
39,
57,
234,
30,
59,
41,
88,
33,
37,
36,
31,
29,
35,
235,
62,
28,
236,
126,
237,
238,
38,
45,
239,
240,
241,
242,
243,
127,
244,
245,
246,
247,
248,
249,
250,
9,
8,
20,
16,
3,
2,
24,
14,
22,
1,
25,
15,
4,
11,
6,
23,
12,
19,
13,
26,
18,
27,
21,
17,
7,
10,
5,
251,
252,
128,
96,
253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
HEBREW_LANG_MODEL = (
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
3,
2,
1,
2,
0,
1,
0,
0,
3,
0,
3,
1,
0,
0,
1,
3,
2,
0,
1,
1,
2,
0,
2,
2,
2,
1,
1,
1,
1,
2,
1,
1,
1,
2,
0,
0,
2,
2,
0,
1,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
2,
2,
2,
1,
2,
1,
2,
1,
2,
0,
0,
2,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
2,
2,
1,
2,
1,
3,
1,
1,
0,
0,
2,
0,
0,
0,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
1,
0,
1,
2,
2,
1,
3,
1,
2,
1,
1,
2,
2,
0,
0,
2,
2,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
3,
3,
2,
2,
2,
2,
3,
2,
1,
2,
1,
2,
2,
2,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
3,
3,
2,
3,
2,
2,
3,
2,
2,
2,
1,
2,
2,
2,
2,
1,
2,
1,
1,
2,
2,
0,
1,
2,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
0,
2,
2,
2,
2,
2,
0,
2,
0,
2,
2,
2,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
3,
0,
2,
2,
2,
0,
2,
1,
2,
2,
2,
0,
0,
2,
1,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
2,
0,
0,
0,
0,
0,
0,
1,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
1,
2,
3,
2,
2,
2,
1,
2,
1,
2,
2,
2,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
3,
3,
3,
2,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
1,
0,
2,
0,
2,
0,
2,
1,
2,
2,
2,
0,
0,
1,
2,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
2,
0,
0,
1,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
3,
2,
3,
2,
2,
3,
2,
1,
2,
1,
1,
1,
0,
1,
1,
1,
1,
1,
3,
0,
1,
0,
0,
0,
0,
2,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
3,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
1,
1,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
2,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
2,
2,
2,
2,
2,
2,
0,
2,
0,
1,
2,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
3,
3,
3,
2,
1,
2,
3,
3,
2,
3,
3,
3,
3,
2,
3,
2,
1,
2,
0,
2,
1,
2,
0,
2,
0,
2,
2,
2,
0,
0,
1,
2,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
3,
3,
3,
1,
2,
2,
3,
3,
2,
3,
2,
3,
2,
2,
3,
1,
2,
2,
0,
2,
2,
2,
0,
2,
1,
2,
2,
2,
0,
0,
1,
2,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
3,
3,
3,
2,
3,
3,
2,
2,
2,
3,
3,
3,
3,
1,
3,
2,
2,
2,
0,
2,
0,
1,
2,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
2,
3,
3,
3,
2,
3,
2,
2,
2,
1,
2,
2,
0,
2,
2,
2,
2,
0,
2,
0,
2,
2,
2,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
3,
3,
3,
1,
3,
2,
3,
3,
2,
3,
3,
2,
2,
1,
2,
2,
2,
2,
2,
2,
0,
2,
1,
2,
1,
2,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
3,
3,
3,
3,
3,
3,
2,
3,
2,
3,
3,
2,
3,
3,
3,
3,
2,
3,
2,
3,
3,
3,
3,
3,
2,
2,
2,
2,
2,
2,
2,
1,
0,
2,
0,
1,
2,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
1,
2,
3,
3,
3,
3,
3,
3,
3,
2,
3,
2,
3,
2,
1,
2,
3,
0,
2,
1,
2,
2,
0,
2,
1,
1,
2,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
2,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
2,
3,
3,
3,
3,
2,
1,
3,
1,
2,
2,
2,
1,
2,
3,
3,
1,
2,
1,
2,
2,
2,
2,
0,
1,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
2,
0,
0,
0,
0,
0,
0,
0,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
0,
2,
3,
3,
3,
1,
3,
3,
3,
1,
2,
2,
2,
2,
1,
1,
2,
2,
2,
2,
2,
2,
0,
2,
0,
1,
1,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
3,
3,
3,
3,
3,
3,
2,
3,
3,
3,
2,
2,
3,
3,
3,
2,
1,
2,
3,
2,
3,
2,
2,
2,
2,
1,
2,
1,
1,
1,
2,
2,
0,
2,
1,
1,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
3,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
2,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
3,
3,
3,
3,
2,
3,
3,
2,
3,
1,
2,
2,
2,
2,
3,
2,
3,
1,
1,
2,
2,
1,
2,
2,
1,
1,
0,
2,
2,
2,
2,
0,
1,
0,
1,
2,
2,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
3,
0,
0,
1,
1,
0,
1,
0,
0,
1,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
2,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
0,
1,
0,
1,
0,
1,
1,
0,
1,
1,
0,
0,
0,
1,
1,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
0,
0,
0,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
3,
2,
2,
1,
2,
2,
2,
2,
2,
2,
2,
1,
2,
2,
1,
2,
2,
1,
1,
1,
1,
1,
1,
1,
1,
2,
1,
1,
0,
3,
3,
3,
0,
3,
0,
2,
2,
2,
2,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
2,
2,
2,
3,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
1,
2,
2,
1,
2,
2,
2,
1,
1,
1,
2,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
0,
2,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
3,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
1,
2,
1,
0,
2,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
0,
1,
1,
1,
1,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
1,
1,
2,
2,
2,
2,
2,
1,
2,
2,
2,
1,
1,
2,
2,
2,
2,
2,
2,
2,
1,
2,
2,
1,
0,
1,
1,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
2,
1,
1,
1,
1,
2,
1,
1,
2,
1,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
2,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
1,
2,
2,
2,
2,
2,
1,
2,
1,
2,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
2,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
1,
2,
1,
2,
1,
1,
2,
1,
1,
1,
2,
1,
2,
1,
2,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
1,
2,
2,
2,
1,
2,
2,
2,
2,
2,
2,
2,
2,
1,
2,
1,
1,
1,
1,
1,
1,
2,
1,
2,
1,
1,
0,
1,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
1,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
2,
2,
0,
2,
0,
1,
2,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
3,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
1,
1,
1,
1,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
0,
1,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
1,
1,
1,
2,
1,
2,
2,
2,
0,
2,
0,
2,
0,
1,
1,
2,
1,
1,
1,
1,
2,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
1,
0,
1,
2,
2,
0,
1,
0,
0,
1,
1,
2,
2,
1,
2,
0,
2,
0,
0,
0,
1,
2,
0,
1,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
0,
2,
1,
2,
0,
2,
0,
0,
1,
1,
1,
1,
1,
1,
0,
1,
0,
0,
0,
1,
0,
0,
1,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
2,
1,
1,
0,
1,
0,
0,
1,
1,
1,
2,
2,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
1,
2,
1,
0,
1,
1,
1,
0,
1,
0,
1,
1,
1,
1,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
2,
2,
1,
0,
2,
0,
1,
2,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
1,
0,
0,
1,
0,
1,
1,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
1,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
1,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
2,
0,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
1,
0,
1,
0,
0,
1,
1,
2,
1,
1,
2,
0,
1,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
2,
0,
1,
0,
0,
0,
0,
2,
1,
1,
2,
0,
2,
0,
0,
0,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
2,
1,
1,
0,
1,
0,
0,
2,
2,
1,
2,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
2,
0,
1,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
2,
2,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
2,
2,
0,
0,
0,
0,
2,
1,
1,
1,
0,
2,
1,
1,
0,
0,
0,
2,
1,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
2,
0,
1,
0,
0,
1,
1,
0,
2,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
1,
2,
2,
1,
1,
1,
0,
1,
1,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
2,
1,
1,
0,
1,
0,
0,
1,
1,
0,
1,
2,
1,
0,
2,
0,
0,
0,
1,
1,
0,
1,
2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
0,
0,
0,
0,
0,
0,
1,
0,
0,
2,
0,
2,
1,
1,
0,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
2,
0,
1,
0,
0,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
1,
0,
0,
2,
1,
1,
1,
1,
1,
0,
1,
0,
0,
0,
0,
1,
0,
1,
0,
1,
1,
1,
2,
1,
1,
1,
1,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
2,
1,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
0,
1,
0,
0,
0,
1,
1,
0,
0,
)
Win1255HebrewModel = {
"char_to_order_map": WIN1255_CHAR_TO_ORDER_MAP,
"precedence_matrix": HEBREW_LANG_MODEL,
"typical_positive_ratio": 0.984004,
"keep_english_letter": False,
"charset_name": "windows-1255",
"language": "Hebrew",
}
|
py | 1a4141b02a3d3ef0342fc141767399cdbe177593 | # -*- coding: utf-8 -*-
#
"""
Arithmetic part of the lycee module.
"""
def pgcd(a, b):
"""Renvoie le Plus Grand Diviseur Communs des entiers ``a`` et ``b``.
Arguments:
a (int) : un nombre entier
b (int) : un nombre entier
"""
if a < 0 or b < 0:
return pgcd(abs(a), abs(b))
if b == 0:
if a == 0:
raise ZeroDivisionError(
"Le PGCD de deux nombres nuls n'existe pas")
return a
return pgcd(b, a % b)
def reste(a, b):
"""Renvoie le reste de la division de ``a`` par ``b``.
Arguments:
a (int): Un nombre entier.
b (int): Un nombre entier non nul.
"""
r = a % b
if r < 0:
r = r + abs(b)
return r
def quotient(a, b):
"""Le quotient de la division de ``a`` par ``b``.
Arguments:
a (int): Un nombre entier.
b (int): Un nombre entier non nul.
"""
return a // b
|
py | 1a4141d5a5d8823887eed9675cdcf85382725355 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Ternaris, Munich, Germany
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division
import hashlib
import os
from collections import namedtuple
from datetime import datetime
from fnmatch import fnmatch
from logging import getLogger
from .model import File, Fileset
from ._utils import multiplex
from .widgeting import make_register, WidgetBase
class Scanner(WidgetBase):
def __init__(self, pattern, **kw):
super(Scanner, self).__init__(**kw)
self.pattern = pattern
def __call__(self, fileinfos):
filtered = (x for x in fileinfos
if fnmatch(os.path.join(x.dirpath, x.name), self.pattern))
return self.callback(filtered)
SCANNER = dict()
scanner = make_register('scanner', namespace='', registry=SCANNER, cls=Scanner)
FileInfo = namedtuple('FileInfo', ('dirpath', 'name'))
FilesetInfo = namedtuple('FilesetInfo', ('type', 'dirpath', 'name', 'indexed_files'))
class BrokenFileset(Exception):
def __init__(self, *args, **kw):
super(BrokenFileset, self).__init__(self.__class__.__name__, *args, **kw)
class MissingFile(BrokenFileset):
pass
class MissingMD5(BrokenFileset):
pass
class EmptyFile(BrokenFileset):
pass
def detect_filesets(basedir, scanners):
"""Walk basedir using scanners to detect filesets, return filesetinfos"""
logger = getLogger(__name__)
assert os.path.isdir(basedir)
assert len(scanners) > 0
for dirpath, subdirs, filenames in os.walk(basedir):
logger.debug('Scanning %s', dirpath)
fileinfos = (FileInfo(dirpath, filename)
for filename in filenames
if filename[0] != '.' and # skip hidden files
filename[-4:] != '.md5') # skip md5 files
for filesetinfo in list(multiplex(fileinfos, scanners, dont_catch=True)):
yield filesetinfo
def make_file(fileinfo):
"""Make File model from FileInfo"""
path = os.path.join(fileinfo.dirpath, fileinfo.name)
md5file = '{}.md5'.format(path)
try:
with open(md5file, 'rb') as f:
md5 = f.read(32)
except IOError:
raise MissingMD5(fileinfo.dirpath, fileinfo.name)
stat = os.stat(path)
size = stat.st_size
if size == 0:
raise EmptyFile(fileinfo.dirpath, fileinfo.name)
return File(name=fileinfo.name, md5=md5, size=size)
def make_fileset(filesetinfo):
"""Make Fileset model from FilesetInfo"""
files = []
md5 = hashlib.md5()
for idx, fileinfo in filesetinfo.indexed_files:
file = make_file(fileinfo)
files.append(file)
md5.update(file.md5)
missing_files = 1 + idx - len(files)
if missing_files:
dirpath = filesetinfo.indexed_files[0][1].dirpath
raise MissingFile(dirpath, filesetinfo.name, missing_files)
now = datetime.utcnow()
return Fileset(name=filesetinfo.name, md5=md5.hexdigest(),
dirpath=filesetinfo.dirpath, type=filesetinfo.type,
files=files, time_added=now, time_updated=now)
def scan(basedir, scanner=SCANNER):
"""Scan basedir, return Fileset models, log warning for broken sets"""
logger = getLogger(__name__)
scanners = scanner.values()
for filesetinfo in detect_filesets(basedir, scanners):
try:
fileset = make_fileset(filesetinfo)
except BrokenFileset as e:
logger.warn(e)
continue
yield fileset
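# Usage sketch (hypothetical): once scanners have been registered via the
# `scanner` register above, walking a data directory is simply
#   for fileset in scan('/data'):
#       print(fileset.name, fileset.md5, len(fileset.files))
# Broken sets (missing files, missing .md5, empty files) are logged and skipped.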
|
py | 1a4141ec19cbacad6c73f867f56c1b6f4fd08c61 | from snovault import (
CONNECTION,
upgrade_step,
)
@upgrade_step('fastqc_quality_metric', '2', '3')
def fastqc_quality_metric_2_3(value, system):
# http://redmine.encodedcc.org/issues/3897
# get from the file the lab and award for the attribution!!!
conn = system['registry'][CONNECTION]
f = conn.get_by_uuid(value['quality_metric_of'][0])
award_uuid = str(f.properties['award'])
lab_uuid = str(f.properties['lab'])
award = conn.get_by_uuid(award_uuid)
lab = conn.get_by_uuid(lab_uuid)
value['award'] = '/awards/'+str(award.properties['name'])+'/'
value['lab'] = '/labs/'+str(lab.properties['name'])+'/'
@upgrade_step('fastqc_quality_metric', '3', '4')
def fastqc_quality_metric_3_4(value, system):
return
@upgrade_step('fastqc_quality_metric', '4', '5')
def fastqc_quality_metric_4_5(value, system):
# http://redmine.encodedcc.org/issues/2491
if 'assay_term_id' in value:
del value['assay_term_id']
if 'notes' in value:
if value['notes']:
value['notes'] = value['notes'].strip()
else:
del value['notes']
|
py | 1a41425280ecf0dcbe892507a831e8871ecf58fc | # ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
import os
from six.moves.configparser import ConfigParser
from traits.api import Str, Property, cached_property, Int, \
Any, String, Event, Bool, Dict, List, Button, CInt
# ============= local library imports ==========================
from pychron.core.helpers.filetools import glob_list_directory
from pychron.dvc.dvc_irradiationable import DVCAble
from pychron.entry.entry_views.user_entry import UserEntry
from pychron.globals import globalv
from pychron.paths import paths
from pychron.persistence_loggable import PersistenceLoggable
from pychron.pychron_constants import NULL_STR, LINE_STR
class ExperimentQueueFactory(DVCAble, PersistenceLoggable):
application = Any
username = String
email = Property(depends_on='username, use_email, _email')
_email = Str
_emails = Dict
use_group_email = Bool
use_email = Bool
edit_emails = Button
usernames = Property(depends_on='users_dirty, db_refresh_needed')
edit_user = Event
add_user = Event
users_dirty = Event
db_refresh_needed = Event
mass_spectrometer = String('Spectrometer')
mass_spectrometers = Property(depends_on='db_refresh_needed')
extract_device = String('Extract Device')
extract_devices = Property(depends_on='db_refresh_needed')
queue_conditionals_name = Str
available_conditionals = List
delay_between_analyses = Int(30)
delay_before_analyses = Int(5)
delay_after_blank = Int(15)
delay_after_air = Int(15)
tray = Str
trays = Property
note = Str
default_lighting = CInt(0)
load_name = Str
select_existing_load_name_button = Button
ok_make = Property(depends_on='mass_spectrometer, username')
persistence_name = 'queue_factory'
pattributes = ('mass_spectrometer',
'extract_device',
'use_group_email',
'delay_between_analyses',
'delay_before_analyses',
'delay_after_blank',
'delay_after_air',
'default_lighting',
'queue_conditionals_name')
def activate(self, load_persistence):
"""
called by ExperimentFactory
"""
self._load_queue_conditionals()
if load_persistence:
self.load()
self.username = globalv.username
def deactivate(self):
"""
called by ExperimentFactory.destroy
"""
self.dump()
# persistence
def _load_queue_conditionals(self):
root = paths.queue_conditionals_dir
cs = glob_list_directory(root, remove_extension=True)
self.available_conditionals = [NULL_STR] + cs
def _select_existing_load_name_button_fired(self):
db = self.get_database()
if db is None or not db.connect():
self.warning_dialog('Not connected to a database')
else:
with db.session_ctx(use_parent_session=False):
loads = db.get_loads()
from pychron.database.views.load_view import LoadView
lv = LoadView(records = loads)
info = lv.edit_traits()
if info.result:
self.load_name = lv.selected.name
self.tray = lv.selected.holderName
# ===============================================================================
# property get/set
# ===============================================================================
def _get_email(self):
email = ''
if self.use_email:
if self._email:
email = self._email
else:
if self.username in self._emails:
email = self._emails[self.username]
return email
def _set_email(self, v):
self._email = v
@cached_property
def _get_ok_make(self):
ms = self.mass_spectrometer.strip()
un = self.username.strip()
return bool(ms and ms not in ('Spectrometer', LINE_STR) and un)
@cached_property
def _get_trays(self):
db = self.get_database()
if db is None or not db.connect():
return []
trays = [NULL_STR]
dbtrays = db.get_load_holders()
if dbtrays:
trays.extend(dbtrays)
return trays
@cached_property
def _get_usernames(self):
db = self.get_database()
if db is None or not db.connect():
return []
us = []
with db.session_ctx(use_parent_session=False):
dbus = db.get_users()
if dbus:
us = [ui.name for ui in dbus]
self._emails = {ui.name: ui.email or '' for ui in dbus}
return [''] + us
@cached_property
def _get_extract_devices(self):
"""
look in db first
then look for a config file
then use hardcoded defaults
"""
db = self.get_database()
cp = os.path.join(paths.setup_dir, 'names')
if db:
if not db.connect():
return []
with db.session_ctx(use_parent_session=False):
names = db.get_extraction_device_names()
elif os.path.isfile(cp):
names = self._get_names_from_config(cp, 'Extraction Devices')
else:
names = ['Fusions Diode', 'Fusions UV', 'Fusions CO2']
return ['Extract Device', LINE_STR] + names
@cached_property
def _get_mass_spectrometers(self):
"""
look in db first
then look for a config file
then use hardcoded defaults
"""
db = self.get_database()
cp = os.path.join(paths.setup_dir, 'names')
if db:
if not db.connect():
self.warning('not connected to database')
return []
with db.session_ctx(use_parent_session=False):
ms = db.get_mass_spectrometer_names()
names = [mi.capitalize() for mi in ms]
elif os.path.isfile(cp):
names = self._get_names_from_config(cp, 'Mass Spectrometers')
else:
names = ['Jan', 'Obama']
return ['Spectrometer', LINE_STR] + names
def _get_names_from_config(self, cp, section):
config = ConfigParser()
config.read(cp)
if config.has_section(section):
return [config.get(section, option) for option in config.options(section)]
# handlers
def _edit_user_fired(self):
a = UserEntry(dvc=self.dvc)
nuser = a.edit(self.username)
if nuser:
self.users_dirty = True
self.username = nuser
def _mass_spectrometer_changed(self, new):
self.debug('mass spectrometer ="{}"'.format(new))
def _edit_emails_fired(self):
task = self.application.open_task('pychron.users')
task.auto_save = True
if __name__ == '__main__':
g = ExperimentQueueFactory()
g.configure_traits()
# ============= EOF =============================================
|
py | 1a41425528b892db2add79e9a51262172935eb0e | import sqlite3
def make_db():
con = sqlite3.connect("Paths.db")
c = con.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS Paths
( Fromm text not null, Tooo text not null)''')
con.commit()
def insert(x,y):
con = sqlite3.connect("Paths.db")
SQLinsertfb = '''INSERT INTO Paths (Fromm,Tooo) VALUES(?,?)'''
c = con.cursor()
c.execute(SQLinsertfb, (x,y))
con.commit()
def get_last_element():
Sqlmaxid='''SELECT Fromm,Tooo FROM Paths WHERE rowid=(SELECT MAX(rowid) FROM Paths)'''
con = sqlite3.connect("Paths.db")
c=con.cursor()
returned=c.execute(Sqlmaxid)
returned=[list(elem) for elem in returned]
con.commit()
return returned
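# Example usage (sketch, assuming Paths.db is writable in the working directory):
#   make_db()
#   insert("home", "office")
#   get_last_element()   # -> [['home', 'office']]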
|
py | 1a4142abde24f6da666a89c1d817eca8ce448f39 | from nominal_unification.Syntax import *
class Closure():
""" A closure represents an expression within a context with bindings.
Variables within said expression may or may not be captured by the
scope.
"""
def __init__(self, expr, scope):
self.expr = expr
self.scope = scope
def __str__(self):
return "〈" + str(self.expr) + "; " + str(self.scope) + "〉"
__repr__ = __str__
def alphaEq(clo1, clo2):
""" Test if two closures are equivalent. Determines the alpha-equivalence
of two expressions with respect to their scopes.
If both terms are free in their respective scopes and have the same
string, then the closures are equivalent. If both are bound by their
respective scopes at the same index then they are also the same
closure, even if the terms have different strings. They are not the
same closure, otherwise.
See [Same-Free] and [Same-Bound] in Figure 1.
"""
l1 = lookupName(clo1.expr, clo1.scope)
l2 = lookupName(clo2.expr, clo2.scope)
# [Same-Free] Figure 3
# a1 = a2
# Φ1 ⊦ Fr a1
# Φ2 ⊦ Fr a2
# -------------------
# 〈a1; Φ1〉 ≈ 〈a2; Φ2〉
if isinstance(l1, Free) and isinstance(l2, Free):
return clo1.expr == clo2.expr
# [Same-Bound] Figure 3
# i1 = i2
# Φ1 ⊦ Bd a1 i1
# Φ2 ⊦ Bd a2 i2
# -------------------
# 〈a1; Φ1〉 ≈ 〈a2; Φ2〉
elif isinstance(l1, Bound) and isinstance(l2, Bound):
return l1.index == l2.index
else:
return False
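# Illustration (assuming Name/Var and the scope helpers from
# nominal_unification.Syntax): two closures over bound names are alpha-equal
# exactly when their binders sit at the same index, so the innermost binding
# of "x" in one scope matches the innermost binding of "y" in another, even
# though the strings "x" and "y" differ.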
class NuEquation():
""" Represents constraint equations between expressions which are either
names or variables. The first term in the equation should always be a
name, while the second is either a name (an NN problem) or a variable
(an NV problem).
These equations are used by nu machines to derive maps from variables
to names.
A "nu problem" is a list of nu equations.
See Figure 4.
"""
def __init__(self, clo1, clo2):
if not isName(clo1.expr):
raise UnificationError(
"First argument, " +
str(clo1) +
", of Nu Equation must be a name.")
if not isName(clo2.expr) and not isinstance(clo2.expr, Var):
raise UnificationError(
"Second argument, " +
str(clo2) +
", of Nu Equation must be a name or a variable.")
self.clo1 = clo1 # Clo Name
self.clo2 = clo2
self.var = isinstance(clo2.expr, Var)
# If self.var is true, then self.clo2 will be a closure over
# a Var, otherwise it's a closure over an Name.
def __str__(self):
if self.var:
return "(" + str(self.clo1) + " ≈NV " + str(self.clo2) + ")"
else:
return "(" + str(self.clo1) + " ≈NN " + str(self.clo2) + ")"
__repr__ = __str__
class DeltaEquation():
""" Represents constraint equations between expressions which are variables
(a VV problem).
These equations are used by delta machines to derive unifiers between
sets of variables.
A "delta problem" is a list of delta equations.
See Figure 4.
"""
def __init__(self, clo1, clo2):
if not isinstance(clo1.expr, Var):
raise UnificationError(
"First argument, " +
str(clo1) +
", of Delta Equation must be a variable.")
if not isinstance(clo2.expr, Var):
raise UnificationError(
"Second argument, " +
str(clo2) +
", of Delta Equation must be a variable.")
self.clo1 = clo1 # Clo Var
self.clo2 = clo2 # Clo Var
def __str__(self):
return "(" + str(self.clo1) + " ≈VV " + str(self.clo2) + ")"
__repr__ = __str__
class MultiEquation():
""" Represents a constraint equation between two expressions.
Used by rho machines to compute the nu problems and delta problems to
be fed into the nu and delta machines.
A "rho problem" is a list of multiequations.
See Figure 7.
"""
def __init__(self, clo1, clo2):
self.clo1 = clo1 # Clo Expr
self.clo2 = clo2 # Clo Expr
def __str__(self):
return "(" + str(self.clo1) + " ≈ " + str(self.clo2) + ")"
__repr__ = __str__
def extendSubst(var, expr, sub):
""" Given a variable and an expression it should be substituted for, extend
the substitution with that mapping.
This exists as a non-stateful way to extend substitutions. That is,
this creates a new substitution, rather than modifying an existing one.
"""
subp = sub.copy()
subp[var.string] = expr
return subp
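# e.g. extendSubst(someVar, someExpr, {}) returns {someVar.string: someExpr}
# while leaving the original substitution dict untouched.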
|
py | 1a414406419e0508f8bf05712e5c15577a9ab74b | """
Utility functions for cmiles generator
"""
import numpy as np
import copy
import collections
import warnings
try:
from rdkit import Chem
has_rdkit = True
except ImportError:
has_rdkit = False
try:
from openeye import oechem
if not oechem.OEChemIsLicensed():
has_openeye = False
has_openeye = True
except ImportError:
has_openeye = False
if not has_openeye and not has_rdkit:
raise ImportError("Must have openeye or rdkit installed")
_symbols = {'H': 1, 'He': 2,
'Li': 3, 'Be': 4, 'B': 5, 'C': 6, 'N': 7, 'O': 8, 'F': 9, 'Ne': 10,
'Na': 11, 'Mg': 12, 'Al': 13, 'Si': 14, 'P': 15, 'S': 16, 'Cl': 17, 'Ar': 18,
'K': 19, 'Ca': 20, 'Sc': 21, 'Ti': 22, 'V': 23, 'Cr': 24, 'Mn': 25, 'Fe': 26, 'Co': 27, 'Ni': 28, 'Cu': 29,
'Zn': 30, 'Ga': 31, 'Ge': 32, 'As': 33, 'Se': 34, 'Br': 35, 'Kr': 36, 'Rb': 37, 'Sr': 38, 'Y': 39, 'Zr': 40,
'Nb': 41, 'Mo': 42, 'Tc': 43, 'Ru': 44, 'Rh': 45, 'Pd': 46, 'Ag': 47, 'Cd': 48, 'In': 49, 'Sn': 50, 'Sb': 51,
'Te': 52, 'I': 53, 'Xe': 54, 'Cs': 55, 'Ba': 56, 'La': 57, 'Ce': 58, 'Pr': 59, 'Nd': 60, 'Pm': 61, 'Sm': 62,
'Eu': 63,' Gd': 64, 'Tb': 65, 'Dy': 66, 'Ho': 67, 'Er': 68, 'Tm': 69, 'Yb': 70, 'Lu': 71, 'Hf': 72, 'Ta': 73,
'W': 74, 'Re': 75, 'Os': 76, 'Ir': 77, 'Pt': 78, 'Au': 79, 'Hg': 80, 'Tl': 81, 'Pb': 82, 'Bi': 83, 'Po':84,
'At': 85, 'Rn': 86, 'Fr': 87, 'Ra': 88, 'Ac': 89, 'Th': 90,' Pa': 91, 'U': 92, 'Np': 93, 'Pu': 94, 'Am': 95,
'Cm': 96, 'Bk': 97, 'Cf': 98, 'Es': 99, 'Fm': 100, 'Md': 101, 'No': 102, 'Lr': 103, 'Rf': 104, 'Db': 105,
'Sg': 106, 'Bh': 107, 'Hs': 108, 'Mt': 109}
BOHR_2_ANGSTROM = 0.529177210
ANGSROM_2_BOHR = 1. / BOHR_2_ANGSTROM
def load_molecule(inp_molecule, toolkit='openeye', **kwargs):
"""
Load molecule.
Input is restrictive. Allowed inputs are:
1. Isomeric SMILES
2. JSON serialized molecule
Parameters
----------
inp_molecule: str or dict
isomeric SMILES or QCSChema
toolkit: str, optional, default openeye.
cheminformatics toolkit to use
Returns
-------
molecule:
`oechem.OEMOl` or `rdkit.Chem.Mol`
"""
# Check input
if isinstance(inp_molecule, dict):
# This is a JSON molecule.
molecule = mol_from_json(inp_molecule, toolkit=toolkit, **kwargs)
elif isinstance(inp_molecule, str):
if toolkit == 'openeye' and has_openeye:
molecule = oechem.OEMol()
if not oechem.OESmilesToMol(molecule, inp_molecule):
raise ValueError("The supplied SMILES {} could not be parsed".format(inp_molecule))
elif toolkit == 'rdkit' and has_rdkit:
a = Chem.rdmolfiles.SmilesParserParams()
a.removeHs = False
molecule = Chem.MolFromSmiles(inp_molecule, a)
if not molecule:
raise ValueError("The supplied SMILES {} could not be parsed".format(inp_molecule))
else:
raise ValueError("Only openeye and rdkit toolkits are supported")
else:
raise ValueError("Only QCSchema serialized molecule or an isomric SMILES are valid inputs")
return molecule
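# Usage sketch (assuming at least one of the cheminformatics toolkits is installed):
#   mol = load_molecule('CCO', toolkit='rdkit')   # from an isomeric SMILES
#   mol = load_molecule(qcschema_dict)            # from a QCSchema molecule dict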
def mol_from_json(inp_molecule, toolkit='openeye', **kwargs):
"""
Load a molecule from QCSchema
see `QCSchema <https://molssi-qc-schema.readthedocs.io/en/latest/index.html#>`_
Required fields for the QCSchema molecule:
1. symbols
2. geometry
3. connectivity
Parameters
----------
inp_molecule: dict
QCSchema molecule with `symbols`, `geometry` and `connectivity`
toolkit: str, optional. Default openeye
cheminformatics toolkit to use. Currently supports `openeye` and `rdkit`
**permute_xyz: bool, optional, default False
If False, will add flag to molecule such that the mapped SMILES retains the order of serialized geometry. If True,
mapped SMILES will be in canonical order and serialized geometry will have to be reordered.
Returns
-------
molecule
`oechem.OEMol` or `rdkit.Chem.Mol`
"""
# Check fields
required_fields = ['symbols', 'geometry', 'connectivity']
for key in required_fields:
if key not in inp_molecule:
raise KeyError("input molecule must have {}".format(key))
symbols = inp_molecule['symbols']
connectivity = inp_molecule['connectivity']
# convert to Angstrom.
geometry = np.asarray(inp_molecule['geometry'], dtype=float)*BOHR_2_ANGSTROM
if len(symbols) != geometry.shape[0]/3:
raise ValueError("Number of atoms in molecule does not match length of position array")
if toolkit == 'openeye' and has_openeye:
import cmiles._cmiles_oe as mol_toolkit
elif toolkit == 'rdkit' and has_rdkit:
import cmiles._cmiles_rd as mol_toolkit
else:
raise ValueError("Only openeye and rdkit backends are supported")
molecule = mol_toolkit.mol_from_json(symbols, connectivity, geometry, **kwargs)
return molecule
def mol_to_smiles(molecule, **kwargs):
"""
Generate canonical smiles from molecule
Parameters
----------
molecule:
`oechem.OEMol` or `rdkit.Chem.Mol`
**isomeric: bool, optional, default True
If False, SMILES will not include stereo information
**explicit_hydrogen: bool, optional, default True
If True, SMILES will have explicit hydrogen.
**mapped: bool, optional, default True
If True, SMILES will have map indices
Example: O=O will be ``[O:1]=[O:2]``
Returns
-------
str
SMILES
"""
molecule = copy.deepcopy(molecule)
toolkit = _set_toolkit(molecule)
if has_atom_map(molecule):
remove_atom_map(molecule)
return toolkit.mol_to_smiles(molecule, **kwargs)
def to_canonical_label(mapped_smiles, labeled_atoms, toolkit='openeye'):
"""
Generate human readable index with labeled torsions, angles, or bonds
Parameters
----------
mapped_smiles : str
SMILES with map indices
labeled_atoms : tuple of int
ints should correspond to map indices -1 in mapped SMILES
Returns
-------
labeled SMILES
"""
mol = load_molecule(mapped_smiles, toolkit=toolkit)
toolkit = _set_toolkit(mol)
if not has_atom_map(mol):
raise RuntimeError("SMILES must have map indices")
return toolkit.generate_index(mol, labeled_atoms)
def mol_to_hill_molecular_formula(molecule):
"""
Generate Hill sorted empirical formula.
Hill sorted first lists C and H and then all other symbols in alphabetical
order
Parameters
----------
molecule:
`oechem.OEMol` or `rdkit.Chem.Mol`
Returns
-------
str
hill sorted empirical formula
"""
# check molecule
toolkit = _set_toolkit(molecule)
if not has_explicit_hydrogen(molecule):
molecule = toolkit.add_explicit_hydrogen(molecule)
symbols = toolkit.get_symbols(molecule)
count = collections.Counter(x.title() for x in symbols)
hill_sorted = []
for k in ['C', 'H']:
# remove C and H from count
if k in count:
c = count.pop(k)
hill_sorted.append(k)
if c > 1:
hill_sorted.append(str(c))
for k in sorted(count.keys()):
c = count[k]
hill_sorted.append(k)
if c > 1:
hill_sorted.append(str(c))
return "".join(hill_sorted)
def mol_to_map_ordered_qcschema(molecule, mapped_smiles, multiplicity=1, **kwargs):
"""
Generate JSON serialized molecule following `QCSchema specs <https://molssi-qc-schema.readthedocs.io/en/latest/index.html#>`_
Geometry, symbols and connectivity table ordered according to map indices in mapped SMILES
Parameters
----------
molecule:
`oechem.OEMol` or `rdkit.Chem.Mol`
**molecule must have a conformer**.
mapped_smiles: str
explicit hydrogen mapped SMILES used to order the serialized geometry
multiplicity: int, optional, default 1
multiplicity of molecule
Returns
-------
dict
JSON serialized molecule following QCSchema specs
"""
toolkit = _set_toolkit(molecule)
atom_map = toolkit.get_atom_map(molecule, mapped_smiles, **kwargs)
connectivity = get_connectivity_table(molecule, atom_map)
symbols, geometry = toolkit.get_map_ordered_geometry(molecule, atom_map)
charge = get_charge(molecule)
qcschema_mol = {'symbols': symbols, 'geometry': geometry, 'connectivity': connectivity,
'molecular_charge': charge, 'molecular_multiplicity': multiplicity}
return qcschema_mol
def get_atom_map(molecule, mapped_smiles, **kwargs):
"""
Get mapping of map index -> atom index
Parameters
----------
molecule:
`oechem.OEMol` or `rdkit.Chem.Mol`
mapped_smiles: str
explicit hydrogen mapped SMILES
Returns
-------
atom_map: dict
dictionary mapping `{map_index: atom_index}`
"""
toolkit = _set_toolkit(molecule)
atom_map = toolkit.get_atom_map(molecule, mapped_smiles)
return atom_map
def get_connectivity_table(molecule, atom_map):
"""
Generate connectivity table
Parameters
----------
molecule:
oechem.Mol or rdkit.Chem.Mol
atom_map: dict
``{map_idx : atom_idx}``
Returns
-------
list: list of lists
lists of atoms bonded and the bond order
[[map_idx_1, map_idx_2, bond_order] ...]
"""
toolkit = _set_toolkit(molecule)
inverse_map = dict(zip(atom_map.values(), atom_map.keys()))
return toolkit.get_connectivity_table(molecule, inverse_map)
def permute_qcschema(json_mol, molecule_ids, **kwargs):
"""
permute geometry and symbols to correspond to map indices on mapped SMILES
Parameters
----------
json_mol: dict
JSON serialized molecule.
Required fields: `symbols`, `geometry`, `connectivity` and `multiplicity`
molecule_ids: dict
cmiles generated molecular ids
Returns
-------
dict
JSON serialized molecule. `symbols`, `geometry`, and `connectivity` ordered according to map indices on mapped
SMILES.
Also includes `identifiers` field with cmiles generated identifiers.
"""
molecule = mol_from_json(json_mol, **kwargs)
ordered_qcschema = mol_to_map_ordered_qcschema(molecule, molecule_ids, json_mol['molecular_multiplicity'])
return ordered_qcschema
def has_atom_map(molecule):
"""
Check if molecule has atom map indices. Will return True even if only one atom has map index
Parameters
----------
molecule:
`oechem.Mol` or `rdkit.Chem.Mol`
Returns
-------
bool
True if at least one atom has a map index. False if the molecule has no map indices
"""
toolkit = _set_toolkit(molecule)
return toolkit.has_atom_map(molecule)
def is_missing_atom_map(molecule):
"""
Check if any atom in molecule is missing atom map index
Parameters
----------
molecule:
oechem.Mol or rdkit.Chem.Mol
Returns
-------
bool
True if even one atom map index is missing. False if all atoms have map indices.
"""
toolkit = _set_toolkit(molecule)
return toolkit.is_missing_atom_map(molecule)
def is_map_canonical(molecule):
"""
Check if map indices on molecule are in canonical order
Parameters
----------
molecule:
`oechem.Mol` or `rdkit.Chem.Mol`
Returns
-------
bool
"""
toolkit = _set_toolkit(molecule)
return toolkit.is_map_canonical(molecule)
def remove_atom_map(molecule, **kwargs):
"""
Remove atom map from molecule
Parameters
----------
molecule
`oechem.OEMol` or `rdkit.Chem.Mol`
keep_map_data: bool, optional, default True
If True, will save map indices in atom data
"""
toolkit = _set_toolkit(molecule)
toolkit.remove_atom_map(molecule, **kwargs)
def restore_atom_map(molecule):
"""
Restore atom map from atom data in place
Parameters
----------
molecule
`oechem.OEMol` or `rdkit.Chem.Mol`
"""
toolkit = _set_toolkit(molecule)
toolkit.restore_atom_map(molecule)
if not has_atom_map(molecule):
warnings.warn("There were no atom maps in atom data to restore")
def add_atom_map(molecule, **kwargs):
"""
Add canonical ordered atom map to molecule
Parameters
----------
molecule :
`oechem.OEMol` or `rdkit.Chem.Mol`
Returns
-------
molecule with map indices
"""
toolkit = _set_toolkit(molecule)
return toolkit.add_atom_map(molecule, **kwargs)
def has_stereo_defined(molecule):
"""
Checks if molecule has all stereo defined.
Parameters
----------
molecule:
`oechem.OEMol` or `rdkit.Chem.Mol`
Returns
-------
bool
True if all stereo defined, False otherwise
Notes
-----
This does not check if all chirality or bond stereo are consistent. The best way to check is to try to generate a
3D conformer. If stereo information is inconsistent, this will fail.
"""
toolkit = _set_toolkit(molecule)
return toolkit.has_stereo_defined(molecule)
def has_explicit_hydrogen(molecule):
#ToDo: Use option in RDKit to generate explicit hydrogen molecules from explicit hydrogen SMILES
"""
Check if molecule has explicit hydrogen.
Parameters
----------
molecule:
`oechem.OEMol` or `rdkit.Chem.Mol`
Returns
-------
bool
True if has all explicit H. False otherwise.
"""
toolkit = _set_toolkit(molecule)
return toolkit.has_explicit_hydrogen(molecule)
def add_explicit_hydrogen(molecule):
"""
Add explicit hydrogen to molecule
Parameters
----------
molecule:
`oechem.OEMol` or `rdkit.Chem.Mol`
Returns
-------
molecule
`oechem.OEMol` or `rdkit.Chem.Mol` with explicit hydrogen
"""
toolkit = _set_toolkit(molecule)
return toolkit.add_explicit_hydrogen(molecule)
def get_charge(molecule):
"""
Get charge state of molecule
Parameters
----------
molecule:
`oechem.OEMol` or `rdkit.Chem.Mol`
Returns
-------
int
total charge of molecule
"""
charge = 0
for atom in molecule.GetAtoms():
charge += atom.GetFormalCharge()
return charge
def _set_toolkit(molecule):
"""
Set toolkit to use by checking molecule instance and if the toolkit is installed
Parameters
----------
molecule:
oechem.OEMol or rdkit.Chem.Mol
Returns
-------
toolkit: module
either cmiles._cmiles_oe or cmiles._cmiles_rd
"""
if has_openeye and isinstance(molecule, oechem.OEMolBase):
import cmiles._cmiles_oe as toolkit
elif has_rdkit and isinstance(molecule, Chem.rdchem.Mol):
import cmiles._cmiles_rd as toolkit
else:
raise RuntimeError("Must have openeye or rdkit installed")
return toolkit
def invert_atom_map(atom_map):
"""
Invert atom map `{map_idx:atom_idx} --> {atom_idx:map_idx}`
Parameters
----------
atom_map: dict
`{map_idx:atom_idx}`
Returns
-------
dict
`{atom_idx:map_idx}`
"""
return dict(zip(atom_map.values(), atom_map.keys()))
|
py | 1a414416cc0a04b6cefffd78e08881ddc4cb427b | from retina.utils import Registry
DETECTORS = Registry('detector')
|
py | 1a41459bce9f03aacadc782d224e4d29f454ec9e | from cx_Freeze import setup, Executable
includefiles = []
includes = []
excludes = []
packages = ["PIL.Image", "PIL.WebPImagePlugin"]
setup(
name = "WEBP Converter",
version = "0.1.0",
description = "This is my program",
options = {'build_exe': {'includes': includes, 'excludes': excludes, 'packages': packages, 'include_files': includefiles}},
executables = [Executable("converter.py")]
)
|
py | 1a414795301fc535158f849e0246b1a6acf015ab | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
_here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(_here, 'README.rst'), encoding='utf-8') as f:
README = f.read()
with open(os.path.join(_here, 'LICENSE'), encoding='utf-8') as f:
LICENSE = f.read()
version = {}
with open(os.path.join(_here, 'codeforces', 'version.py')) as f:
exec(f.read(), version)
setup(
name='codeforces',
version=version['__version__'],
description='Simple wrapper for the codeforces API',
long_description=README,
author='Vicfred',
author_email='[email protected]',
url='vicfred.dev',
license=LICENSE,
packages=find_packages(exclude=('tests', 'docs')),
install_requires=[
'requests>=2.20.0'
]
)
|
py | 1a41482f42e60971eb2e49cad0cebe7b0abd4781 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import random
from frappe.utils import random_string
from frappe.desk import query_report
from erpnext.accounts.doctype.journal_entry.journal_entry import get_payment_entry_against_invoice
from erpnext.accounts.doctype.payment_entry.payment_entry import get_payment_entry
from frappe.utils.make_random import get_random
from erpnext.accounts.doctype.payment_request.payment_request import make_payment_request, make_payment_entry
from erpnext.demo.user.sales import make_sales_order
from erpnext.selling.doctype.sales_order.sales_order import make_sales_invoice
from erpnext.stock.doctype.purchase_receipt.purchase_receipt import make_purchase_invoice
def work():
frappe.set_user(frappe.db.get_global('demo_accounts_user'))
if random.random() <= 0.6:
report = "Ordered Items to be Billed"
for so in list(set([r[0] for r in query_report.run(report)["result"]
if r[0]!="Total"]))[:random.randint(1, 5)]:
try:
si = frappe.get_doc(make_sales_invoice(so))
si.posting_date = frappe.flags.current_date
for d in si.get("items"):
if not d.income_account:
d.income_account = "Sales - {}".format(frappe.db.get_value('Company', si.company, 'abbr'))
si.insert()
si.submit()
frappe.db.commit()
except frappe.ValidationError:
pass
if random.random() <= 0.6:
report = "Received Items to be Billed"
for pr in list(set([r[0] for r in query_report.run(report)["result"]
if r[0]!="Total"]))[:random.randint(1, 5)]:
try:
pi = frappe.get_doc(make_purchase_invoice(pr))
pi.posting_date = frappe.flags.current_date
pi.bill_no = random_string(6)
pi.insert()
pi.submit()
frappe.db.commit()
except frappe.ValidationError:
pass
if random.random() < 0.5:
make_payment_entries("Sales Invoice", "Accounts Receivable")
if random.random() < 0.5:
make_payment_entries("Purchase Invoice", "Accounts Payable")
if random.random() < 0.1:
#make payment request against sales invoice
sales_invoice_name = get_random("Sales Invoice", filters={"docstatus": 1})
if sales_invoice_name:
si = frappe.get_doc("Sales Invoice", sales_invoice_name)
if si.outstanding_amount > 0:
payment_request = make_payment_request(dt="Sales Invoice", dn=si.name, recipient_id=si.contact_email,
submit_doc=True, mute_email=True, use_dummy_message=True)
payment_entry = frappe.get_doc(make_payment_entry(payment_request.name))
payment_entry.posting_date = frappe.flags.current_date
payment_entry.submit()
make_pos_invoice()
def make_payment_entries(ref_doctype, report):
outstanding_invoices = list(set([r[3] for r in query_report.run(report,
{"report_date": frappe.flags.current_date })["result"] if r[2]==ref_doctype]))
# make Payment Entry
for inv in outstanding_invoices[:random.randint(1, 2)]:
pe = get_payment_entry(ref_doctype, inv)
pe.posting_date = frappe.flags.current_date
pe.reference_no = random_string(6)
pe.reference_date = frappe.flags.current_date
pe.insert()
pe.submit()
frappe.db.commit()
outstanding_invoices.remove(inv)
# make payment via JV
for inv in outstanding_invoices[:1]:
jv = frappe.get_doc(get_payment_entry_against_invoice(ref_doctype, inv))
jv.posting_date = frappe.flags.current_date
jv.cheque_no = random_string(6)
jv.cheque_date = frappe.flags.current_date
jv.insert()
jv.submit()
frappe.db.commit()
def make_pos_invoice():
make_sales_order()
for data in frappe.get_all('Sales Order', fields=["name"],
filters = [["per_billed", "<", "100"]]):
si = frappe.get_doc(make_sales_invoice(data.name))
si.is_pos =1
si.posting_date = frappe.flags.current_date
for d in si.get("items"):
if not d.income_account:
d.income_account = "Sales - {}".format(frappe.db.get_value('Company', si.company, 'abbr'))
si.set_missing_values()
make_payment_entries_for_pos_invoice(si)
si.insert()
si.submit()
def make_payment_entries_for_pos_invoice(si):
for data in si.payments:
data.amount = si.outstanding_amount
return
|
py | 1a414899ae8cc750dd1e9248438327e9535e533d | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 16 19:59:07 2021
@author: Alexander Southan
"""
import numpy as np
import unittest
from src.pyPreprocessing import transform
class TestTransform(unittest.TestCase):
def test_transform(self):
x = np.linspace(0, 10, 1100)
y = x**2 - 30
# test lls transformation
y_lls = transform.transform([y], 'log_log_sqrt', direction='direct')
y_lls_inv = transform.transform(
y_lls, 'log_log_sqrt', direction='inverse', min_value=y.min())
self.assertTrue(np.allclose(y, y_lls_inv[0]))
# test errors
self.assertRaises(
ValueError, transform.transform, [y], 'log_log_sq',
direction='direct')
self.assertRaises(
ValueError, transform.transform, [y], 'log_log_sq',
direction='inverse')
self.assertRaises(
ValueError, transform.transform, [y], 'log_log_sqrt',
direction='dir')
def test_normalize(self):
x = np.linspace(0, 10, 1100)
y = x**2 - 30
y_norm = transform.normalize([y], 'total_intensity', x_data=x)
self.assertAlmostEqual(np.trapz(y_norm, x=x, axis=1)[0], 1)
y_norm_2 = transform.normalize([y], 'total_intensity', x_data=x,
factor=3.25)
self.assertAlmostEqual(np.trapz(y_norm_2, x=x, axis=1)[0], 3.25)
# test errors
self.assertRaises(ValueError, transform.normalize, [y], 'tot_int')
|
py | 1a4148fdc05b2a2083cd68a4d91fff0e816043e3 | import os, sys
def main():
os.environ['PYTHONPATH'] = ':'.join(sys.path)
os.execve(sys.executable, sys.argv, os.environ)
|
py | 1a414959b9039474d1d83c2241a405148225fdbb | #!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test addr relay
"""
from test_framework.messages import (
CAddress,
NODE_NETWORK,
NODE_WITNESS,
msg_addr,
msg_getaddr
)
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
import time
class AddrReceiver(P2PInterface):
num_ipv4_received = 0
test_addr_contents = False
def __init__(self, test_addr_contents=False):
super().__init__()
self.test_addr_contents = test_addr_contents
def on_addr(self, message):
for addr in message.addrs:
self.num_ipv4_received += 1
if self.test_addr_contents:
# relay_tests checks the content of the addr messages match
# expectations based on the message creation in setup_addr_msg
assert_equal(addr.nServices, 9)
if not 8333 <= addr.port < 8343:
raise AssertionError("Invalid addr.port of {} (8333-8342 expected)".format(addr.port))
assert addr.ip.startswith('123.123.123.')
def addr_received(self):
return self.num_ipv4_received != 0
def getaddr_received(self):
return self.message_count['getaddr'] > 0
class AddrTest(BitcoinTestFramework):
counter = 0
mocktime = int(time.time())
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
self.oversized_addr_test()
self.relay_tests()
self.getaddr_tests()
self.blocksonly_mode_tests()
def setup_addr_msg(self, num):
addrs = []
for i in range(num):
addr = CAddress()
addr.time = self.mocktime + i
addr.nServices = NODE_NETWORK | NODE_WITNESS
addr.ip = f"123.123.123.{self.counter % 256}"
addr.port = 8333 + i
addrs.append(addr)
self.counter += 1
msg = msg_addr()
msg.addrs = addrs
return msg
def send_addr_msg(self, source, msg, receivers):
source.send_and_ping(msg)
# pop m_next_addr_send timer
self.mocktime += 10 * 60
self.nodes[0].setmocktime(self.mocktime)
for peer in receivers:
peer.sync_send_with_ping()
def oversized_addr_test(self):
self.log.info('Send an addr message that is too large')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
msg = self.setup_addr_msg(1010)
with self.nodes[0].assert_debug_log(['addr message size = 1010']):
addr_source.send_and_ping(msg)
self.nodes[0].disconnect_p2ps()
def relay_tests(self):
self.log.info('Test address relay')
self.log.info('Check that addr message content is relayed and added to addrman')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
num_receivers = 7
receivers = []
for _ in range(num_receivers):
receivers.append(self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True)))
# Keep this with length <= 10. Addresses from larger messages are not
# relayed.
num_ipv4_addrs = 10
msg = self.setup_addr_msg(num_ipv4_addrs)
with self.nodes[0].assert_debug_log(
[
'Added {} addresses from 127.0.0.1: 0 tried'.format(num_ipv4_addrs),
'received: addr (301 bytes) peer=1',
]
):
self.send_addr_msg(addr_source, msg, receivers)
total_ipv4_received = sum(r.num_ipv4_received for r in receivers)
# Every IPv4 address must be relayed to two peers, other than the
# originating node (addr_source).
ipv4_branching_factor = 2
assert_equal(total_ipv4_received, num_ipv4_addrs * ipv4_branching_factor)
self.nodes[0].disconnect_p2ps()
self.log.info('Check relay of addresses received from outbound peers')
inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True))
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
msg = self.setup_addr_msg(2)
self.send_addr_msg(full_outbound_peer, msg, [inbound_peer])
self.log.info('Check that the first addr message received from an outbound peer is not relayed')
# Currently, there is a flag that prevents the first addr message received
# from a new outbound peer to be relayed to others. Originally meant to prevent
# large GETADDR responses from being relayed, it now typically affects the self-announcement
# of the outbound peer which is often sent before the GETADDR response.
assert_equal(inbound_peer.num_ipv4_received, 0)
self.log.info('Check that subsequent addr messages sent from an outbound peer are relayed')
msg2 = self.setup_addr_msg(2)
self.send_addr_msg(full_outbound_peer, msg2, [inbound_peer])
assert_equal(inbound_peer.num_ipv4_received, 2)
self.log.info('Check address relay to outbound peers')
block_relay_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=1, connection_type="block-relay-only")
msg3 = self.setup_addr_msg(2)
self.send_addr_msg(inbound_peer, msg3, [full_outbound_peer, block_relay_peer])
self.log.info('Check that addresses are relayed to full outbound peers')
assert_equal(full_outbound_peer.num_ipv4_received, 2)
self.log.info('Check that addresses are not relayed to block-relay-only outbound peers')
assert_equal(block_relay_peer.num_ipv4_received, 0)
self.nodes[0].disconnect_p2ps()
def getaddr_tests(self):
self.log.info('Test getaddr behavior')
self.log.info('Check that we send a getaddr message upon connecting to an outbound-full-relay peer')
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
full_outbound_peer.sync_with_ping()
assert full_outbound_peer.getaddr_received()
self.log.info('Check that we do not send a getaddr message upon connecting to a block-relay-only peer')
block_relay_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=1, connection_type="block-relay-only")
block_relay_peer.sync_with_ping()
assert_equal(block_relay_peer.getaddr_received(), False)
self.log.info('Check that we answer getaddr messages only from inbound peers')
inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver())
inbound_peer.sync_with_ping()
# Add some addresses to addrman
for i in range(1000):
first_octet = i >> 8
second_octet = i % 256
a = f"{first_octet}.{second_octet}.1.1"
self.nodes[0].addpeeraddress(a, 8333)
full_outbound_peer.send_and_ping(msg_getaddr())
block_relay_peer.send_and_ping(msg_getaddr())
inbound_peer.send_and_ping(msg_getaddr())
self.mocktime += 5 * 60
self.nodes[0].setmocktime(self.mocktime)
inbound_peer.wait_until(lambda: inbound_peer.addr_received() is True)
assert_equal(full_outbound_peer.num_ipv4_received, 0)
assert_equal(block_relay_peer.num_ipv4_received, 0)
assert inbound_peer.num_ipv4_received > 100
self.nodes[0].disconnect_p2ps()
def blocksonly_mode_tests(self):
self.log.info('Test addr relay in -blocksonly mode')
self.restart_node(0, ["-blocksonly"])
self.mocktime = int(time.time())
self.log.info('Check that we send getaddr messages')
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
full_outbound_peer.sync_with_ping()
assert full_outbound_peer.getaddr_received()
self.log.info('Check that we relay address messages')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
msg = self.setup_addr_msg(2)
self.send_addr_msg(addr_source, msg, [full_outbound_peer])
assert_equal(full_outbound_peer.num_ipv4_received, 2)
self.nodes[0].disconnect_p2ps()
if __name__ == '__main__':
AddrTest().main()
|
py | 1a41498a215156cec67ece593eeb5a45114d1c12 | from manga_py.provider import Provider
from .helpers.std import Std
class MangaWindowNet(Provider, Std):
__url = None
def get_chapter_index(self) -> str:
return self.chapter[0].replace('.', '-')
def get_content(self):
return self.http_get(self.__url)
def get_manga_name(self) -> str:
title = self.html_fromstring(self.get_url(), '.item-title > a, .nav-title > a', 0)
self.__url = self.http().normalize_uri(title.get('href'))
return title.text_content().strip()
def get_chapters(self):
items = self._elements('.chapter-list a.chapt')
result = []
re = self.re.compile(r'[Cc]h\.(\d+(?:\.\d+)?)')
n = self.http().normalize_uri
for i in items:
text = i.cssselect('b')[0].text_content()
if 'deleted' not in text.casefold():
result.append((
re.search(text).group(1),
n(i.get('href')),
))
return result
def get_files(self):
re = self.re.compile(r'images\s*=\s*({.+});')
content = self.http_get(self.chapter[1])
items = self.json.loads(re.search(content).group(1))
return [items[i] for i in sorted(items, key=lambda i: int(i))]
def get_cover(self) -> str:
return self._cover_from_content('.attr-cover > img')
def book_meta(self) -> dict:
pass
def chapter_for_json(self) -> str:
return self.chapter[1]
def prepare_cookies(self):
self.cf_scrape(self.get_url())
main = MangaWindowNet
|
py | 1a4149963054b5daa218fb56f4b0bfe864f25aca | class VisualizatorChooser(object):
def __init__(self):
self.visualizators = {}
self.generate_visualizators()
def add_visualizator(self, url, visualizator):
self.visualizators[url] = visualizator
def choose(self, url, report, *args, **kwargs):
visualizator = self.visualizators.get(url, self.DEFAULT)
return visualizator(url, report, *args, **kwargs)
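# A hedged sketch of how this base class appears intended to be used: a concrete
# subclass supplies generate_visualizators() and a DEFAULT fallback, both of which
# are referenced above but not defined in this file. All names below are invented
# for illustration only.
def _render_plain(url, report, *args, **kwargs):
    # trivial fallback visualizator: just echo back what it was given
    return {'url': url, 'report': report}

class ExampleVisualizatorChooser(VisualizatorChooser):
    DEFAULT = staticmethod(_render_plain)

    def generate_visualizators(self):
        # register one visualizator per URL handled by this chooser
        self.add_visualizator('/reports/table', _render_plain)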
|
py | 1a414accc4d465fe5aed8aeebf0ce5c2edbf7fef | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
from djipsum import (__VERSION__, __AUTHOR__, __AUTHOR_EMAIL__)
setup(
name="djipsum",
packages=find_packages(exclude=["*.demo"]),
version=__VERSION__,
url="https://github.com/agusmakmun/djipsum/",
download_url="https://github.com/agusmakmun/djipsum/tarball/v{}".format(__VERSION__),
description="Django Lorem Ipsum Generator - Command plugin to generate (fake content data) for Django model.",
long_description=open("README.rst").read(),
license="MIT",
author=__AUTHOR__,
author_email=__AUTHOR_EMAIL__,
keywords=[
"djipsum", "django fake content data",
"django lorem ipsum generator",
"django unitest tool"
],
zip_safe=False,
include_package_data=True,
install_requires=["Django>=1.10.1", "Faker>=0.7.3"],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Framework :: Django",
]
)
|
py | 1a414acfba489fd2c7347e139f6e27cb5922c0a0 | """
Fourier Transforms
The frequency components of an image can be displayed after doing a Fourier Transform (FT).
An FT looks at the components of an image (edges that are high-frequency, and areas of smooth
color as low-frequency), and plots the frequencies that occur as points in a spectrum.
In fact, an FT treats patterns of intensity in an image as sine waves with a particular frequency,
and interactive visualizations of these sine wave components can be found online.
We'll first look at a few simple image patterns to build up an idea of what image frequency
components look like, and then transform a more complex image to see what it looks like in the frequency domain.
"""
import numpy as np
import matplotlib.pyplot as plt
import cv2
# Read in the images
image_stripes = cv2.imread('images/stripes.jpg')
image_solid = cv2.imread('images/pink_solid.jpg')
# Change color to RGB (from BGR)
image_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_BGR2RGB)
image_solid = cv2.cvtColor(image_solid, cv2.COLOR_BGR2RGB)
# Display the images
f, (ax1,ax2) = plt.subplots(1, 2, figsize=(10,5))
ax1.imshow(image_stripes)
ax2.imshow(image_solid)
# convert to grayscale to focus on the intensity patterns in the image
gray_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_RGB2GRAY)
gray_solid = cv2.cvtColor(image_solid, cv2.COLOR_RGB2GRAY)
# normalize the image color values from a range of [0,255] to [0,1] for further processing
norm_stripes = gray_stripes / 255.0
norm_solid = gray_solid / 255.0
# perform a fast fourier transform and create a scaled, frequency transform image
def ft_image(norm_image):
'''
This function takes in a normalized, grayscale image
and returns a frequency spectrum transform of that image.
'''
f = np.fft.fft2(norm_image)
fshift = np.fft.fftshift(f)
frequency_tx = 20 * np.log(np.abs(fshift))
return frequency_tx
f_stripes = ft_image(norm_stripes)
f_solid = ft_image(norm_solid)
# display the images
# original images to the left of their frequency transform
f, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, figsize=(20,10))
ax1.set_title('original image')
ax1.imshow(image_stripes)
ax2.set_title('frequency transform image')
ax2.imshow(f_stripes, cmap='gray')
ax3.set_title('original image')
ax3.imshow(image_solid)
ax4.set_title('frequency transform image')
ax4.imshow(f_solid, cmap='gray')
"""
Low frequencies are at the center of the frequency transform image.
The transform images for these examples show that the solid image consists mostly of
low-frequency components (as seen by the bright spot at its center).
The stripes transform image contains low-frequencies for the areas of white
and black color and high frequencies for the edges in between those colors.
The stripes transform image also tells us that there is one dominating direction
for these frequencies; vertical stripes are represented by a horizontal line passing
through the center of the frequency transform image.
"""
# Read in an image
image = cv2.imread('images/birds.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
norm_image = gray/255.0
f_image = ft_image(norm_image)
f, (ax1,ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.imshow(image)
ax2.imshow(f_image, cmap='gray')
"""
This image has components of all frequencies. You can see a bright spot in the center
of the transform image, which tells us that a large portion of the image is low-frequency;
this makes sense since the body of the birds and background are solid colors. The transform
image also tells us that there are two dominating directions for these frequencies;
vertical edges (from the edges of birds) are represented by a horizontal line passing through
the center of the frequency transform image, and horizontal edges (from the branch and tops
of the birds' heads) are represented by a vertical line passing through the center.
""" |
py | 1a414c94e319a92bd100edc1dedcb914a0d7f3b6 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Setup and checking of known good output for CLI tests"""
import functools
import hashlib
import importlib
import os
import pathlib
import shlex
import shutil
import pytest
from improver import cli
from improver.constants import DEFAULT_TOLERANCE
from improver.utilities.compare import compare_netcdfs
RECREATE_DIR_ENVVAR = "RECREATE_KGO"
ACC_TEST_DIR_ENVVAR = "IMPROVER_ACC_TEST_DIR"
IGNORE_CHECKSUMS = "IMPROVER_IGNORE_CHECKSUMS"
ACC_TEST_DIR_MISSING = pathlib.Path("/dev/null")
DEFAULT_CHECKSUM_FILE = pathlib.Path(__file__).parent / "SHA256SUMS"
IGNORED_ATTRIBUTES = ["history", "Conventions"]
def run_cli(cli_name, verbose=True):
"""
Prepare a function for running clize CLIs.
Use of the returned function avoids writing "improver" and the CLI name in
each test function.
Checksums of input files are verified before the clize CLI is run.
Args:
cli_name (str): name of the CLI
verbose (bool): pass verbose option to CLI
Returns:
Callable([Iterable[str], None]): function to run the specified CLI
"""
def run_function(args):
if not checksum_ignore():
verify_checksums(args)
cli.main("improver", cli_name, *args, verbose=verbose)
return run_function
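# A hedged sketch of how a CLI acceptance test module typically uses the helpers in
# this file; the CLI name, KGO sub-directory and file names below are hypothetical,
# and the relative import assumes the test module lives alongside this package:
#
#     from . import acceptance as acc
#
#     run_cli = acc.run_cli(acc.cli_name_with_dashes(__file__))
#
#     @acc.skip_if_kgo_missing
#     def test_basic(tmp_path):
#         kgo_dir = acc.kgo_root() / "some-cli/basic"   # hypothetical KGO path
#         output_path = tmp_path / "output.nc"
#         run_cli([kgo_dir / "input.nc", "--output", output_path])
#         acc.compare(output_path, kgo_dir / "kgo.nc")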
def cli_name_with_dashes(dunder_file):
"""
Convert an acceptance test module name to the corresponding CLI
Args:
dunder_file (str): test module name retrieved from __file__
Returns:
str: CLI name
"""
module_name = str(pathlib.Path(dunder_file).stem)
if module_name.startswith("test_"):
module_name = module_name[5:]
module_dashes = module_name.replace("_", "-")
return module_dashes
@functools.lru_cache()
def acceptance_checksums(checksum_path=DEFAULT_CHECKSUM_FILE):
"""
Retrieve a list of checksums from file in text list format, as produced by
the sha256sum command line tool.
Args:
checksum_path (pathlib.Path): Path to checksum file. File
should be plain text in the format produced by the sha256sum
command line tool. Paths listed in the file should be relative to
the KGO root directory found by kgo_root().
Returns:
Dict[pathlib.Path, str]: Dict with keys being relative paths and
values being hexadecimal checksums
"""
if checksum_path is None:
checksum_path = DEFAULT_CHECKSUM_FILE
with open(checksum_path, mode="r") as checksum_file:
checksum_lines = checksum_file.readlines()
checksums = {}
for line in checksum_lines:
parts = line.strip().split(" ", maxsplit=1)
csum = parts[0]
path = pathlib.Path(parts[1])
checksums[path] = csum
return checksums
def verify_checksum(kgo_path, checksums=None, checksum_path=DEFAULT_CHECKSUM_FILE):
"""
Verify an individual KGO file's checksum.
Args:
kgo_path (pathlib.Path): Path to file in KGO directory
checksums (Optional[Dict[pathlib.Path, str]]): Lookup dictionary
mapping from paths to hexadecimal checksums. If provided, used in
preference to checksum_path.
checksum_path (pathlib.Path): Path to checksum file, used if checksums is
None. File should be plain text in the format produced by the
sha256sum command line tool.
Raises:
KeyError: File being verified is not found in checksum dict/file
ValueError: Checksum does not match value in checksum dict/file
"""
if checksums is None:
checksums_dict = acceptance_checksums(checksum_path=checksum_path)
checksums_source = checksum_path
else:
checksums_dict = checksums
checksums_source = "lookup dict"
kgo_csum = calculate_checksum(kgo_path)
kgo_norm_path = pathlib.Path(os.path.normpath(kgo_path))
kgo_rel_path = kgo_norm_path.relative_to(kgo_root())
try:
expected_csum = checksums_dict[kgo_rel_path]
except KeyError:
msg = f"Checksum for {kgo_rel_path} missing from {checksums_source}"
raise KeyError(msg)
if kgo_csum != expected_csum:
msg = (
f"Checksum for {kgo_rel_path} is {kgo_csum}, "
f"expected {expected_csum} in {checksums_source}"
)
raise ValueError(msg)
def calculate_checksum(path):
"""
Calculate SHA256 hash/checksum of a file
Args:
path (pathlib.Path): Path to file
Returns:
str: checksum as hexadecimal string
"""
hasher = hashlib.sha256()
with open(path, mode="rb") as kgo_file:
while True:
# read 1 megabyte binary chunks from file and feed them to hasher
kgo_chunk = kgo_file.read(2 ** 20)
if not kgo_chunk:
break
hasher.update(kgo_chunk)
checksum = hasher.hexdigest()
return checksum
def verify_checksums(cli_arglist):
"""
Verify input file checksums based on input arguments to a CLI.
Intended for use inside acceptance tests, so raises exceptions to report
various issues that should result in a test failure.
Args:
cli_arglist (List[Union[str,pathlib.Path]]): list of arguments being
passed to a CLI such as via improver.cli.main function.
"""
# copy the arglist as it will be edited to remove output args
arglist = cli_arglist.copy()
# if there is an --output argument, remove the path in the following argument
try:
output_idx = cli_arglist.index("--output")
arglist.pop(output_idx + 1)
except ValueError:
pass
# drop arguments of the form --output=file
arglist = [
arg
for arg in arglist
if not isinstance(arg, str) or not arg.startswith("--output=")
]
# check for non-path-type arguments that refer to KGOs
kgo_dir = str(kgo_root())
path_strs = [arg for arg in arglist if isinstance(arg, str) and kgo_dir in arg]
if path_strs:
msg = (
f"arg list contains KGO paths as strings {path_strs}, "
"expected paths to be pathlib.Path objects"
)
raise ValueError(msg)
# verify checksums of remaining path-type arguments
path_args = [arg for arg in arglist if isinstance(arg, pathlib.Path)]
for arg in path_args:
# expand any globs in the argument and verify each of them
arg_globs = list(arg.parent.glob(arg.name))
for arg_glob in arg_globs:
verify_checksum(arg_glob)
def checksum_ignore():
"""True if CHECKSUMs should be checked"""
return os.getenv(IGNORE_CHECKSUMS, "false").lower() == "true"
def kgo_recreate():
"""True if KGO should be re-created"""
return RECREATE_DIR_ENVVAR in os.environ
def kgo_root():
"""Path to the root of the KGO directories"""
try:
test_dir = os.environ[ACC_TEST_DIR_ENVVAR]
except KeyError:
return ACC_TEST_DIR_MISSING
return pathlib.Path(test_dir)
def kgo_exists():
"""True if KGO files exist"""
return not kgo_root().samefile(ACC_TEST_DIR_MISSING)
def recreate_if_needed(output_path, kgo_path, recreate_dir_path=None):
"""
Re-create a file in the KGO, depending on configuration.
Args:
output_path (pathlib.Path): Path to output produced by test
kgo_path (pathlib.Path): Path to expected/original KGO file
recreate_dir_path (Optional[pathlib.Path]): Path to directory where
recreated KGOs will be placed. Default is environment variable
specified in RECREATE_DIR_ENVVAR constant.
Returns:
bool: True if KGO was recreated
"""
if not kgo_recreate():
return False
if not kgo_path.is_absolute():
raise IOError("KGO path is not absolute")
if not output_path.is_file():
raise IOError("Expected output file not created by running test")
if recreate_dir_path is None:
recreate_dir_path = pathlib.Path(os.environ[RECREATE_DIR_ENVVAR])
kgo_root_dir = kgo_root()
if kgo_root_dir not in kgo_path.parents:
raise IOError("KGO path for test is not within KGO root directory")
if not recreate_dir_path.is_absolute():
raise IOError("Recreate KGO path is not absolute")
print("Comparison found differences - recreating KGO for this test")
if kgo_path.exists():
print(f"Original KGO file is at {kgo_path}")
else:
print("Original KGO file does not exist")
kgo_relative = kgo_path.relative_to(kgo_root_dir)
recreate_file_path = recreate_dir_path / kgo_relative
if recreate_file_path == kgo_path:
err = (
f"Recreate KGO path {recreate_file_path} must be different from"
f" original KGO path {kgo_path} to avoid overwriting"
)
raise IOError(err)
recreate_file_path.parent.mkdir(exist_ok=True, parents=True)
if recreate_file_path.exists():
recreate_file_path.unlink()
shutil.copyfile(str(output_path), str(recreate_file_path))
print(f"Updated KGO file is at {recreate_file_path}")
print(
f"Put the updated KGO file in {ACC_TEST_DIR_ENVVAR} to make this"
f" test pass. For example:"
)
quoted_kgo = shlex.quote(str(kgo_path))
quoted_recreate = shlex.quote(str(recreate_file_path))
print(f"cp {quoted_recreate} {quoted_kgo}")
return True
def statsmodels_available():
"""True if statsmodels library is importable"""
if importlib.util.find_spec("statsmodels"):
return True
return False
def iris_nimrod_patch_available():
"""True if iris_nimrod_patch library is importable"""
if importlib.util.find_spec("iris_nimrod_patch"):
return True
return False
def compare(
output_path,
kgo_path,
recreate=True,
atol=DEFAULT_TOLERANCE,
rtol=DEFAULT_TOLERANCE,
exclude_vars=None,
):
"""
Compare output against expected using KGO file with absolute and
relative tolerances. Also recreates KGO if that setting is enabled.
Args:
output_path (pathlib.Path): Path to output produced by test
kgo_path (pathlib.Path): Path to KGO file
recreate (bool): False to disable KGO recreation, compare only
atol (float): Absolute tolerance
rtol (float): Relative tolerance
exclude_vars (Iterable[str]): Variables to exclude from comparison
Returns:
None
"""
# don't show this function in pytest tracebacks
# pylint: disable=unused-variable
__tracebackhide__ = True
assert output_path.is_absolute()
assert kgo_path.is_absolute()
if not isinstance(atol, (int, float)):
raise ValueError("atol")
if not isinstance(rtol, (int, float)):
raise ValueError("rtol")
difference_found = False
message = ""
def message_recorder(exception_message):
nonlocal difference_found
nonlocal message
difference_found = True
message = exception_message
compare_netcdfs(
output_path,
kgo_path,
atol=atol,
rtol=rtol,
exclude_vars=exclude_vars,
reporter=message_recorder,
ignored_attributes=IGNORED_ATTRIBUTES,
)
if difference_found:
if recreate:
recreate_if_needed(output_path, kgo_path)
raise AssertionError(message)
if not checksum_ignore():
verify_checksum(kgo_path)
# Pytest decorator to skip tests if KGO is not available for use
# pylint: disable=invalid-name
skip_if_kgo_missing = pytest.mark.skipif(not kgo_exists(), reason="KGO files required")
# Pytest decorator to skip tests if statsmodels is available
# pylint: disable=invalid-name
skip_if_statsmodels = pytest.mark.skipif(
statsmodels_available(), reason="statsmodels library is available"
)
# Pytest decorator to skip tests if statsmodels is not available
# pylint: disable=invalid-name
skip_if_no_statsmodels = pytest.mark.skipif(
not statsmodels_available(), reason="statsmodels library is not available"
)
# Pytest decorator to skip tests if iris_nimrod_patch is not available
# pylint: disable=invalid-name
skip_if_no_iris_nimrod_patch = pytest.mark.skipif(
not iris_nimrod_patch_available(),
reason="iris_nimrod_patch library is not available",
)
|
py | 1a414ecd155816c848aee59d5585d2532d7a7e5b | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/11/26 14:20
# @Author : Adyan
# @File : setup.py
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="Adyan",
version="0.0.2",
author="Adyan",
author_email="[email protected]",
description="Special package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/liujiang9/Utils",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
)
|
py | 1a414f7e958c512bfa6c55983f7ec5a7ba16b904 | from typing import Callable
try: # Assume we're a sub-module in a package.
from utils import numeric as nm
except ImportError: # Apparently no higher-level package has been imported, fall back to a local import.
from ..utils import numeric as nm
def shifted_func(func) -> Callable:
def func_(x, y) -> list:
assert len(x) == len(y)
shift_max = len(x) - 1
result = list()
for shift in range(-shift_max + 2, 0):
shifted_x = x[0: shift_max + shift]
shifted_y = y[- shift: shift_max]
stat = func(shifted_x, shifted_y)
result.append(stat)
for shift in range(0, shift_max - 1):
shifted_x = x[shift: shift_max]
shifted_y = y[0: shift_max - shift]
stat = func(shifted_x, shifted_y)
result.append(stat)
return result
return func_
def pair_filter(function=nm.is_defined) -> Callable:
def func(a, b) -> tuple:
a_filtered, b_filtered = list(), list()
for cur_a, cur_b in zip(a, b):
take_a = function(cur_a)
take_b = function(cur_b)
if take_a and take_b:
a_filtered.append(cur_a)
b_filtered.append(cur_b)
return a_filtered, b_filtered
return func
def pair_stat(stat_func, filter_func=None) -> Callable:
def func(a, b) -> float:
if filter_func:
data = pair_filter(filter_func)(a, b)
else:
data = (a, b)
return stat_func(*data)
return func
def corr() -> Callable:
return pair_stat(
filter_func=nm.is_nonzero,
stat_func=lambda *v: nm.corr(*v),
)
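# A minimal usage sketch, assuming the numeric helpers imported above behave as
# their names suggest (nm.corr computes a correlation, nm.is_nonzero/nm.is_defined
# filter out zero or undefined values); the data below is made up:
#
#     x = [1.0, 2.0, 0.0, 4.0]
#     y = [2.0, 4.0, 5.0, 8.0]
#     corr()(x, y)                 # correlation over pairs where both values pass the filter
#     shifted_func(corr())(x, y)   # the same statistic evaluated across relative shifts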
|
py | 1a41529dd7bc6ee4d50c877075e5d6aa26006a71 | #Converts the aspell dictionary to json file type stripping away unessential data
from pyparsing import *
import glob
import json
scowl = open("scowl-googlebooks-report original.txt", "r")
scowlString = scowl.read().lower()
period = Suppress(Literal("."))
integer = Word(nums).setParseAction(lambda t: int(t[0]))
complexNum = Combine(integer + Optional(Suppress(Literal(",")+integer)) +Optional( period + integer))
word = OneOrMore(Word(alphanums))
bar =Suppress(Literal("|"))
line = word + bar + complexNum + Suppress(restOfLine)
line2 = word + bar + complexNum+Literal("x") + Suppress(complexNum + complexNum + bar + word + period + bar + word + period)
full = OneOrMore(Group(Suppress(line2("Type2"))|line("Type1") ))
list =full.parseString(scowlString)
last_Term = 0
string = ""
for term in list:
if len(term) == 2:
string+=('"' + term[0] + '":'+term[1]+"," + "\n")
last_Term = term[1]
# if len(term) == 3:
# string+=('"' + term[0] + '":'+str(int(term[1])*int(last_Term))+","+ "\n")
print string
file = open("dictionary.json", "w")
file.write("{"+string+"}")
|
py | 1a4152c0a4dad22eaf4c539dca8da34cf3b4dd3f | import math
# pad input string with character c and modulo operand mod_op
def padWithChars(sinp,c,mod_op):
ret_val = sinp
if len(sinp) % mod_op == 0:
return ret_val
for i in range(0,mod_op-len(sinp)%mod_op):
ret_val += c
return ret_val
# split input string into a list where each element is a group of n characters
def getInputList(sinp,n):
ret_val = []
while sinp != "":
ret_val.append(sinp[:n])
sinp = sinp[n:]
return ret_val
# eliminate leading 0b from string and pad with zeroes (in front) until there are num_bits bits
def getFormattedBitString(binp,num_bits):
ret_val = binp[2:]
if ret_val == "0" and num_bits == 8:
return "00000000"
if ret_val == "0" and num_bits == 32:
return "00000000000000000000000000000000"
for i in range(0,num_bits-len(ret_val)):
ret_val = '0' + ret_val
return ret_val
# convert each character of string into binary string
def findBitPattern(sinp):
ret_val = ""
for i in range(0,len(sinp)):
ret_val += getFormattedBitString(bin(ord(sinp[i])),8)
return ret_val
# convert n*8 bit binary string to n ascii chars
def findAsciiPattern(sinp,n):
ret_val = ""
for i in range(0,n):
ret_val += chr(int(sinp[:8],2))
sinp = sinp[8:]
return ret_val
# convert input number into base 85 number (as a list)
def getBase85ValueList(dinp):
ret_val = []
div_dinp = dinp
while div_dinp != 0:
mod_dinp = div_dinp % 85
div_dinp = int(math.floor(div_dinp / 85))
ret_val.insert(0,str(mod_dinp))
return ret_val
# convert base 85 to base 10
def base85ToBase10(linp):
ret_val = 0
digits = len(linp)
for i in range(0,digits):
ret_val += linp[i] * (85 ** (digits - i - 1))
return ret_val
# add 33 to each number in list above and convert to ascii
def add33ConvertAscii(sinp):
ret_val = ""
for elmt in sinp:
ascii_int_partition = int(elmt) + 33
ret_val += chr(ascii_int_partition)
return ret_val
# eliminate trailing characters matching the number of trailing zeroes the input was padded with
def unpadResult(sinp,pad):
# this was fiddled with to get it working. I need to revisit why this is the correct conditional.
if pad % 4 == 0 or pad % 5 == 0:
return sinp
return sinp[:-pad]
# convert ascii to int and subtract 33 for each character; store each result in a list
def sub33NumList(sinp):
ret_val = []
for elmt in sinp:
ret_val.append(int(findBitPattern(elmt),2)-33)
return ret_val
# compute Base85 for all sets of 4 characters in input
def encodeAllSubSections(linp):
ret_val = ""
for elmt in linp:
bit_pattern = findBitPattern(elmt)
int_64bit = int(bit_pattern,2)
list_85base = getBase85ValueList(int_64bit)
ret_val += add33ConvertAscii(list_85base)
return ret_val
# decode for all sets of 5 characters in input in encoded result
def decodeAllSubSections(linp):
ret_val = ""
for elmt in linp:
sub_33_list = sub33NumList(elmt)
int_64bit = base85ToBase10(sub_33_list)
bit_pattern = getFormattedBitString(bin(int_64bit),32)
ret_val += findAsciiPattern(bit_pattern,4)
return ret_val
# encode sinp
def encodeAscii85(sinp):
padded_input = padWithChars(sinp,'\0',4)
padded_offset = 4 - (len(sinp)%4)
input_list = getInputList(padded_input,4)
final_result = unpadResult(encodeAllSubSections(input_list),padded_offset)
return final_result
# decode sinp
def decodeAscii85(sinp):
padded_input = padWithChars(sinp,'u',5)
padded_offset = 5 - (len(sinp)%5)
input_list = getInputList(padded_input,5)
final_result = unpadResult(decodeAllSubSections(input_list),padded_offset)
return final_result
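# A small round-trip demonstration, kept under a main guard so that importing this
# module stays side-effect free; the sample string is arbitrary.
if __name__ == "__main__":
    sample = "Hello"
    encoded = encodeAscii85(sample)
    print(encoded)
    print(decodeAscii85(encoded) == sample)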
|
py | 1a4152d251f4fcf0190d62397ad55b8b5cfd2ca3 | # coding: utf-8
"""
FreeClimb API
FreeClimb is a cloud-based application programming interface (API) that puts the power of the Vail platform in your hands. FreeClimb simplifies the process of creating applications that can use a full range of telephony features without requiring specialized or on-site telephony equipment. Using the FreeClimb REST API to write applications is easy! You have the option to use the language of your choice or hit the API directly. Your application can execute a command by issuing a RESTful request to the FreeClimb API. The base URL to send HTTP requests to the FreeClimb REST API is: /apiserver. FreeClimb authenticates and processes your request. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from freeclimb.configuration import Configuration
class CallResultAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'call_id': 'str',
'parent_call_id': 'str',
'account_id': 'str',
'_from': 'str',
'to': 'str',
'phone_number_id': 'str',
'status': 'str',
'start_time': 'str',
'connect_time': 'str',
'end_time': 'str',
'duration': 'int',
'connect_duration': 'int',
'direction': 'str',
'answered_by': 'str',
'subresource_uris': 'object'
}
attribute_map = {
'call_id': 'callId',
'parent_call_id': 'parentCallId',
'account_id': 'accountId',
'_from': 'from',
'to': 'to',
'phone_number_id': 'phoneNumberId',
'status': 'status',
'start_time': 'startTime',
'connect_time': 'connectTime',
'end_time': 'endTime',
'duration': 'duration',
'connect_duration': 'connectDuration',
'direction': 'direction',
'answered_by': 'answeredBy',
'subresource_uris': 'subresourceUris'
}
def __init__(self, call_id=None, parent_call_id=None, account_id=None, _from=None, to=None, phone_number_id=None, status=None, start_time=None, connect_time=None, end_time=None, duration=None, connect_duration=None, direction=None, answered_by=None, subresource_uris=None, local_vars_configuration=None): # noqa: E501
"""CallResultAllOf - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._call_id = None
self._parent_call_id = None
self._account_id = None
self.__from = None
self._to = None
self._phone_number_id = None
self._status = None
self._start_time = None
self._connect_time = None
self._end_time = None
self._duration = None
self._connect_duration = None
self._direction = None
self._answered_by = None
self._subresource_uris = None
self.discriminator = None
if call_id is not None:
self.call_id = call_id
if parent_call_id is not None:
self.parent_call_id = parent_call_id
if account_id is not None:
self.account_id = account_id
if _from is not None:
self._from = _from
if to is not None:
self.to = to
if phone_number_id is not None:
self.phone_number_id = phone_number_id
if status is not None:
self.status = status
if start_time is not None:
self.start_time = start_time
if connect_time is not None:
self.connect_time = connect_time
if end_time is not None:
self.end_time = end_time
if duration is not None:
self.duration = duration
if connect_duration is not None:
self.connect_duration = connect_duration
if direction is not None:
self.direction = direction
if answered_by is not None:
self.answered_by = answered_by
if subresource_uris is not None:
self.subresource_uris = subresource_uris
@property
def call_id(self):
"""Gets the call_id of this CallResultAllOf. # noqa: E501
String that uniquely identifies this Call resource. # noqa: E501
:return: The call_id of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._call_id
@call_id.setter
def call_id(self, call_id):
"""Sets the call_id of this CallResultAllOf.
String that uniquely identifies this Call resource. # noqa: E501
:param call_id: The call_id of this CallResultAllOf. # noqa: E501
:type: str
"""
self._call_id = call_id
@property
def parent_call_id(self):
"""Gets the parent_call_id of this CallResultAllOf. # noqa: E501
ID of the Call that created this leg (child Call). # noqa: E501
:return: The parent_call_id of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._parent_call_id
@parent_call_id.setter
def parent_call_id(self, parent_call_id):
"""Sets the parent_call_id of this CallResultAllOf.
ID of the Call that created this leg (child Call). # noqa: E501
:param parent_call_id: The parent_call_id of this CallResultAllOf. # noqa: E501
:type: str
"""
self._parent_call_id = parent_call_id
@property
def account_id(self):
"""Gets the account_id of this CallResultAllOf. # noqa: E501
ID of the account that owns this Call. # noqa: E501
:return: The account_id of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._account_id
@account_id.setter
def account_id(self, account_id):
"""Sets the account_id of this CallResultAllOf.
ID of the account that owns this Call. # noqa: E501
:param account_id: The account_id of this CallResultAllOf. # noqa: E501
:type: str
"""
self._account_id = account_id
@property
def _from(self):
"""Gets the _from of this CallResultAllOf. # noqa: E501
Phone number that initiated this Call. # noqa: E501
:return: The _from of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self.__from
@_from.setter
def _from(self, _from):
"""Sets the _from of this CallResultAllOf.
Phone number that initiated this Call. # noqa: E501
:param _from: The _from of this CallResultAllOf. # noqa: E501
:type: str
"""
self.__from = _from
@property
def to(self):
"""Gets the to of this CallResultAllOf. # noqa: E501
Phone number that received this Call. # noqa: E501
:return: The to of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._to
@to.setter
def to(self, to):
"""Sets the to of this CallResultAllOf.
Phone number that received this Call. # noqa: E501
:param to: The to of this CallResultAllOf. # noqa: E501
:type: str
"""
self._to = to
@property
def phone_number_id(self):
"""Gets the phone_number_id of this CallResultAllOf. # noqa: E501
If the Call was inbound, this is the ID of the IncomingPhoneNumber that received the Call (DNIS). If the Call was outbound, this is the ID of the phone number from which the Call was placed (ANI). # noqa: E501
:return: The phone_number_id of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._phone_number_id
@phone_number_id.setter
def phone_number_id(self, phone_number_id):
"""Sets the phone_number_id of this CallResultAllOf.
If the Call was inbound, this is the ID of the IncomingPhoneNumber that received the Call (DNIS). If the Call was outbound, this is the ID of the phone number from which the Call was placed (ANI). # noqa: E501
:param phone_number_id: The phone_number_id of this CallResultAllOf. # noqa: E501
:type: str
"""
self._phone_number_id = phone_number_id
@property
def status(self):
"""Gets the status of this CallResultAllOf. # noqa: E501
* `queued` – Call is ready and waiting in line before going out. * `ringing` – Call is currently ringing. * `inProgress` – Call was answered and is currently in progress. * `canceled` – Call was hung up while it was queued or ringing. * `completed` – Call was answered and has ended normally. * `busy` – Caller received a busy signal. * `failed` – Call could not be completed as dialed, most likely because the phone number was non-existent. * `noAnswer` – Call ended without being answered. # noqa: E501
:return: The status of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this CallResultAllOf.
* `queued` – Call is ready and waiting in line before going out. * `ringing` – Call is currently ringing. * `inProgress` – Call was answered and is currently in progress. * `canceled` – Call was hung up while it was queued or ringing. * `completed` – Call was answered and has ended normally. * `busy` – Caller received a busy signal. * `failed` – Call could not be completed as dialed, most likely because the phone number was non-existent. * `noAnswer` – Call ended without being answered. # noqa: E501
:param status: The status of this CallResultAllOf. # noqa: E501
:type: str
"""
allowed_values = ["queued", "ringing", "inProgress", "canceled", "completed", "busy", "failed", "noAnswer"] # noqa: E501
if self.local_vars_configuration.client_side_validation and status not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}" # noqa: E501
.format(status, allowed_values)
)
self._status = status
@property
def start_time(self):
"""Gets the start_time of this CallResultAllOf. # noqa: E501
Start time of the Call (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). Empty if the Call has not yet been dialed. # noqa: E501
:return: The start_time of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this CallResultAllOf.
Start time of the Call (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). Empty if the Call has not yet been dialed. # noqa: E501
:param start_time: The start_time of this CallResultAllOf. # noqa: E501
:type: str
"""
self._start_time = start_time
@property
def connect_time(self):
"""Gets the connect_time of this CallResultAllOf. # noqa: E501
Time the Call was answered (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). Empty if the Call has not yet been dialed. # noqa: E501
:return: The connect_time of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._connect_time
@connect_time.setter
def connect_time(self, connect_time):
"""Sets the connect_time of this CallResultAllOf.
Time the Call was answered (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). Empty if the Call has not yet been dialed. # noqa: E501
:param connect_time: The connect_time of this CallResultAllOf. # noqa: E501
:type: str
"""
self._connect_time = connect_time
@property
def end_time(self):
"""Gets the end_time of this CallResultAllOf. # noqa: E501
End time of the Call (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). Empty if the Call did not complete successfully. # noqa: E501
:return: The end_time of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this CallResultAllOf.
End time of the Call (GMT) in RFC 1123 format (e.g., Mon, 15 Jun 2009 20:45:30 GMT). Empty if the Call did not complete successfully. # noqa: E501
:param end_time: The end_time of this CallResultAllOf. # noqa: E501
:type: str
"""
self._end_time = end_time
@property
def duration(self):
"""Gets the duration of this CallResultAllOf. # noqa: E501
Total length of the Call in seconds. Measures time between startTime and endTime. This value is empty for busy, failed, unanswered or ongoing Calls. # noqa: E501
:return: The duration of this CallResultAllOf. # noqa: E501
:rtype: int
"""
return self._duration
@duration.setter
def duration(self, duration):
"""Sets the duration of this CallResultAllOf.
Total length of the Call in seconds. Measures time between startTime and endTime. This value is empty for busy, failed, unanswered or ongoing Calls. # noqa: E501
:param duration: The duration of this CallResultAllOf. # noqa: E501
:type: int
"""
self._duration = duration
@property
def connect_duration(self):
"""Gets the connect_duration of this CallResultAllOf. # noqa: E501
Length of time that the Call was connected in seconds. Measures time between connectTime and endTime. This value is empty for busy, failed, unanswered or ongoing Calls. # noqa: E501
:return: The connect_duration of this CallResultAllOf. # noqa: E501
:rtype: int
"""
return self._connect_duration
@connect_duration.setter
def connect_duration(self, connect_duration):
"""Sets the connect_duration of this CallResultAllOf.
Length of time that the Call was connected in seconds. Measures time between connectTime and endTime. This value is empty for busy, failed, unanswered or ongoing Calls. # noqa: E501
:param connect_duration: The connect_duration of this CallResultAllOf. # noqa: E501
:type: int
"""
self._connect_duration = connect_duration
@property
def direction(self):
"""Gets the direction of this CallResultAllOf. # noqa: E501
Direction of the Call. `inbound` for Calls into FreeClimb, `outboundAPI` for Calls initiated via the REST API, `outboundDial` for Calls initiated by the `OutDial` PerCL command. # noqa: E501
:return: The direction of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._direction
@direction.setter
def direction(self, direction):
"""Sets the direction of this CallResultAllOf.
Direction of the Call. `inbound` for Calls into FreeClimb, `outboundAPI` for Calls initiated via the REST API, `outboundDial` for Calls initiated by the `OutDial` PerCL command. # noqa: E501
:param direction: The direction of this CallResultAllOf. # noqa: E501
:type: str
"""
self._direction = direction
@property
def answered_by(self):
"""Gets the answered_by of this CallResultAllOf. # noqa: E501
If this Call was initiated with answering machine detection, either `human` or `machine`. Empty otherwise. # noqa: E501
:return: The answered_by of this CallResultAllOf. # noqa: E501
:rtype: str
"""
return self._answered_by
@answered_by.setter
def answered_by(self, answered_by):
"""Sets the answered_by of this CallResultAllOf.
If this Call was initiated with answering machine detection, either `human` or `machine`. Empty otherwise. # noqa: E501
:param answered_by: The answered_by of this CallResultAllOf. # noqa: E501
:type: str
"""
self._answered_by = answered_by
@property
def subresource_uris(self):
"""Gets the subresource_uris of this CallResultAllOf. # noqa: E501
The list of subresources for this Call. These include things like logs and recordings associated with the Call. # noqa: E501
:return: The subresource_uris of this CallResultAllOf. # noqa: E501
:rtype: object
"""
return self._subresource_uris
@subresource_uris.setter
def subresource_uris(self, subresource_uris):
"""Sets the subresource_uris of this CallResultAllOf.
The list of subresources for this Call. These include things like logs and recordings associated with the Call. # noqa: E501
:param subresource_uris: The subresource_uris of this CallResultAllOf. # noqa: E501
:type: object
"""
self._subresource_uris = subresource_uris
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.to_camel_case(attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
elif value is None:
continue
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CallResultAllOf):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CallResultAllOf):
return True
return self.to_dict() != other.to_dict()
def to_camel_case(self, snake_str):
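# Converts a snake_case attribute name to camelCase, e.g. 'phone_number_id' -> 'phoneNumberId'.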
components = snake_str.split('_')
return components[0] + ''.join(x.title() for x in components[1:])
|
py | 1a4153fb44000fa3868e7279ff67a31769203841 | from django.contrib import admin
from django.utils.html import format_html
from .models import File
@admin.register(File)
class FileAdmin(admin.ModelAdmin):
view_on_site = False
raw_id_fields = ('version',)
list_display = ('__str__', 'addon_slug', 'addon_guid')
search_fields = (
'^version__addon__guid',
'^version__addon__slug',
)
list_select_related = ('version__addon',)
readonly_fields = (
'id',
'created',
'file_download_url',
)
fieldsets = (
(
None,
{
'fields': (
'id',
'created',
'version',
'filename',
'size',
'hash',
'original_hash',
'status',
'file_download_url',
'manifest_version',
)
},
),
(
'Details',
{
'fields': ('cert_serial_num', 'original_status'),
},
),
(
'Flags',
{
'fields': (
'strict_compatibility',
'is_signed',
'is_experiment',
'is_mozilla_signed_extension',
)
},
),
)
def addon_slug(self, instance):
return instance.addon.slug
def addon_guid(self, instance):
return instance.addon.guid
def file_download_url(self, instance):
return format_html(
'<a href="{}">Download file</a>', instance.get_absolute_url(attachment=True)
)
file_download_url.short_description = 'Download this file'
file_download_url.allow_tags = True
|
py | 1a415401c26ff919e3101629cdbf1ab99a42a2a6 | from torchvision import models, transforms
from torchvision.datasets import ImageFolder
from medcam import medcam
import torch
import cv2
from torch.utils.data import DataLoader
import gc
import shutil
import os
import unittest
CLEAR = True
class TestClassification(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestClassification, self).__init__(*args, **kwargs)
self.DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.current_path = os.path.dirname(os.path.abspath(__file__))
self.dataset = ImageFolder(os.path.join(self.current_path, 'data'), loader=self.load_image)
self.model = models.resnet152(pretrained=True)
self.model.to(device=self.DEVICE)
self.model.eval()
def load_image(self, image_path):
raw_image = cv2.imread(image_path)
raw_image = cv2.resize(raw_image, (224,) * 2)
image = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)(raw_image[..., ::-1].copy())
image = image.to(self.DEVICE)
return image, raw_image
def test_gbp(self):
model = medcam.inject(self.model, output_dir=os.path.join(self.current_path, 'results/resnet152/test_gbp'), backend='gbp',
evaluate=False, save_scores=False, save_maps=True, save_pickle=False)
model.eval()
data_loader = DataLoader(self.dataset, batch_size=1, shuffle=False)
for i, batch in enumerate(data_loader):
_ = model(batch[0][0])
del model
gc.collect()
torch.cuda.empty_cache()
if CLEAR and os.path.isdir(os.path.join(self.current_path, 'results/resnet152')):
shutil.rmtree(os.path.join(self.current_path, 'results/resnet152'))
def test_gcam(self):
layer = 'layer4'
model = medcam.inject(self.model, output_dir=os.path.join(self.current_path, 'results/resnet152/test_gcam'), backend='gcam', layer=layer,
evaluate=False, save_scores=False, save_maps=True, save_pickle=False)
model.eval()
data_loader = DataLoader(self.dataset, batch_size=1, shuffle=False)
for i, batch in enumerate(data_loader):
_ = model(batch[0][0])
del model
gc.collect()
torch.cuda.empty_cache()
if CLEAR and os.path.isdir(os.path.join(self.current_path, 'results/resnet152')):
shutil.rmtree(os.path.join(self.current_path, 'results/resnet152'))
def test_gcam_overlay(self):
layer = 'layer4'
model = medcam.inject(self.model, output_dir=os.path.join(self.current_path, 'results/resnet152/test_gcam_overlay'), backend='gcam', layer=layer,
evaluate=False, save_scores=False, save_maps=True, save_pickle=False)
model.eval()
data_loader = DataLoader(self.dataset, batch_size=1, shuffle=False)
for i, batch in enumerate(data_loader):
_ = model(batch[0][0], raw_input=batch[0][1])
del model
gc.collect()
torch.cuda.empty_cache()
if CLEAR and os.path.isdir(os.path.join(self.current_path, 'results/resnet152')):
shutil.rmtree(os.path.join(self.current_path, 'results/resnet152'))
def test_ggcam(self):
layer = 'layer4'
model = medcam.inject(self.model, output_dir=os.path.join(self.current_path, 'results/resnet152/test_ggcam'), backend='ggcam', layer=layer,
evaluate=False, save_scores=False, save_maps=True, save_pickle=False)
model.eval()
data_loader = DataLoader(self.dataset, batch_size=1, shuffle=False)
for i, batch in enumerate(data_loader):
_ = model(batch[0][0])
del model
gc.collect()
torch.cuda.empty_cache()
if CLEAR and os.path.isdir(os.path.join(self.current_path, 'results/resnet152')):
shutil.rmtree(os.path.join(self.current_path, 'results/resnet152'))
def test_gcampp(self):
layer = 'layer4'
model = medcam.inject(self.model, output_dir=os.path.join(self.current_path, 'results/resnet152/test_gcampp'), backend='gcampp', layer=layer,
evaluate=False, save_scores=False, save_maps=True, save_pickle=False)
model.eval()
data_loader = DataLoader(self.dataset, batch_size=1, shuffle=False)
for i, batch in enumerate(data_loader):
_ = model(batch[0][0])
del model
gc.collect()
torch.cuda.empty_cache()
if CLEAR and os.path.isdir(os.path.join(self.current_path, 'results/resnet152')):
shutil.rmtree(os.path.join(self.current_path, 'results/resnet152'))
if __name__ == '__main__':
unittest.main()
|
py | 1a4154be67fe6e74b3b9cc0aead1022c0b87497c | from django.db import models
# Create your models here.
class HumenManage(models.Manager):
def create_girl(self,name):
res = Humen.objects.create(
name = name,
age = 18,
money = 1
)
return res
class Humen(models.Model):
name = models.CharField(
max_length=30,
unique=True
)
age = models.IntegerField(
default=1
)
money = models.IntegerField(
default=0
)
# define the objects manager attribute
# my_objects = models.Manager()
objects = models.Manager()
# instantiate the custom HumenManage manager
new_objects = HumenManage() |
py | 1a41554d675ab354748e7a8df514c7c31158c2ab | """
main.py
Core script for downloading and running MNIST Data, initializing and training various neural models, and logging
training statistics.
"""
from argparse import Namespace
from datetime import datetime
from pytorch_lightning.loggers import WandbLogger
from tap import Tap
from models.cnn import CNN
from models.feedforward import FeedForward
import pytorch_lightning as pl
class ArgumentParser(Tap):
# Weights & Biases Parameters
run_name: str = None # Informative Run-ID for WandB
project: str = 'mnist-skeleton' # Project Name for WandB Logging
data_dir: str = 'data/' # Where to download data
save_dir: str = 'checkpoints/' # Where to save WandB Artifacts
sync: bool = False # Boolean if developing (no WandB Logging!)
# GPUs
gpus: int = 0 # Number of GPUs to run with
# Model Parameters
model: str = 'cnn' # Model type to run -- one of < feedforward | cnn >
# FeedForward Network Parameters
ff_1: int = 128 # Number of neurons in first hidden layer
ff_2: int = 256 # Number of neurons in second hidden layer
# CNN Parameters
cnn_conv1: int = 10 # Number of Channels for First Convolution Layer
cnn_conv2: int = 20 # Number of Channels for Second Convolution Layer
kernel_size: int = 5 # Kernel Size (patch size) for Convolution Layers
cnn_ff: int = 50 # Number of neurons in projection layer after Convolution Layers
# Training Parameters
bsz: int = 64 # Batch Size
opt: str = 'adam' # Optimizer to use -- one of < adam | sgd >
lr: float = 0.001 # Learning Rate
def main():
# Parse Arguments --> Convert from Namespace --> Dict --> Namespace because of weird WandB Bug
args = Namespace(**ArgumentParser().parse_args().as_dict())
# Create Logger
if args.run_name is None:
run_name = "%s-%s-%d-%.1g" % (args.model, args.opt, args.bsz, args.lr) + '+' + \
datetime.now().strftime('%m/%d-[%H:%M]')
else:
run_name = args.run_name + '+' + datetime.now().strftime('%m/%d-[%H:%M]')
wandb = WandbLogger(name=run_name, save_dir=args.save_dir, project=args.project, offline=not args.sync)
# Create MNIST Module
if args.model == 'feedforward':
nn = FeedForward(args)
elif args.model == 'cnn':
nn = CNN(args)
# Prepare Data and Populate Data Loader
nn.prepare_data()
nn.train_dataloader()
# Create Trainer
trainer = pl.Trainer(default_save_path=args.save_dir, max_epochs=10, logger=wandb, gpus=args.gpus)
# Watch Histogram of Gradients
wandb.experiment.watch(nn, log='gradients', log_freq=100)
# Fit
trainer.fit(nn)
if __name__ == "__main__":
main()
|
py | 1a4155a6486f080d7343579fc07f265831b97a81 | #!/usr/bin/env python3
#coding:utf8
from sanic.log import logger
# Global debug switch; ignored automatically if the app is started via the sanic command line
# Run from the command line:
# https://sanic.readthedocs.io/en/latest/sanic/deploying.html
# python3 -m sanic app.app --host=0.0.0.0 --port=9000 --worker=1 --debug
DEBUG_MODE=True
def create_app():
global DEBUG_MODE
import os
import logging
from sanic import Sanic
from sanic.response import json
from sanic.response import text
from api import blueprint as api
from api import views as api_views_base
app = Sanic()
app.blueprint(api)
if __name__ == "__main__":
loglevel = logging.DEBUG if DEBUG_MODE else logging.INFO
logger.setLevel(loglevel)
logger.debug("\tSTARTING DUMMY DEBUG MSG")
logger.info("\tSTARTING DUMMY INFO MSG")
logger.warn("\tSTARTING DUMMY WARN MSG")
logger.error("\tSTARTING DUMMY ERROR MSG")
logger.critical("STARTING DUMMY CRITICAL MSG")
for vclass in api_views_base:
v = vclass.as_view()
url = "/api/%s"%vclass.__url__.lstrip("/")
app.add_route(v, url)
for handler, (rule, router) in app.router.routes_names.items():
logger.info("Route: %s methods: %s name: %s"%(rule, '/'.join(list(router.methods)), router.name))
from base import request_middlewares, response_middlewares
logger.info("Resigtering request middlewares")
for ware in request_middlewares:
app.register_middleware(ware, attach_to="request")
logger.info("Resigtering response middlewares")
for ware in response_middlewares:
app.register_middleware(ware, attach_to="response")
logger.info("Register static dir to %s"%(os.path.realpath("./static")))
app.static('/static', './static')
return app
app = create_app()
if __name__ == "__main__":
app.run(debug=DEBUG_MODE, host="0.0.0.0", port=9000, access_log=True)
|
py | 1a415672633600a639d5e0e4dc83b9c910168e77 | import flyai
from flyai.train_helper import *
# submit("test", cmd="nvidia-smi")
# upload_data("../data/cifar-10-python.tar.gz", overwrite=True)
get_space() |
py | 1a41588f947096c3ea5fa0186347c036123de632 | import torch
from .Criterion import Criterion
from .utils import clear
class L1Cost(Criterion):
def __init__(self):
super(L1Cost, self).__init__()
self.output_tensor = torch.Tensor(1)
def updateOutput(self, input, target=None):
assert target is None
if self.output_tensor is None:
self.output_tensor = input.new(1)
self._backend.L1Cost_updateOutput(
self._backend.library_state,
input,
self.output_tensor
)
self.output = self.output_tensor[0]
return self.output
def updateGradInput(self, input, target=None):
assert target is None
self._backend.L1Cost_updateGradInput(
self._backend.library_state,
input,
None,
self.gradInput
)
return self.gradInput
def clearState(self):
clear(self, 'output_tensor')
return super(L1Cost, self).clearState()
|
py | 1a415b2bbbb28ce5e36eca75baa30543ebebfcdc | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=18
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.rx(1.6147786239451536).on(input_qubit[3])) # number=5
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=8
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.rx(0.666017642561036).on(input_qubit[2])) # number=14
c.append(cirq.H.on(input_qubit[0])) # number=10
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=7
c.append(cirq.H.on(input_qubit[0])) # number=15
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=16
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=12
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[1])) # number=13
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
circuit = circuit.with_noise(cirq.depolarize(p=0.01))
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_noisy455.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() |
py | 1a415b48002cb50ef82a67be9231fdf57620b15b | # matrix_determinant practice test: returns the determinant of a square matrix. For background on determinants, see: https://www.jianshu.com/p/0fd8ac349b5e
import tensorflow as tf
import numpy as np
# square matrix
data = np.mat([[11.1,12.1],
[21.1,22.1]]);
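# For a 2x2 matrix [[a, b], [c, d]] the determinant is a*d - b*c,
# here 11.1*22.1 - 12.1*21.1 = -10.0 (up to floating-point rounding),
# which is the value the session below should print.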
with tf.Session() as sess:
z = tf.matrix_determinant(data);
print(sess.run(z));
|
py | 1a415c76677faed0de0dae0c2a0fdda92f20b57c | import json
source = "Île-de-France Mobilités 04/2019"
no_dataset_id = True
query = [('park_ride', 'yes')]
master_tags = ('amenity',)
max_distance = 800
max_request_boxes = 3
overpass_timeout = 550
def dataset(fileobj):
import codecs
source = json.load(codecs.getreader('utf-8-sig')(fileobj))
#source = json.load(fileobj)
data = []
for el in source:
lat = float(el['geometry']['coordinates'][1])
lon = float(el['geometry']['coordinates'][0])
tags = {
'amenity': 'parking',
'park_ride': 'yes',
'capacity': el['fields']['nb_pl_pr'],
'official_name': el['fields']['nom_pr']
}
data.append(SourcePoint(el['recordid'], lat, lon, tags))
return data
# Example line of the source JSON:
# {
# "datasetid": "parcs-relais-idf",
# "recordid": "fe9680496370980cb966e3bca09793b443915fd8",
# "fields": {
# "www": "www.saint-quentin-en-yvelines.fr",
# "nb_pl_elec": 0.0,
# "nb_pl_pr": 219.0,
# "moa_pr": "CASQY",
# "nom_lda": "Saint-Quentin-en-Yvelines (Gare)",
# "nom_comm": "Montigny-le-Bretonneux",
# "nb_pl_2rm": 0.0,
# "mes_date": "2014-03-24T01:00:00+01:00",
# "mes_annee": 2014.0,
# "nom_gare": "SAINT-QUENTIN-EN-YVELINES (SNCF)",
# "nb_pl_cov": 0.0,
# "label_pr": 1.0,
# "gestion_pr": "Q-Park",
# "nom_pr": "Jol Le Theule",
# "struct_pr": "ouvrage",
# "nom_zdl": "Saint-Quentin-en-Yvelines (Avenue des Prs)",
# "id_ref_lda": 63812.0,
# "id_pr": 35.0,
# "geo_shape": {
# "type": "MultiPoint",
# "coordinates": [
# [2.0439044, 48.78620339998614, 0.0]
# ]
# },
# "id_ref_zdl": 43249.0,
# "nb_pl_pmr": 6.0,
# "adres_pr": "10 Rue Jol le Theule, 78180 Montigny-le-Bretonneux",
# "geo_point_2d": [48.78620339998614, 2.0439044],
# "nb_pl_v": 0.0,
# "insee_t": "78423"
# },
# "geometry": {
# "type": "Point",
# "coordinates": [2.0439044, 48.78620339998614]
# },
# "record_timestamp": "2019-02-19T16:15:48+01:00"
# }
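# Rough local sanity check (illustrative sketch only, not part of the original
# profile): dataset() is normally driven by the OSM conflator framework, which
# injects SourcePoint, so it cannot run standalone; the file name is assumed.
# with open('parcs-relais-idf.json', 'rb') as f:
#     points = dataset(f)
#     print(len(points), 'park-and-ride points parsed')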
|
py | 1a415c914baa7bdfb363ebaa071bf1c0a808b52e | # cifar10_svm.py
# Support Vector Machine (SVM)
import time
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn import model_selection
from scipy.io import loadmat
from sklearn.svm import SVC, LinearSVC
from sklearn.metrics import hinge_loss
from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix
def run_svc(svc, title):
# Fit model
start = time.time()
svc.fit(x_train, y_train)
end = time.time()
print("\nModel took %0.2f seconds to train"%(end - start))
# Calculate predictions
start = time.time()
predicted = svc.predict(x_test)
end = time.time()
print("Model took %0.2f seconds to calculate predictions"%(end - start))
# Output results
print('\naccuracy', accuracy_score(y_test, predicted))
print('\nSVM Results for ' + title)
print('\nConfusion Matrix:')
print(confusion_matrix(y_test, predicted))
print('\nClassification Report:', classification_report(y_test, predicted))
#print("Hinge loss", hinge_loss(y_test, predicted))
# Load datasets from file
npzfile = np.load('cifar10.npz')
print(npzfile.files)
x_train = npzfile['x_train']
x_test = npzfile['x_test']
y_train = npzfile['y_train']
y_test = npzfile['y_test']
# Scale pixel values to the [0, 1] range
x_train = x_train / 255
x_test = x_test / 255
# The SVM cannot deal with multi-dimensional image arrays, so we flatten each image to a 1D vector.
x_train_flat = np.empty(shape=[x_train.shape[0]] + [3072], dtype='float32')
for i in range(x_train.shape[0]):
x_train_flat[i,:] = x_train[i,:,:].flatten()
# Flatten x_test array
x_test_flat = np.empty(shape=[x_test.shape[0]] + [3072], dtype='float32')
for i in range(x_test.shape[0]):
x_test_flat[i,:] = x_test[i,:,:].flatten()
x_train = x_train_flat
x_test = x_test_flat
y_train = y_train.ravel()
y_test = y_test.ravel()
print('\n', type(x_train))
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('y_train shape:', y_train.shape)
print('y_test shape:', y_test.shape)
# Linear
svc = SVC(kernel='linear', C=1)
run_svc(svc, 'Linear')
# Radial Basis Function (RBF)
svc = SVC(kernel='rbf', gamma=1, C=1)
run_svc(svc, 'Radial Basis Function (RBF)')
# Polynomial
svc = SVC(kernel='poly', degree=5, C=1)
run_svc(svc, 'Polynomial')
|
py | 1a41612b995b2cbdc4c6eadb35b1cda0eff03bbf | from infi.clickhouse_orm import migrations
from ee.clickhouse.sql.session_recording_events import SESSION_RECORDING_EVENTS_MATERIALIZED_COLUMN_COMMENTS_SQL
from posthog.client import sync_execute
from posthog.settings import CLICKHOUSE_CLUSTER, CLICKHOUSE_REPLICATION
def create_has_full_snapshot_materialized_column(database):
if CLICKHOUSE_REPLICATION:
sync_execute(
f"""
ALTER TABLE sharded_session_recording_events
ON CLUSTER '{CLICKHOUSE_CLUSTER}'
ADD COLUMN IF NOT EXISTS
has_full_snapshot Int8 MATERIALIZED JSONExtractBool(snapshot_data, 'has_full_snapshot')
"""
)
sync_execute(
f"""
ALTER TABLE session_recording_events
ON CLUSTER '{CLICKHOUSE_CLUSTER}'
ADD COLUMN IF NOT EXISTS
has_full_snapshot Int8
"""
)
else:
sync_execute(
f"""
ALTER TABLE session_recording_events
ON CLUSTER '{CLICKHOUSE_CLUSTER}'
ADD COLUMN IF NOT EXISTS
has_full_snapshot Int8 MATERIALIZED JSONExtractBool(snapshot_data, 'has_full_snapshot')
"""
)
sync_execute(SESSION_RECORDING_EVENTS_MATERIALIZED_COLUMN_COMMENTS_SQL())
operations = [migrations.RunPython(create_has_full_snapshot_materialized_column)]
|
py | 1a4161c8099d2264ff4d025f08e7ecd2a4418f29 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2018, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('simple07.xlsx')
def test_write_nan(self):
"""Test write with NAN/INF. Issue #30"""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write_string(0, 0, 'Foo')
worksheet.write_number(1, 0, 123)
worksheet.write_string(2, 0, 'NAN')
worksheet.write_string(3, 0, 'nan')
worksheet.write_string(4, 0, 'INF')
worksheet.write_string(5, 0, 'infinity')
workbook.close()
self.assertExcelEqual()
def test_create_file_in_memory(self):
"""Test write with NAN/INF. Issue #30"""
workbook = Workbook(self.got_filename, {'in_memory': True})
worksheet = workbook.add_worksheet()
worksheet.write_string(0, 0, 'Foo')
worksheet.write_number(1, 0, 123)
worksheet.write_string(2, 0, 'NAN')
worksheet.write_string(3, 0, 'nan')
worksheet.write_string(4, 0, 'INF')
worksheet.write_string(5, 0, 'infinity')
workbook.close()
self.assertExcelEqual()
|
py | 1a41620a6a0fc5057a29a1b160254f6652496518 | import threading
import time
from scripts import Warnings, Log, Constants
job_id = 0
def get_job_id():
global job_id
job_id += 1
return job_id
class Job:
"""
Job for storing long-lasting tasks
"""
def __init__(self, title, info=None):
Log.debug("Created Job task with title '" + title + "'.")
self._id = get_job_id()
self._title = title
self._progress_count = 0
self._progress_max = 0
self._info = info
self._progress_message = ""
def perform_task(self):
Warnings.not_overridden()
"""
Setters
"""
def set_max_progress(self, count):
self._progress_max = count
def set_progress(self, count, message=None):
Log.trace("Setting the progress to: " + str(count) + "/" + str(self._progress_max) + " - " + str(message))
if message is not None:
self._progress_message = message
self._progress_count = count
def complete_progress(self, message=None):
Log.trace("Setting the progress to: " +
str(self._progress_max) + "/" + str(self._progress_max) + " - " + str(message))
if message is not None:
self._progress_message = message
self._progress_count = self._progress_max
def add_progress(self, count, message=None):
if message is not None:
self._progress_message = message
self._progress_count += count
Log.trace("Adding progress. Setting it to: " + str(self._progress_count) + "/" + str(self._progress_max)
+ " - " + str(message))
"""
Getters
"""
def get_id(self):
return self._id
def get_title(self):
return self._title
def get_progress(self):
return self._progress_count
def get_max_progress(self):
return self._progress_max
def get_progress_message(self):
return self._progress_message
def get_info(self):
return self._info
class SimultaneousJobs(Job):
"""
A type of job container for executing multiple jobs on separate threads (at the same time)
"""
class ThreadedJob(threading.Thread):
def __init__(self, job):
threading.Thread.__init__(self)
self._job = job
self._is_complete = False
def run(self):
self._job.perform_task()
self._is_complete = True
def get_job(self):
return self._job
def is_complete(self):
return self._is_complete
def __init__(self, jobs, title, progress_message=None, complete_message=None, info=None,
check_delay=Constants.UPDATE_PROGRESS_BAR_FREQ):
Job.__init__(self, title=title, info=info)
self._progress_message = progress_message
self._complete_message = complete_message
self._check_delay = check_delay
# Creates threaded jobs
self._threaded_jobs = []
for j in jobs:
self._threaded_jobs.append(SimultaneousJobs.ThreadedJob(job=j))
# Set max progress
max_progress = 0
for j in self._threaded_jobs:
max_progress += j.get_job().get_max_progress()
self.set_max_progress(max_progress)
def perform_task(self):
# Starts job threads
for j in self._threaded_jobs:
j.start()
# Waits until all jobs are complete
all_complete = False
while all_complete is False:
# Checks if a sub-task is complete
all_complete = True
for j in self._threaded_jobs:
all_complete &= j.is_complete()
# Compute progress
progress = 0
for j in self._threaded_jobs:
progress += j.get_job().get_progress()
self.set_progress(progress, self._progress_message)
# Sleeps
time.sleep(self._check_delay)
print("Simultaneous Task Status:", all_complete)
# Task is complete
self.complete_progress(self._complete_message)
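# Illustrative usage (assumed call site, not from the original project):
# combined = SimultaneousJobs(jobs=[job_a, job_b], title="Batch import",
#                             progress_message="Importing...",
#                             complete_message="Import finished")
# combined.perform_task()  # runs both jobs on their own threads and aggregates their progress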
|
py | 1a41634fe347a658ee3fb48f56863e2cffc97fec | # standart modules
import threading
import struct
import os
# blender modules
import bpy
import bmesh
# addon modules
import taichi as ti
import numpy as np
from .engine import mpm_solver
from . import types
from . import particles_io
from . import nodes
WARN_SIM_NODE = 'Node tree must not contain more than 1 "Simulation" node.'
WARN_NOT_SIM_NODE = 'Node tree does not have "Simulation" node.'
mpm_solver.USE_IN_BLENDER = True
IMPORT_NODES = (
'elements_particles_mesh_node',
'elements_particles_system_node'
)
# sim_node - simulation node
def get_cache_folder(operator, sim_node):
# particles socket
par_s = sim_node.outputs['Simulation Data']
cache_nodes = []
has_cache_node = False
if par_s.is_linked:
for link in par_s.links:
# disk cache node
disk = link.to_node
if disk.bl_idname == nodes.ElementsCacheNode.bl_idname:
cache_nodes.append(disk)
if not len(cache_nodes):
operator.is_finishing = True
operator.report(
{'WARNING'},
'Node tree does not have "Cache" node.'
)
return None, has_cache_node
elif len(cache_nodes) > 1:
operator.is_finishing = True
operator.report(
{'WARNING'},
'Node tree must not contain more than 1 "Cache" node.'
)
return None, has_cache_node
else:
cache_node = cache_nodes[0]
has_cache_node = True
folder_raw = cache_node.inputs['Folder'].get_value()[0]
folder = bpy.path.abspath(folder_raw)
return folder, has_cache_node
# get simulation nodes tree object
def get_tree_obj(node_tree):
# simulation nodes tree object
tree = types.Tree()
for node in node_tree.nodes:
if node.bl_idname == 'elements_simulation_node':
tree.sim_nds[node.name] = node
elif node.bl_idname in IMPORT_NODES:
if node.bl_idname == 'elements_particles_system_node':
import_type = 'PAR_SYS'
elif node.bl_idname == 'elements_particles_mesh_node':
import_type = 'PAR_MESH'
node.get_class()
tree.imp_nds[node.name] = node, import_type
elif node.bl_idname == 'elements_cache_node':
tree.cache_nds[node.name] = node
return tree
def create_emitter(operator, solv, emitter, vel):
# source object
src_obj = emitter.source_object
if not src_obj:
operator.is_finishing = True
operator.report(
{'WARNING'},
'Emitter does not have a source object.'
)
return
obj_name = src_obj.obj_name
obj = bpy.data.objects.get(obj_name)
if not obj:
operator.is_finishing = True
if not obj_name:
operator.report(
{'WARNING'},
'Emitter source object not specified.'
)
else:
operator.report(
{'WARNING'},
'Cannot find emitter source object: "{}".'.format(obj_name)
)
return
if obj.type != 'MESH':
operator.is_finishing = True
operator.report(
{'WARNING'},
'Emitter source object is not a mesh: "{}".'.format(obj.name)
)
return
if not emitter.material:
operator.is_finishing = True
operator.report(
{'WARNING'},
'Emitter does not have a material.'
)
return
if not len(obj.data.polygons):
operator.is_finishing = True
operator.report(
{'WARNING'},
'Emitter source object has no polygons: "{}"'.format(obj.name)
)
return
b_mesh = bmesh.new()
b_mesh.from_mesh(obj.data)
bmesh.ops.triangulate(b_mesh, faces=b_mesh.faces)
# emitter triangles
tris = []
for face in b_mesh.faces:
# triangle
tri = []
# v - bmesh vertex
for v in face.verts:
# final vertex coordinate
v_co = obj.matrix_world @ v.co
tri.extend(v_co)
tris.append(tri)
b_mesh.clear()
tris = np.array(tris, dtype=np.float32)
# material type
mat = emitter.material.typ
# taichi material
ti_mat = mpm_solver.MPMSolver.materials.get(mat, None)
if ti_mat is None:
assert False, mat
# emitter particles color
red = int(emitter.color[0].r * 255) << 16
green = int(emitter.color[0].g * 255) << 8
blue = int(emitter.color[0].b * 255)
color = red | green | blue
# add emitter
solv.add_mesh(
triangles=tris,
material=ti_mat,
color=color,
velocity=vel,
emmiter_id=operator.emitter_indices[emitter]
)
return True
class ELEMENTS_OT_SimulateParticles(bpy.types.Operator):
bl_idname = "elements.simulate_particles"
bl_label = "Simulate"
device: bpy.props.EnumProperty(
name='Device',
default='cpu',
items=(
('gpu', 'GPU', 'Run on GPU, automatically detect backend'),
('cuda', 'CUDA', 'Run on GPU, with the NVIDIA CUDA backend'),
('opengl', 'OpenGL', 'Run on GPU, with the OpenGL backend'),
('metal', 'Metal', 'Run on GPU, with the Apple Metal backend, if you are on macOS'),
('cpu', 'CPU', 'Run on CPU (default)')
)
)
device_memory_fraction: bpy.props.FloatProperty(
name='Device Memory',
default=50.0,
min=10.0,
max=100.0,
subtype='PERCENTAGE'
)
def __init__(self):
self.timer = None
self.thread = None
self.is_runnig = False
self.is_finishing = False
self.event_type = 'DEFAULT'
def create_emitters(self, frame):
for emitter in self.emitters:
if len(emitter.velocity) == 1:
vel = emitter.velocity[0]
else:
vel = emitter.velocity[frame]
if emitter.typ == 'EMITTER':
if emitter.emit_frame[0] == frame:
correct_emmiter = create_emitter(self, self.solv, emitter, vel)
if not correct_emmiter:
return self.cancel(bpy.context)
elif emitter.typ == 'INFLOW':
if type(emitter.enable) == float:
enable = emitter.enable
else:
if len(emitter.enable) == 1:
index = 0
else:
index = frame
enable = bool(int(round(emitter.enable[index], 0)))
if enable:
correct_emmiter = create_emitter(self, self.solv, emitter, vel)
if not correct_emmiter:
return self.cancel(bpy.context)
return True
def save_particles(self, frame, np_x, np_v, np_color, np_material, np_emitters):
if not os.path.exists(self.cache_folder):
os.makedirs(self.cache_folder)
# file name
fname = 'particles_{0:0>6}'.format(frame)
# particle file path
pars_fpath = os.path.join(self.cache_folder, fname)
# particles data
par_data = {
particles_io.POS: np_x,
particles_io.VEL: np_v,
particles_io.COL: np_color,
particles_io.MAT: np_material,
particles_io.EMT: np_emitters,
}
data = particles_io.write_pars_v1(par_data, pars_fpath, fname)
with open(pars_fpath + '.bin', 'wb') as file:
file.write(data)
write_obj = False
if write_obj:
with open(pars_fpath + '.obj', 'w') as f:
for i in range(np_x.shape[0]):
x = np_x[i]
print(f'v {x[0]} {x[1]} {x[2]}', file=f)
def run_sim(self):
# self.frame_end + 1 - this means include the last frame in the range
for frame in range(self.frame_start, self.frame_end + 1, 1):
if self.event_type == 'ESC':
print('STOP SIMULATION')
self.thread = None
self.is_finishing = True
self.cancel(bpy.context)
return
print('Frame: {}'.format(frame))
is_correct = self.create_emitters(frame)
if not is_correct is True:
return self.cancel(bpy.context)
# generate simulation state at t = 0
# particles
pars = self.solv.particle_info()
np_x = pars['position']
np_v = pars['velocity']
np_material = pars['material']
np_color = pars['color']
np_emitters = pars['emitter_ids']
# and then start time stepping
self.solv.step(1 / self.fps)
print(np_x)
self.save_particles(
frame,
np_x,
np_v,
np_color,
np_material,
np_emitters
)
def init_sim(self):
# simulation nodes
sim = []
for node in self.node_tree.nodes:
if node.bl_idname == 'elements_simulation_node':
sim.append(node)
if not len(sim):
self.report({'WARNING'}, WARN_NOT_SIM_NODE)
self.is_finishing = True
return self.cancel(bpy.context)
elif len(sim) > 1:
self.report({'WARNING'}, WARN_SIM_NODE)
self.is_finishing = True
return self.cancel(bpy.context)
else:
inputs = sim[0].inputs
self.scene.elements_frame_start = inputs['Frame Start'].get_value()[0]
self.scene.elements_frame_end = inputs['Frame End'].get_value()[0]
self.is_runnig = True
self.scene.elements_nodes.clear()
tree = get_tree_obj(self.node_tree)
# simulation nodes count
sim_nodes_cnt = len(tree.sim_nds)
if sim_nodes_cnt != 1:
if sim_nodes_cnt > 1:
self.report({'WARNING'}, WARN_SIM_NODE)
self.is_finishing = True
return
sim = list(tree.sim_nds.values())[0]
if not sim:
return self.cancel(bpy.context)
sim.get_class()
# simulation class
cls, _ = self.scene.elements_nodes[sim.name]
self.cache_folder, has_cache_node = get_cache_folder(self, sim)
if not has_cache_node:
return self.cancel(bpy.context)
if not self.cache_folder and has_cache_node:
self.report({'WARNING'}, 'Cache folder not specified')
self.is_finishing = True
return self.cancel(bpy.context)
self.frame_start = cls.frame_start[0]
self.frame_end = cls.frame_end[0]
self.fps = cls.fps[0]
# TODO: list is not implemented
if not cls.solver:
self.report(
{'WARNING'},
'Node tree does not have "MPM Solver" node.'
)
self.is_finishing = True
return {'FINISHED'}
res = cls.solver.resolution[0]
size = cls.solver.size[0]
ti.reset()
arch = getattr(ti, self.device)
mem = self.device_memory_fraction / 100
ti.init(arch=arch, device_memory_fraction=mem)
print(f"Creating simulation of res {res}, size {size}")
solv = mpm_solver.MPMSolver(
(res, res, res),
size=size,
unbounded=True,
use_emitter_id=True
)
solv.set_gravity(tuple(cls.gravity[0]))
self.emitters = cls.emitters
if not self.emitters:
self.report({'WARNING'}, 'Node tree does not have emitters.')
self.is_finishing = True
return self.cancel(bpy.context)
self.emitter_indices = {}
for index, emitter in enumerate(self.emitters):
self.emitter_indices[emitter] = index
if cls.colliders:
for collider in cls.colliders:
direct = collider.direction[0]
if not direct[0] and not direct[1] and not direct[2]:
direct = (0, 0, 1)
frict = collider.friction[0]
if frict < 0:
frict = 0
elif frict > 1:
frict = 1
solv.add_surface_collider(
tuple(collider.position[0]),
tuple(direct),
surface=collider.surface,
friction=frict
)
self.size = size
self.solv = solv
self.run_sim()
def launch_sim(self):
self.thread = threading.Thread(target=self.init_sim, args=())
self.thread.start()
def modal(self, context, event):
if event.type == 'ESC':
self.event_type = 'ESC'
if not self.is_runnig:
self.launch_sim()
if self.is_finishing:
self.cancel(context)
return {'FINISHED'}
return {'PASS_THROUGH'}
def execute(self, context):
self.node_tree = context.space_data.node_tree
self.scene = context.scene
context.window_manager.modal_handler_add(self)
win = context.window
self.timer = context.window_manager.event_timer_add(1.0, window=win)
return {'RUNNING_MODAL'}
def cancel(self, context):
if self.timer:
context.window_manager.event_timer_remove(self.timer)
self.timer = None
self.thread = None
self.is_finishing = True
def invoke(self, context, event):
wm = context.window_manager
return wm.invoke_props_dialog(self)
# operators draw function
def op_draw_func(self, context):
if context.space_data.node_tree:
if context.space_data.node_tree.bl_idname == 'elements_node_tree':
self.layout.operator('elements.simulate_particles')
self.layout.operator('elements.stable_render_animation')
class ELEMENTS_OT_StableRenderAnimation(bpy.types.Operator):
bl_idname = 'elements.stable_render_animation'
bl_label = 'Render'
bl_description = 'Stable Render Animation'
@classmethod
def poll(cls, context):
# space data
spc_data = context.space_data
if spc_data.node_tree:
return spc_data.node_tree.bl_idname == 'elements_node_tree'
def execute(self, context):
scn = context.scene
rend = scn.render
rend.image_settings.file_format = 'PNG'
# output folder
out = rend.filepath
for frm in range(scn.frame_start, scn.frame_end + 1):
file_name = '{0:0>4}.png'.format(frm)
file_path = os.path.join(bpy.path.abspath(out), file_name)
if rend.use_overwrite or not os.path.exists(file_path):
print('Render Frame:', frm)
scn.frame_set(frm)
bpy.ops.render.render(animation=False)
for image in bpy.data.images:
if image.type == 'RENDER_RESULT':
image.save_render(file_path, scene=scn)
bpy.data.images.remove(image)
return {'FINISHED'}
operator_classes = [
ELEMENTS_OT_SimulateParticles,
ELEMENTS_OT_StableRenderAnimation
]
def register():
for operator_class in operator_classes:
bpy.utils.register_class(operator_class)
def unregister():
for operator_class in reversed(operator_classes):
bpy.utils.unregister_class(operator_class)
|
py | 1a4163542a6ed8fdaac535881c221fbb80019dd7 | import sys
import os
print('Welcome User ....')
print('yt : for browsing youtube\n'+
'news : for browsing Google News\n'+
#'run : for running codes on GeeksforGeeks Online IDE\n'+
'shuffle : to shuffle play the songs of your favorite artists\n'+
'play : to shuffle play songs from your playlist\n'+
'artist : to go to your favorite artist menu\n'+
'lyrics : to go to lyrics menu\n'+
'maps : to use Google Maps\n'+
'exit : to exit')
while True:
choice = input('...$ ')
if choice.lower() == 'yt':
os.system("start cmd /C python yt.py")
elif choice.lower() == 'news':
os.system("start cmd /K python driver.py")
elif choice.lower() == 'shuffle':
os.system("start cmd /C python favart.py")
elif choice.lower() == 'play':
os.system("start cmd /C python playlist.py")
elif choice.lower() == 'artist':
os.system("start cmd /C python artist.py")
elif choice.lower() == 'exit':
sys.exit('USER CHOSE TO QUIT')
elif choice.lower() == 'lyrics':
os.system("start cmd /K python lyrics.py")
elif choice.lower() == 'maps':
os.system("start cmd /C python maps.py") |
py | 1a4163940204c2dae6f2565217ec60756203481c | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-17 19:29
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import deletion
def fwd(app, schema_editor):
Event = app.get_model('pretixbase', 'Event')
for e in Event.objects.select_related('organizer').all():
e.invoices.all().update(prefix=e.slug.upper() + '-', organizer=e.organizer)
class Migration(migrations.Migration):
dependencies = [
('pretixbase', '0068_subevent_frontpage_text'),
]
operations = [
migrations.AddField(
model_name='invoice',
name='prefix',
field=models.CharField(db_index=True, default='', max_length=160),
preserve_default=False,
),
migrations.AddField(
model_name='invoice',
name='organizer',
field=models.ForeignKey(null=True,
on_delete=deletion.PROTECT,
related_name='invoices', to='pretixbase.Organizer'),
preserve_default=False,
),
migrations.RunPython(
fwd, migrations.RunPython.noop
),
migrations.AlterUniqueTogether(
name='invoice',
unique_together=set([('organizer', 'prefix', 'invoice_no')]),
),
migrations.AlterField(
model_name='invoice',
name='organizer',
field=models.ForeignKey(on_delete=deletion.PROTECT, related_name='invoices', to='pretixbase.Organizer'),
),
]
|
py | 1a4164a14e2d8bae8b993deb3064c9275f56a2d7 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:owefsad
# datetime:2020/10/23 11:54
# software: PyCharm
# project: webapi
from apiserver.report.handler.error_log_handler import ErrorLogHandler
from apiserver.report.handler.heartbeat_handler import HeartBeatHandler
from apiserver.report.handler.narmal_vul_handler import NormalVulnHandler
from apiserver.report.handler.saas_method_pool_handler import SaasMethodPoolHandler
from apiserver.report.handler.sca_handler import ScaHandler
from apiserver.report.handler.api_route_handler import ApiRouteHandler
if __name__ == '__main__':
ErrorLogHandler()
HeartBeatHandler()
ScaHandler()
NormalVulnHandler()
SaasMethodPoolHandler()
ApiRouteHandler()
|
py | 1a4165a8ae90443bbcb543261458f48980bfbd56 | """Testing Deep Learning with Graph Neural Networks."""
import logging
import logging.config
import os
import sys
import matplotlib.pyplot as plt # this is for making the graph
import networkx as nx
import numpy as np
# import pandas as pd
import pygraphviz as pgv # sudo apt install libgraphviz-dev
from gnn.lib.common import CommonHelpers
from gnn.lib.data import DataHelpers, DataObject
from networkx.drawing.nx_agraph import graphviz_layout, write_dot
logging.config.fileConfig(
"logging.conf",
defaults={"logfilename": "training.log"},
disable_existing_loggers=True, # this will prevent modules from writing to our logger
)
logger = logging.getLogger("train")
def main():
"""Testing Deep Learning with Graph Neural Networks."""
data_helper = DataHelpers()
data_obj = DataObject()
common_helper = CommonHelpers()
# load the data files in from datastore
workdir = os.getcwd() + "/dataset/"
logger.info("Using workdir: {}".format(workdir))
created = common_helper.make_directory(
workdir
) # create the working directory if needed
bucket_name = "backend-datastore"
prefix = "test1/" # testing with a top level folder in storage bucket
# common_helper.download_to_local(workdir, bucket_name, prefix) # Make a flag for pulling remote data
data_helper.gather_dotfiles(workdir)
if data_helper.dot_files is None:
logger.info("No data files found.")
sys.exit(1)
for dot in data_helper.dot_files:
logger.info("Processing dot file: {}".format(dot))
this_uuid = dot.split(".")
data_obj = DataObject()
data_obj.my_uuid = this_uuid[0]
##############
# pygraphviz #
##############
gv = data_helper.create_graph(
workdir, dot
) # write the terraform digraph to a dot file
############
# Networkx #
############
options = {"edgecolors": "tab:gray", "node_size": 800, "alpha": 0.9}
G = nx.DiGraph(
gv, name=data_obj.my_uuid, node_color="tab:red", **options
) # Networkx can accept the pygraphviz dot format
nodelist = list(G.nodes(data=True))
# print(nodelist)
print(
"+++++ Sorted nodelist +++++\n", sorted(d for n, d in G.degree())
) # sorted list
# logger.debug(nx.clustering(G)) # cluster list
data_obj.node_count = G.number_of_nodes()
logger.debug("Node count: {}".format(data_obj.node_count))
data_obj.edge_count = G.number_of_edges()
logger.debug("Edge count: {}".format(data_obj.edge_count))
data_obj.density = G.number_of_edges() / (
G.number_of_nodes() * (G.number_of_nodes() - 1)
)
logger.debug(
"Graph density: {}".format(data_obj.density)
) # d (0 ≤ d ≤ 1 ) tells how close a graph is to being "complete"
# diameter D is the largest distance between any two nodes in the graph
data_helper.data_obj_update(
workdir, data_obj
) # update the data file for this graph
##########################################
# convert nx digraph to pandas dataframe #
##########################################
# df = nx.to_pandas_dataframe(DG)
# df = pd.DataFrame.from_dict(dict(G.nodes(data=True)), orient="index")
# print("+++++ Pandas Dataframe Values +++++\n", df.values)
# move this to the draw function
# plt.savefig(workdir + data_obj.my_uuid + ".plt.png")
# plt.show() # use this in Jupyter
####################
# Adjacency Matrix #
####################
A = nx.adjacency_matrix(G) # requires scipy module
# print(am)
# print(A.todense())
# A.setdiag(A.diagonal() * 2)
print("+++++ Adjacency Matrix ++++\n", A)
print("+++++ Dense Adj Matrix +++++\n", A.todense())
####################
# Incidence Matrix #
####################
I = nx.incidence_matrix(G)
print("+++++ Incidence Matrix +++++\n", I)
print("+++++ Dense Incidence Matrix +++++\n", I.todense())
""" Degree Matrix
Adding the inverse of the degree matrix ensures inclusion of root node.
"""
# Laplacian Matrix (L = D - A)
# L = nx.laplacian_matrix(DG)
numpy_recarray = nx.to_numpy_matrix(
G
) # graph adjacency matrix as a NumPy matrix.
AA = np.matrix(numpy_recarray)
X = np.matrix([[i, -i] for i in range(AA.shape[0])], dtype=float)
print(A * X) # apply propagation rule
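# Hedged sketch of the degree-normalised propagation rule referred to in the
# "Degree Matrix" note above: D^-1 (A + I) X, where the added identity keeps
# each node's own features. A_hat and D_inv are illustrative names only.
# A_hat = AA + np.eye(AA.shape[0])  # adjacency with self-loops
# D_inv = np.matrix(np.diag(1.0 / np.array(A_hat.sum(axis=1)).flatten()))  # inverse degree matrix
# print(D_inv * A_hat * X)  # normalised propagation rule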
if __name__ == "__main__":
main()
"""
__author__ = 'Franklin'
__version__ = '0.1'
__email__ = 'Franklin <[email protected]>'
"""
|
py | 1a4165d07756e5bf273661a28a6d356500d12797 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import numpy as np
epsilon = 1e-8
def whitening(image):
"""Whitening
Normalises image to zero mean and unit variance
Parameters
----------
image : np.ndarray
image to be whitened
Returns
-------
np.ndarray
whitened image
"""
ret = (image - np.mean(image)) / (np.std(image) + epsilon)
return ret
def normalise_zero_one(image):
"""Image normalisation
Normalises image to fit [0, 1] range
Parameters
----------
image : np.ndarray
image to be normalised
Returns
-------
np.ndarray
normalised image
"""
image = image.astype(np.float32)
ret = (image - np.min(image))
ret /= (np.max(image) + epsilon)
return ret
def normalise_one_one(image):
"""Image normalisation
Normalises image to fit [-1, 1] range
Parameters
----------
image : np.ndarray
image to be normalised
Returns
-------
np.ndarray
normalised image
"""
ret = normalise_zero_one(image)
ret *= 2.
ret -= 1.
return ret
def resize_image_with_crop_or_pad(image, img_size=[64,64,64], **kwargs):
"""Image resizing
Resizes image by cropping or padding dimension to fit specified size.
Parameters
----------
image : np.ndarray
image to be resized
img_size : list or tuple
new image size
kwargs
additional arguments to be passed to np.pad
Returns
-------
np.ndarray
resized image
"""
assert isinstance(image, (np.ndarray, np.generic))
assert (image.ndim - 1 == len(img_size) or image.ndim == len(img_size)), \
'Example size does not fit image size'
# find image dimensionality
rank = len(img_size)
# create placeholders for new shape
from_indices = [[0, image.shape[dim]] for dim in range(rank)]
to_padding = [[0, 0] for dim in range(rank)]
slicer = [slice(None)] * rank
for i in range(rank):
# for each dimensions find whether it is supposed to be cropped or padded
if image.shape[i] < img_size[i]:
to_padding[i][0] = (img_size[i] - image.shape[i]) // 2
to_padding[i][1] = img_size[i] - image.shape[i] - to_padding[i][0]
else:
from_indices[i][0] = int(np.floor((image.shape[i] - img_size[i]) / 2.))
from_indices[i][1] = from_indices[i][0] + img_size[i]
# create slicer object to crop or leave each dimension
slicer[i] = slice(from_indices[i][0], from_indices[i][1])
# pad the cropped image to extend the missing dimension
return np.pad(image[slicer], to_padding, **kwargs) |
py | 1a41661f00bde5b7e1083829990ee1e5b9d7fa02 | """
0123. Best Time to Buy and Sell Stock III
Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete at most two transactions.
Note: You may not engage in multiple transactions at the same time (i.e., you must sell the stock before you buy again).
Example 1:
Input: [3,3,5,0,0,3,1,4]
Output: 6
Explanation: Buy on day 4 (price = 0) and sell on day 6 (price = 3), profit = 3-0 = 3.
Then buy on day 7 (price = 1) and sell on day 8 (price = 4), profit = 4-1 = 3.
Example 2:
Input: [1,2,3,4,5]
Output: 4
Explanation: Buy on day 1 (price = 1) and sell on day 5 (price = 5), profit = 5-1 = 4.
Note that you cannot buy on day 1, buy on day 2 and sell them later, as you are
engaging multiple transactions at the same time. You must sell before buying again.
Example 3:
Input: [7,6,4,3,1]
Output: 0
Explanation: In this case, no transaction is done, i.e. max profit = 0.
"""
from typing import List
class Solution:
def maxProfit(self, prices: List[int]):
buy_1 = buy_2 = float('inf')
pro_1 = pro_2 = 0
for p in prices:
buy_1 = min(buy_1, p)
pro_1 = max(pro_1, p - buy_1)
buy_2 = min(buy_2, p - pro_1)
pro_2 = max(pro_2, p - buy_2)
return pro_2
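# Quick check against Example 1 above (illustrative only):
# Solution().maxProfit([3, 3, 5, 0, 0, 3, 1, 4]) == 6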
|
py | 1a41664cb2d16603ab5cc0b16c04b8b68d835996 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import numpy as np
from caffe2.python import utils, workspace
from caffe2.quantization.server import dnnlowp_pybind11
from hypothesis import assume
# This function asserts quantized results (output[1:]) are close enough to
# floating point results (output[0]).
# The error bound is derived based on assumption that there's no input
# quantization error.
def check_quantized_results_close(outputs, ref=None, symmetric=False, atol_scale=0.53):
if ref is None:
ref = outputs[0][0]
if ref.size == 0:
return
ref_min = min(np.min(ref), 0)
ref_max = max(np.max(ref), 0)
if symmetric:
ref_scale = 2 * max(abs(ref_max), abs(ref_min)) / 255
else:
ref_scale = (ref_max - ref_min) / 255
# in exact math this would be divided by 2, but we divide by roughly 1.9 here
# to allow for finite floating-point precision
atol = ref_scale * atol_scale
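# e.g. for a reference range of [0, 255] (non-symmetric), ref_scale is 1.0 and the
# default atol_scale of 0.53 allows roughly half a quantization step of error.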
for o in outputs[1:]:
np.testing.assert_allclose(o[0], outputs[0][0], atol=atol, rtol=0)
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
from itertools import tee
a, b = tee(iterable)
next(b, None)
return zip(a, b)
# Make sure we won't have overflows from the vpmaddubsw instruction used in fbgemm
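# (vpmaddubsw multiplies pairs of unsigned 8-bit and signed 8-bit values and
# accumulates each pair into a signed 16-bit lane, so every x0*w0 + x1*w1
# below must stay inside [-2^15, 2^15), which is what the adjustment enforces.)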
def avoid_vpmaddubsw_overflow_fc(
batch_size, input_channels, output_channels, X, X_min, X_max, W, W_min, W_max
):
for i, j in np.ndindex((batch_size, output_channels)):
for k in range(0, input_channels // 2 * 2, 2):
x0 = X[i, k] - X_min
x1 = X[i, k + 1] - X_min
w0 = W[j, k] - 128 - W_min
w1 = W[j, k + 1] - 128 - W_min
if x0 * w0 + x1 * w1 < -(1 << 15):
w1_adjusted = (-(1 << 15) - float(x0) * w0) / x1
W[j, k + 1] = int(w1_adjusted) + 128 + W_min
elif x0 * w0 + x1 * w1 > (1 << 15) - 1:
w1_adjusted = ((1 << 15) - 1 - float(x0) * w0) / x1
W[j, k + 1] = int(w1_adjusted) + 128 + W_min
# Go through the same loop again to double check we don't have any overflow
for i, j in np.ndindex((batch_size, output_channels)):
for k in range(0, input_channels // 2 * 2, 2):
x0 = X[i, k] - X_min
x1 = X[i, k + 1] - X_min
w0 = W[j, k] - 128 - W_min
w1 = W[j, k + 1] - 128 - W_min
assert -(1 << 15) <= x0 * w0 + x1 * w1 < (1 << 15)
# Make sure we won't have overflows from vpmaddubsw instruction used in
# fbgemm (FIXME: this assumes fbgemm is used only for NHWC and im2col
# is done in a way that input_channels is the fastest moving
# dimension).
#
# strides, pads, kernels, dilations, and sizes should be tuples with the same dimension
# (2 for 2D conv, 3 for 3D conv, and so on)
def avoid_vpmaddubsw_overflow(
strides,
pads,
kernels,
dilations,
sizes,
input_channels,
output_channels,
batch_size,
X,
X_min,
X_max,
W,
W_min,
W_max,
):
ndim = len(sizes)
dkernels = tuple((dilations[i] * (kernels[i] - 1) + 1) for i in range(ndim))
size_cols = tuple(
(sizes[i] + 2 * pads[i] - dkernels[i]) // strides[i] + 1 for i in range(ndim)
)
for out_idx in np.ndindex((batch_size,) + size_cols + (output_channels,)):
b = out_idx[0]
oc = out_idx[-1]
o_spatial = out_idx[1:-1]
for filter_idx1, filter_idx2 in pairwise(
np.ndindex(kernels + (input_channels,))
):
f0 = filter_idx1[:-1]
ic0 = filter_idx1[-1]
f1 = filter_idx2[:-1]
ic1 = filter_idx2[-1]
i0s = tuple(
strides[i] * o_spatial[i] - pads[i] + dilations[i] * f0[i]
for i in range(ndim)
)
i1s = tuple(
strides[i] * o_spatial[i] - pads[i] + dilations[i] * f1[i]
for i in range(ndim)
)
w0 = W[(oc,) + f0 + (ic0,)] - 128 - W_min
w1 = W[(oc,) + f1 + (ic1,)] - 128 - W_min
if all(0 <= i0s[i] < sizes[i] for i in range(ndim)):
x0 = X[(b,) + i0s + (ic0,)] - X_min
else:
# padding
x0 = -X_min
if all(0 <= i1s[i] < sizes[i] for i in range(ndim)):
x1 = X[(b,) + i1s + (ic1,)] - X_min
else:
# padding
x1 = -X_min
if x0 * w0 + x1 * w1 < -(1 << 15):
w1_adjusted = (-(1 << 15) - float(x0) * w0) / x1
W[(oc,) + f1 + (ic1,)] = int(w1_adjusted) + 128 + W_min
elif x0 * w0 + x1 * w1 >= (1 << 15):
w1_adjusted = ((1 << 15) - 1 - float(x0) * w0) / x1
W[(oc,) + f1 + (ic1,)] = int(w1_adjusted) + 128 + W_min
# Go through the same loop again to double check we don't have any overflow
for out_idx in np.ndindex((batch_size,) + size_cols + (output_channels,)):
b = out_idx[0]
oc = out_idx[-1]
o_spatial = out_idx[1:-1]
for filter_idx1, filter_idx2 in pairwise(
np.ndindex(kernels + (input_channels,))
):
f0 = filter_idx1[:-1]
ic0 = filter_idx1[-1]
f1 = filter_idx2[:-1]
ic1 = filter_idx2[-1]
i0s = tuple(
strides[i] * o_spatial[i] - pads[i] + dilations[i] * f0[i]
for i in range(ndim)
)
i1s = tuple(
strides[i] * o_spatial[i] - pads[i] + dilations[i] * f1[i]
for i in range(ndim)
)
w0 = W[(oc,) + f0 + (ic0,)] - 128 - W_min
w1 = W[(oc,) + f1 + (ic1,)] - 128 - W_min
if all(0 <= i0s[i] < sizes[i] for i in range(ndim)):
x0 = X[(b,) + i0s + (ic0,)] - X_min
else:
# padding
x0 = -X_min
if all(0 <= i1s[i] < sizes[i] for i in range(ndim)):
x1 = X[(b,) + i1s + (ic1,)] - X_min
else:
# padding
x1 = -X_min
assert -(1 << 15) <= x0 * w0 + x1 * w1 < (1 << 15)
# strides, pads, kernels, dilations, and sizes should be tuples with the same dimension
# (2 for 2D conv, 3 for 3D conv, and so on)
def generate_convnd_inputs(
strides,
pads,
kernels,
dilations,
sizes,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
groupwise_quantization=False,
preserve_activation_sparsity=False,
preserve_weight_sparsity=False,
):
dim = len(sizes)
assume(all(len(a) == dim for a in [strides, pads, kernels, dilations]))
assume(all(sizes[d] >= dilations[d] * (kernels[d] - 1) + 1 for d in range(dim)))
input_channels = input_channels_per_group * group
output_channels = output_channels_per_group * group
depthwise_convolution = (
input_channels_per_group == 1 and output_channels_per_group == 1
)
assert input_channels > 1
assert output_channels > 1
# X and W have scale 1, so exactly represented after quantization
X_min = 0 if preserve_activation_sparsity else -77
X_max = X_min + 255
X_range = X_max - X_min
if depthwise_convolution and groupwise_quantization:
# For depthwise convolution, it's not enough to set input channel 0
# to all X_min to avoid overflow from vpmaddubsw
X_range /= 2
X = np.round(
np.random.rand(*((batch_size,) + tuple(sizes) + (input_channels,))) * X_range
+ X_min
)
X = X.astype(np.float32)
if (
batch_size != 0
and depthwise_convolution
and groupwise_quantization
and not preserve_activation_sparsity
):
# Put X_max in a position not to be paired with any padded value.
# Put X_min to all positions that can be paired with the X_max value.
#
# This is an example of a pattern for 3x3x3
# . . . . .
# . . . . .
# . . . . .
# . . . . .
# . . . . min
#
# . . . . .
# . . . . min
# . min max min .
# min . . . .
# . . . . .
#
# min . . . .
# . . . . .
# . . . . .
# . . . . .
# . . . . .
# Make sure we have enough dimension
assert X.shape[1] >= 3
assert all(X.shape[d + 1] >= kernels[d] + 2 for d in range(1, dim))
# Take subtensor we want to manipulate
X_sub = X[(0,) * (X.ndim - dim - 1) + (slice(None),) * dim + (0,)]
# Put X_max in the middle of the subtensor
X_sub[(1,) + tuple(kernels[d] // 2 + 1 for d in range(1, dim))] = X_max
# Put X_min to the positions that can be paired with X_max across
# the slowest moving dimension
X_sub[[[0, 2]] + [[kernels[d] + 1, 0] for d in range(1, dim)]] = X_min
# Put X_min to other positions that can be paired with X_max
for d1 in range(1, dim):
X_sub[
[[1]]
+ [[kernels[d2] // 2 + 1] for d2 in range(1, d1)]
+ [[kernels[d1] // 2, kernels[d1] // 2 + 2]]
+ [[kernels[d2] + 1, 0] for d2 in range(d1 + 1, dim)]
] = X_min
else:
# input channel 0 is all X_min to avoid overflow from vpmaddubsw when
# multiplied with W_min and W_max
X[..., 0] = X_min
if batch_size != 0:
X[(0,) * (X.ndim - 1) + (1,)] = X_max
if preserve_weight_sparsity:
W_min = -128
W_max = 100
else:
W_min = -100
W_max = W_min + 255
W = np.round(
np.random.rand(
*((output_channels,) + tuple(kernels) + (input_channels_per_group,))
)
* (W_max - W_min)
+ W_min
)
W = W.astype(np.float32)
if groupwise_quantization:
for g in range(group):
W[(g * output_channels_per_group,) + (0,) * (W.ndim - 1)] = W_min
if depthwise_convolution:
W[(g * output_channels_per_group, 1) + (0,) * (W.ndim - 2)] = W_max
else:
assert output_channels_per_group > 1
W[(g * output_channels_per_group + 1,) + (0,) * (W.ndim - 1)] = W_max
# Make sure each group has different ranges to really see the effect
# of group-wise quantization.
if not preserve_weight_sparsity:
W[
g * output_channels_per_group : (g + 1) * output_channels_per_group,
] += g
else:
W[(0,) + (0,) * (W.ndim - 1)] = W_min
W[(1,) + (0,) * (W.ndim - 1)] = W_max
different_range_per_group = groupwise_quantization and not preserve_weight_sparsity
for g in range(group):
avoid_vpmaddubsw_overflow(
strides,
pads,
kernels,
dilations,
sizes,
input_channels_per_group,
output_channels_per_group,
batch_size,
X[..., g * input_channels_per_group : (g + 1) * input_channels_per_group],
X_min,
X_max,
W[g * output_channels_per_group : (g + 1) * output_channels_per_group,],
W_min + (g if different_range_per_group else 0),
W_max + (g if different_range_per_group else 0),
)
if order == "NCHW":
X = utils.NHWC2NCHW(X)
W = utils.NHWC2NCHW(W)
b = np.random.randn(output_channels).astype(np.float32)
return X, W, b
def generate_conv_inputs(
stride,
pad,
kernel,
dilation,
size,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
groupwise_quantization=False,
preserve_activation_sparsity=False,
preserve_weight_sparsity=False,
):
return generate_convnd_inputs(
(stride,) * 2,
(pad,) * 2,
(kernel,) * 2,
(dilation,) * 2,
(size,) * 2,
group,
input_channels_per_group,
output_channels_per_group,
batch_size,
order,
groupwise_quantization,
preserve_activation_sparsity,
preserve_weight_sparsity,
)
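# Illustrative usage sketch (added for clarity; the parameter values below are
# hypothetical). With these arguments the helper returns NHWC activations X of
# shape (2, 8, 8, 4), weights W of shape (8, 3, 3, 2), and a bias b of shape (8,):
#
#   X, W, b = generate_conv_inputs(
#       stride=1, pad=1, kernel=3, dilation=1, size=8,
#       group=2, input_channels_per_group=2, output_channels_per_group=4,
#       batch_size=2, order="NHWC",
#   )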
def run_conv_or_fc(
test_case,
init_net,
net,
X,
W,
b,
op_type,
engine,
order,
gc,
outputs,
scale=None,
zero_point=None,
):
if order:
# Conv
Output = collections.namedtuple("Output", ["Y", "op_type", "engine", "order"])
else:
# FC
Output = collections.namedtuple("Output", ["Y", "op_type", "engine"])
# We run DNNLOWP ops multiple times to test their first runs, which
# do caching and therefore exercise different code paths from the
# subsequent runs
# self.ws.run re-creates operator every time so this test covers
# cases when we have multiple nets sharing the same workspace
test_case.ws.create_blob("X").feed(X, device_option=gc)
test_case.ws.create_blob("W").feed(W, device_option=gc)
test_case.ws.create_blob("b").feed(b, device_option=gc)
if scale is not None and zero_point is not None:
with workspace.WorkspaceGuard(test_case.ws):
dnnlowp_pybind11.CreateInt8QuantParamsBlob(
"quant_param", float(scale), int(zero_point)
)
if init_net:
test_case.ws.run(init_net)
for i in range(1 if engine == "" else 2):
test_case.ws.run(net)
Y = test_case.ws.blobs["Y"].fetch()
if order:
outputs.append(Output(Y=Y, op_type=op_type, engine=engine, order=order))
else:
outputs.append(Output(Y=Y, op_type=op_type, engine=engine))
# workspace.CreateNet + workspace.RunNet reuses the same operator
if engine != "":
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
if scale is not None and zero_point is not None:
dnnlowp_pybind11.CreateInt8QuantParamsBlob(
"quant_param", float(scale), int(zero_point)
)
if init_net:
workspace.RunNetOnce(init_net)
workspace.CreateNet(net)
for i in range(2):
workspace.RunNet(net)
Y = workspace.FetchBlob("Y")
if order:
outputs.append(Output(Y=Y, op_type=op_type, engine=engine, order=order))
else:
outputs.append(Output(Y=Y, op_type=op_type, engine=engine))
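# Illustrative follow-up (an assumption about how callers typically use the
# helper above, not code from the original file): once run_conv_or_fc has
# appended results for several engines, the collected outputs can be compared
# against the reference run, e.g.
#
#   for o in outputs[1:]:
#       np.testing.assert_allclose(o.Y, outputs[0].Y, atol=atol, rtol=0)
#
# where `atol` would be chosen from the quantization scale of Y (hypothetical
# variable, not defined in this file).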
|
py | 1a41665cf2b3972db39825532fba4880bdb07868 | import typing
from enum import Enum, auto
from expr import Array, Assign, Binary, Call, Expr, Index, Get, Grouping, Lambda, Literal, Logical, Set, SetArray, Ternary, This, Unary, Variable
from interpreter import Interpreter
from stmt import Block, Break, Class, Expression, Function, If, Import, Print, Return, Stmt, Var, While
from lox_token import Token
class FunctionType(Enum):
NONE = auto()
FUNCTION = auto()
INITIALIZER = auto()
METHOD = auto()
class ClassType(Enum):
NONE = auto()
CLASS = auto()
SUBCLASS = auto()
class Resolver(Expr.Visitor, Stmt.Visitor):
def __init__(self, interpreter: Interpreter):
super().__init__()
self.interpreter = interpreter
self.scopes = []
self.current_function = FunctionType.NONE
self.current_class = ClassType.NONE
self.current_loop = False
def visit_array_expr(self, expr: Array) -> object:
for element in expr.elements:
self.resolve(element)
def visit_assign_expr(self, expr: Assign) -> object:
self.resolve(expr.value)
self.resolve_local(expr, expr.name)
def visit_binary_expr(self, expr: Binary) -> object:
self.resolve(expr.left)
self.resolve(expr.right)
def visit_call_expr(self, expr: Call) -> object:
self.resolve(expr.callee)
for argument in expr.arguments:
self.resolve(argument)
def visit_index_expr(self, expr: Index) -> object:
self.resolve(expr.objekt)
self.resolve(expr.index)
def visit_get_expr(self, expr: Get) -> object:
self.resolve(expr.objekt)
def visit_grouping_expr(self, expr: Grouping) -> object:
self.resolve(expr.expression)
def visit_lambda_expr(self, expr: Lambda) -> object:
self.resolve_function(expr, FunctionType.FUNCTION)
def visit_literal_expr(self, expr: Literal) -> object:
return None
def visit_logical_expr(self, expr: Logical) -> object:
self.resolve(expr.left)
self.resolve(expr.right)
def visit_set_expr(self, expr: Set) -> object:
self.resolve(expr.value)
self.resolve(expr.objekt)
def visit_setarray_expr(self, expr: SetArray) -> object:
self.resolve(expr.value)
self.resolve(expr.index)
self.resolve(expr.objekt)
def visit_ternary_expr(self, expr: Ternary) -> object:
self.resolve(expr.conditional)
self.resolve(expr.truthy)
self.resolve(expr.falsy)
def visit_this_expr(self, expr: This) -> object:
if self.current_class == ClassType.NONE:
self.interpreter.reporter.parse_error(expr.keyword, "Can't use 'this' outside of a class.")
return None
self.resolve_local(expr, expr.keyword)
def visit_unary_expr(self, expr: Unary) -> object:
self.resolve(expr.right)
def visit_variable_expr(self, expr: Variable) -> object:
if len(self.scopes) != 0 and expr.name.lexeme in self.scopes[-1] and self.scopes[-1][expr.name.lexeme]['is_defined'] == False:
self.interpreter.reporter.parse_error(expr.name, "Can't read local variable in its own initializer.")
self.resolve_local(expr, expr.name)
def visit_block_stmt(self, stmt: Block):
self.begin_scope()
self.resolve_statements(stmt.statements)
self.end_scope()
def visit_break_stmt(self, stmt: Break):
if not self.current_loop:
self.interpreter.reporter.parse_error(stmt.keyword, "Break statement outside of enclosing loop.")
return None
def visit_class_stmt(self, stmt: Class):
enclosing_class = self.current_class
self.current_class = ClassType.CLASS
self.declare(stmt.name)
self.define(stmt.name)
if stmt.name.lexeme in {superclass.name.lexeme for superclass in stmt.superclasses}:
self.interpreter.reporter.parse_error(stmt.name, "A class can't inherit from itself.")
if len(stmt.superclasses) > 0:
self.current_class = ClassType.SUBCLASS
for superclass in stmt.superclasses:
self.resolve(superclass)
self.begin_scope()
self.scopes[-1]["this"] = {"is_defined": True, "token": stmt.name}
for method in stmt.class_methods+stmt.instance_methods+stmt.getters:
declaration = FunctionType.METHOD
if method.name.lexeme == "init":
declaration = FunctionType.INITIALIZER
self.resolve_function(method, declaration)
self.end_scope()
self.current_class = enclosing_class
def visit_expression_stmt(self, stmt: Expression):
self.resolve(stmt.expression)
def visit_function_stmt(self, stmt: Function):
self.declare(stmt.name)
self.define(stmt.name)
self.resolve_function(stmt, FunctionType.FUNCTION)
def resolve_function(self, function: typing.Union[Function, Lambda], _type: FunctionType):
enclosing_function = self.current_function
self.current_function = _type
self.begin_scope()
for param in function.params:
self.declare(param)
self.define(param)
self.resolve_statements(function.body)
self.end_scope()
self.current_function = enclosing_function
def visit_if_stmt(self, stmt: If):
self.resolve(stmt.condition)
self.resolve(stmt.then_branch)
if stmt.else_branch is not None:
self.resolve(stmt.else_branch)
def visit_import_stmt(self, stmt: Import):
return None
def visit_print_stmt(self, stmt: Print):
self.resolve(stmt.expression)
def visit_return_stmt(self, stmt: Return):
if self.current_function == FunctionType.NONE:
self.interpreter.reporter.parse_error(stmt.keyword, "Can't return from top-level code.")
if stmt.value is not None:
if self.current_function == FunctionType.INITIALIZER:
self.interpreter.reporter.parse_error(stmt.keyword, "Can't return a value from an initializer.")
self.resolve(stmt.value)
def visit_var_stmt(self, stmt: Var):
self.declare(stmt.name)
if stmt.initializer is not None:
self.resolve(stmt.initializer)
self.define(stmt.name)
def visit_while_stmt(self, stmt: While):
self.resolve(stmt.condition)
self.enclosing_loop = self.current_loop
self.current_loop = True
self.resolve(stmt.body)
self.current_loop = self.enclosing_loop
def declare(self, name: Token):
if len(self.scopes) == 0:
return
scope = self.scopes[-1]
if name.lexeme in scope:
self.interpreter.reporter.parse_error(name, "Already a variable with this name in this scope.")
scope[name.lexeme] = {"is_defined": False, "token": name}
def define(self, name: Token):
if len(self.scopes) == 0:
return
self.scopes[-1][name.lexeme]["is_defined"] = True
def resolve_statements(self, statements: list[Stmt]):
for statement in statements:
self.resolve(statement)
def resolve(self, item: typing.Union[Expr, Stmt]):
item.accept(self)
def resolve_local(self, expr: Expr, name: Token):
for i in range(len(self.scopes)-1, -1, -1):
if name.lexeme in self.scopes[i]:
self.interpreter.resolve(expr, len(self.scopes)-1-i)
return
def begin_scope(self):
self.scopes.append(dict())
def end_scope(self):
scope = self.scopes.pop()
used_locals = {expr.name.lexeme for expr in self.interpreter.locals.keys() if not (isinstance(expr, This))}
for var in set(scope.keys()).difference(used_locals):
if var != "this":
self.interpreter.reporter.parse_error(scope[var]["token"], "Unused local variable {}.".format(var))
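# Illustrative usage sketch (added for clarity; Scanner and Parser are
# assumptions -- they are defined elsewhere in this interpreter project):
#
#   interpreter = Interpreter(...)
#   statements = Parser(Scanner(source).scan_tokens(), ...).parse()
#   resolver = Resolver(interpreter)
#   resolver.resolve_statements(statements)   # populates interpreter.locals
#   interpreter.interpret(statements)         # run only if resolution reported no errors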
|
py | 1a4166a6e675b347faf0c88b59c17a3774fac754 | """
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from petstore_api.exceptions import ApiAttributeError
def lazy_import():
from petstore_api.model.string_enum import StringEnum
globals()['StringEnum'] = StringEnum
class ArrayOfEnums(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'value': ([StringEnum],),
}
@cached_property
def discriminator():
return None
attribute_map = {}
read_only_vars = set()
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""ArrayOfEnums - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] ([StringEnum]): # noqa: E501
Keyword Args:
value ([StringEnum]): # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
"""ArrayOfEnums - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] ([StringEnum]): # noqa: E501
Keyword Args:
value ([StringEnum]): # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
self = super(OpenApiModel, cls).__new__(cls)
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
return self
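# Illustrative usage sketch (not generated code; the enum literal below is
# hypothetical and must be one of StringEnum's allowed values):
#
#   from petstore_api.model.string_enum import StringEnum
#   arr = ArrayOfEnums([StringEnum("placed")])
#   arr.value  # -> the wrapped list of StringEnum instances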
|
py | 1a4167bddd8b16924f1aed25e45908c62ea77851 | #!/usr/bin/env python
# Assume python 2.6 or 2.7
import glob
import os
import subprocess
## Simple test runner.
# -- config -----------------------
# Absolute path pointing to your cloned git repo of https://github.com/KhronosGroup/glTF-Sample-Models
sample_model_dir = "/home/syoyo/work/glTF-Sample-Models"
base_model_dir = os.path.join(sample_model_dir, "2.0")
# Include `glTF-Draco` when you build `loader_example` with draco support.
kinds = [ "glTF", "glTF-Binary", "glTF-Embedded", "glTF-MaterialsCommon"]
# ---------------------------------
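# Example of the directory layout this script expects (illustrative, derived
# from the glob patterns used in test() below):
#
#   <sample_model_dir>/2.0/<ModelName>/glTF/<ModelName>.gltf
#   <sample_model_dir>/2.0/<ModelName>/glTF-Binary/<ModelName>.glb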
failed = []
success = []
def run(filename):
print("Testing: " + filename)
cmd = ["./loader_example", filename]
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
except:
print("Failed to execute: ", cmd)
raise
if p.returncode != 0:
failed.append(filename)
print(stdout)
print(stderr)
else:
success.append(filename)
def test():
for d in os.listdir(base_model_dir):
p = os.path.join(base_model_dir, d)
if os.path.isdir(p):
for k in kinds:
targetDir = os.path.join(p, k)
g = glob.glob(targetDir + "/*.gltf") + glob.glob(targetDir + "/*.glb")
for gltf in g:
run(gltf)
def main():
test()
print("Success : {0}".format(len(success)))
print("Failed : {0}".format(len(failed)))
for fail in failed:
print("FAIL: " + fail)
if __name__ == '__main__':
main()
|
py | 1a41689a9608be0568ee931fa0cd2b0c337cbd81 | import os
import time
import hashlib
import argparse
import pandas as pd
import subprocess
def unmount_SDs(sd_prefix):
'''
Unmount all disks named with matching prefix
Inputs:
sd_prefix: user-specified list of sd card prefixes to use
'''
cwd = os.getcwd()
filename = "SDlist.txt"
for i in range(len(sd_prefix)):
cmd = "diskutil list | grep " + sd_prefix[i] + " > " + cwd + "/" + filename
subprocess.call(cmd,shell=True) # get disk info for mounted SDs with specified prefix using diskutil
if os.stat(filename).st_size != 0: # trying to read an empty file will throw an error
lst = pd.read_csv(filename, header=None, delim_whitespace=True) # strip out disk name(s) (ie /dev/disk2) and associated SD name(s)
lst.columns = ["0", "format", "name", "size", "size-units", "disk"]
disks = lst["disk"].values
names = lst["name"].values
for i in range(len(disks)):
cmd = "diskutil unmountDisk /dev/" + disks[i][0:-2] # 'diskutil list' actually saves name like "disk2s2", so strip off last two characters
subprocess.call(cmd, shell=True)
cmd = "rm " + cwd + "/" + filename
subprocess.call(cmd,shell=True) # delete SD reference file when done
def getlocalfile_md5(fname):
'''
Calculate the MD5 hash of a file to be used to check successful transfer
Inputs:
fname: filename to check
'''
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def copyfile_local(fname, srcpath, dstpath, delete_choice):
'''
Copy a file from a disk to a desired local destination, confirm proper data transfer, then delete original if desired
Inputs:
fname: file to transfer
srcpath: the path to the location of the file to transfer
dstpath: the path to the desired copy destination location
delete_choice: boolean decision to delete or keep the file in its original location after copy
'''
if not fname.startswith("."):
fname_safe = fname.replace(" ", "_") # replace whitespace in filename with underscores, if there.
if fname_safe != fname:
cmd = "mv " + srcpath + "/'" + fname + "' " + srcpath + "/" + fname_safe
os.system(cmd)
fname = fname_safe
copied = dstpath + "/" + fname
cmd = "cp -p " + srcpath + "/" + fname + " " + dstpath
os.system(cmd)
md5local = getlocalfile_md5(srcpath + '/' + fname) # get hash for original file
md5sd = getlocalfile_md5(copied) # get hash for new local copy
if not (md5local == md5sd):
print("Oh no! Hash test failed for " + dstpath + "/" + fname + ". Trying again.") # if copy doesn't match original, try again
copyfile_local(fname, srcpath, dstpath, delete_choice)
else:
if delete_choice: # delete file from sd if user specified to
os.remove(srcpath + "/" + fname)
def transfer_folder_contents(dst_path, sd_src_path, delete_choice):
'''
Transfer the entire contents of a folder to local storage
Inputs:
dst_path: the path to the desired copy destination location
sd_src_path: the path to the disk location of the folder to transfer
delete_choice: boolean decision to delete or keep file(s) in their original location after copy
'''
if not os.path.isdir(dst_path):
os.makedirs(dst_path, mode=0o777) # make directory with folders inside for each disk (or subdirectory)
files = os.listdir(path=sd_src_path)
for file in files:
if not file.startswith("."): # ignore hidden files
if os.path.isdir(sd_src_path + "/" + file): #recursively copy nested folders
local_subpath = dst_path + "/" + file
sd_subpath = sd_src_path + "/" + file
transfer_folder_contents(local_subpath, sd_subpath, delete_choice)
else: # bottom of the line. Copy file
copyfile_local(file, sd_src_path, dst_path, delete_choice)
def get_disks(sd_prefix, sd_mount):
'''
Get list of disks matching prefix
Inputs:
sd_prefix: user-specified list of sd card prefixes to use
sd_mount: mount point for SD cards
Returns a list of disks matching prefix
'''
# Account for naming errors (Kitzes Lab convention)
if sd_prefix[0] == "MSD":
sd_prefix.extend(["MS", "MD", "DMS", "DSM", "SDM", "SMD"])
# Get list of disks matching prefix
disks = os.listdir(path=sd_mount) # SD cards mount to /Volumes on Mac
matching_disks = [disk for disk in disks if disk.startswith(tuple(sd_prefix))]
if args.local or args.globus:
print(" Transferring files from " + str(len(matching_disks)) + " disks:\n")
return matching_disks
def local_transfer(sd_prefix, sd_mount, local_path, delete_choice, reformat_choice, unmount_choice):
'''
Initiate a local transfer from all SD cards meeting specs
Inputs:
sd_prefix: user-specified list of sd card prefixes to use
sd_mount: mount point for SD cards
local_path: destination for local copy
delete_choice: boolean decision whether to delete file(s) in original location after copy
reformat_choice: boolean decision whether to reformat cards after copy
unmount_choice: boolean decision wether to unmount cards after copy
'''
# Get list of disks matching prefixes
matching_disks = get_disks(sd_prefix, sd_mount)
# Transfer contents of all matching disks
for disk in matching_disks:
folder_name = str(disk)
sd_fullpath = sd_mount + "/" + disk
local_fullpath = os.path.join(local_path, folder_name)
transfer_folder_contents(local_fullpath, sd_fullpath, delete_choice)
print(" Files from " + disk + " copied to " + local_fullpath + ".")
if reformat_choice:
reformat_SDs_FAT32(matching_disks, sd_mount)
if not reformat_choice and unmount_choice:
unmount_SDs(matching_disks)
def globus_upload(sd_p, sd_mount, upload_dir, delete_choice, reformat_choice):
'''
Initiate a Globus transfer from all SD cards meeting specs
Inputs:
sd_p: user-specified list of sd card prefixes to use
sd_mount: mount point for SD cards
upload_dir: upload destination in Globus filesystem
delete_choice: boolean decision whether to delete file(s) in original location after copy
reformat_choice: boolean decision whether to reformat cards after copy
'''
import globus_sdk
#only import if user needs - this will slow things down very slightly for Globus users, but save time for local users
CLIENT_ID = "" # app
MYENDPOINT_ID = "" # UUID
DTN_ID = "" # dtn
client = globus_sdk.NativeAppAuthClient(CLIENT_ID)
client.oauth2_start_flow(refresh_tokens=True)
authorize_url = client.oauth2_get_authorize_url()
print('Please go to this URL and login: {0}'.format(authorize_url))
get_input = getattr(__builtins__, 'raw_input', input) # get correct input() fn to be compatible with Python2 or 3
auth_code = get_input("Please enter the code you get after login here: ").strip()
token_response = client.oauth2_exchange_code_for_tokens(auth_code)
globus_auth_data = token_response.by_resource_server["auth.globus.org"]
globus_transfer_data = token_response.by_resource_server["transfer.api.globus.org"]
# most specifically, you want these tokens as strings
AUTH_TOKEN = globus_auth_data["access_token"]
TRANSFER_TOKEN = globus_transfer_data["access_token"]
# a GlobusAuthorizer is an auxiliary object we use to wrap the token. In more
# advanced scenarios, other types of GlobusAuthorizers give us expressive power
authorizer = globus_sdk.AccessTokenAuthorizer(TRANSFER_TOKEN)
tc = globus_sdk.TransferClient(authorizer=authorizer)
tdata = globus_sdk.TransferData(tc, MYENDPOINT_ID, DTN_ID, label="", sync_level="checksum",preserve_timestamp=True,verify_checksum=True)
upload_dir = "~/" + upload_dir #f"~/{upload_dir}"
tc.operation_mkdir(DTN_ID, path=upload_dir) # new directory in ~/ibwo for each SD
# you will error out if you specified a directory that already exists
# Get list of disks matching prefixes
matching_disks = get_disks(sd_p, sd_mount)
# Upload contents of all matching disks
for d in matching_disks:
new_folder = upload_dir + "/" + str(d)
sd_fullpath = sd_mount + "/" + str(d)
tc.operation_mkdir(DTN_ID, path=new_folder) # new directory in indicated directory for each SD
files = os.listdir(path=sd_fullpath)
for file in files:
if not file.startswith("."): #ignore hidden files
if os.path.isdir(sd_fullpath + "/" + file): # recursively copy nested folders
tdata.add_item( sd_fullpath + "/" + file, new_folder + "/" + file, recursive=True)
else:
tdata.add_item(sd_fullpath + "/" + file, new_folder + "/" + file) # copy from SD to new Globus dir
transfer_result = tc.submit_transfer(tdata)
print("Globus task_id =", transfer_result["task_id"])
# not sure if it is safe to reformat right now, when globus transfer has been initiated but not necessarily completed.
# if(reformat_choice):
# reformat_SDs_FAT32(sd_prefix)
def reformat_SDs_FAT32(sd_prefix, sd_mount):
'''
Reformat disks matching prefix to FAT32 format (and delete all contents)
Inputs:
sd_prefix: user-specified list of sd card prefixes to use
sd_mount: mount point for SD cards
'''
print("\n Reformatting SD cards.\n---")
cwd = os.getcwd()
filename = "SDlist.txt"
for i in range(len(sd_prefix)):
cmd = "diskutil list | grep " + sd_prefix[i] + " > " + cwd + "/" + filename
subprocess.call(cmd,shell=True) # get disk info for mounted SDs with specified prefix using diskutil
if os.stat(filename).st_size != 0: # trying to read an empty file will throw an error
lst = pd.read_csv(filename, header=None, delim_whitespace=True) # strip out disk name(s) (ie /dev/disk2) and associated SD name(s)
lst.columns = ["0", "format", "name", "size", "size-units", "disk"]
disks = lst["disk"].values
names = lst["name"].values
for i in range(len(disks)): # reformat cards to clean FAT32 with original names
cmd = "diskutil eraseDisk FAT32 " + names[i] + " MBRFormat /dev/" + disks[i][0:-2] # 'diskutil list' actually saves name like "disk2s2", so strip off last two characters
subprocess.call(cmd, shell=True)
if not args.unmount:
cmd = "diskutil mountDisk /dev/" + disks[i][0:-2]
subprocess.call(cmd,shell=True)
print("---")
cmd = "rm " + cwd + "/" + filename
subprocess.call(cmd,shell=True) # delete SD reference file when done
if args.unmount:
matching_disks = get_disks(sd_prefix, sd_mount)
unmount_SDs(matching_disks)
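# Illustrative command lines (the script filename is hypothetical):
#
#   # copy all cards whose names start with MSD to ~/field_data, then unmount
#   python sd_transfer.py -p MSD -l ~/field_data -u
#
#   # copy, delete the originals, and reformat the cards afterwards
#   python sd_transfer.py -p MSD -l ~/field_data -d -r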
###################################################################################### MAIN
start = time.time()
donemsg = 1
local = 1
parser = argparse.ArgumentParser(description="Transfer files from SD card(s) to local storage or Globus cloud storage, and/or delete data or reformat SD card(s).")
parser.add_argument("-p", "--prefix", nargs='+', required=True, help="Prefix(es) of all your SD cards' names. Enter multiple prefixes separated by spaces to indicate a range of prefixed names. [Required]")
parser.add_argument("-m", "--mountPath", default='/Volumes', help ="The path to where SD cards mount on this computer (defaults to Mac's mountpoint: /Volumes). [Optional]")
parser.add_argument("-l", "--local", help="New local directory name (with path) to save data to. [Required for local transfer]")
parser.add_argument("-g", "--globus", help="New directory name (with absolute path) in your Globus filesystem to upload data to.[Required for local Globus transfer]")
parser.add_argument("-d", "--delete", action='store_true', help="Delete files from SD cards after transfer and confirmation are complete. Files are only deleted if this flag is included. [Optional]")
parser.add_argument("-r", "--reformat", action='store_true', help="Reformat SD card to FAT32, maintaining its name. WARNING: all data will be deleted during reformat, even if you didn't specify the -d flag (defaults to not reformat). To reformat but not transfer any data, use -l 0 -g 0 -r. [Optional]")
parser.add_argument("-u", "--unmount", action='store_true', help="Unmount SD cards from your computer after done with local copy or reformat. Don't use this for Globus upload! [Optional]")
parser.add_argument("-y", "--yes", action='store_true', help="Include this flag if you want to force deletion or reformatting without typing Y in the menu [Optional]")
args = parser.parse_args()
print(" SD prefix(es): ")
for i in args.prefix:
print(" " + i)
print(" SD mount path: " + args.mountPath)
# Print delete & reformatting message - make sure they're serious about deleting data off cards
if args.delete:
if not args.yes:
tmp = input("\n Please confirm (Y/N) that you want to delete all files from the SD cards after transfer is done:\n >>> ")
if tmp == "Y" or tmp == "y":
print(" Great! Just making sure.\n")
time.sleep(2)
else:
print(" Ok! Continuing with copy, but files will NOT be deleted.\n")
args.delete = False
time.sleep(2)
else:
print(" Deleting data after transfer complete.\n")
time.sleep(2)
if args.reformat:
sd_prefix = args.prefix
if sd_prefix[0] == "MSD":
sd_prefix.extend(["MDS", "DMS", "DSM", "SDM", "SMD"]) # account for naming errors
if not args.yes:
tmp = input("\n Please confirm (Y/N) that you want to reformat and delete all files from the SD cards after transfer is done (if any):\n >>> ")
if tmp == 'Y' or tmp == 'y':
print(" Great! Just making sure.\n")
time.sleep(2)
else:
print("Ok! Continuing with copy, but SD cards will NOT be reformatted.\n")
time.sleep(2)
args.reformat = False
else:
print(" Reformatting SD cards after transfer complete (if any).")
time.sleep(2)
if args.globus:
print(" Ignoring -r (reformat) flag - run again after Globus Upload is complete to ensure data isn't deleted before it istransferred.\n.")
if (not args.globus) and (not args.local): #initiate reformat if no transfer happening
reformat_SDs_FAT32(sd_prefix, args.mountPath)
# Initiate local transfer
if args.local:
print(" Saving to local directory: " + args.local)
local_transfer(args.prefix, args.mountPath, args.local, args.delete, args.reformat, args.unmount)
# Initiate Globus transfer
if args.globus:
local = 0
print(" Uploading to directory " + args.globus + " on Globus.")
tmp = input('\n Please confirm (Y/N) that you want to begin a Globus transfer, and have already updated the python script to include your Globus IDs (see README)\n >>> ')
if tmp == "Y" or tmp == "y":
globus_upload(args.prefix, args.mountPath, args.globus, args.delete, args.reformat)
else:
donemsg = 0
print(" Exiting.")
# 'Peace out'
if donemsg:
if not local:
print("\n Globus transfer initiated.\n")
if args.local:
print("\n Done with local transfer! Executed in " + str(time.time()-start) + " seconds\n")
print("\n Done with reformatting!\n")
|
py | 1a416ac96a25039a660a1db9a8a94154ad7e1fe4 | import struct
from io import BytesIO
from ot_types import *
#inline below:
# import ot_table
# from ot_font import OTFont, TableRecord
# from ot_file import calcCheckSum
class Table_head:
_expectedTag = "head"
# head v1.0 format
_head_version = ">2H"
_head_version_size = struct.calcsize(_head_version)
_head_1_0_format = ">2H4s2L2H2q" + "4h" + "2H" + "3h"
""" Structure:
(big endian) >
majorVersion uint16 H
minorVersion uint16 H
fontRevision Fixed 4s
checkSumAdjustment uint32 L
magicNumber uint32 L
flags uint16 H
unitsPerEm uint16 H
created LONGDATETIME q
modified LONGDATETIME q
xMin int16 h
yMin int16 h
xMax int16 h
yMax int16 h
macStyle uint16 H
lowestRecPPEM uint16 H
fontDirectionHint int16 h
indexToLocFormat int16 h
glyphDataFormat int16 h
"""
_head_1_0_size = struct.calcsize(_head_1_0_format)
_head_1_x_checkSumAdjustment_offset = struct.calcsize(">2H4s")
_head_1_0_fields = (
"majorVersion",
"minorVersion",
"fontRevision",
"checkSumAdjustment",
"magicNumber",
"flags",
"unitsPerEm",
"created",
"modified",
"xMin",
"yMin",
"xMax",
"yMax",
"macStyle",
"lowestRecPPEM",
"fontDirectionHint",
"indexToLocFormat",
"glyphDataFormat"
)
_head_1_0_defaults = (
1, # majorVersion
0, # minorVersion
b'\x00\x00\x00\x00', # fontRevision
0, # checkSumAdjustment
0x5F0F3CF5, # magicNumber
0, # flags
2048, # unitsPerEm
0, # created
0, # modified
0, # xMin
0, # yMin
0, # xMax
0, # yMax
0, # macStyle
0, # lowestRecPPEM
0, # fontDirectionHint
0, # indexToLocFormat
0 # glyphDataFormat
)
def __init__(self):
self.tableTag = Tag(self._expectedTag)
@staticmethod
def createNew_head():
"""Creates a new version 1.0 hhea table with default values."""
head = Table_head()
for k, v in zip(head._head_1_0_fields, head._head_1_0_defaults):
if k != "fontRevision":
setattr(head, k, v)
head.fontRevision = Fixed(head._head_1_0_defaults[2])
return head
@staticmethod
def tryReadFromFile(parentFont, tableRecord):
"""Returns a Table_head constructed from data in fileBytes.
Exceptions may be raised if tableRecord.tableTag doesn't match,
or if tableRecord.offset or .length do not fit within the file.
"""
head = Table_head()
from ot_font import OTFont, TableRecord
if not (isinstance(parentFont, OTFont) and isinstance(tableRecord, TableRecord)):
raise Exception()
import ot_table
ot_table.ValidateTableTag(tableRecord, head._expectedTag)
head.parentFont = parentFont
head.tableRecord = tableRecord
# get file bytes, then validate offset/length are in file bounds
fileBytes = parentFont.fileBytes
offsetInFile = tableRecord.offset
ot_table.ValidateOffsetAndLength(
len(fileBytes), offsetInFile, tableRecord.length
)
# get the table bytes: since offset length are in bounds, can get the expected length
tableBytes = fileBytes[offsetInFile : offsetInFile + tableRecord.length]
# check the version
if len(tableBytes) < head._head_version_size:
raise OTCodecError("The table lenght is wrong: can't even read the version.")
vals = struct.unpack(head._head_version, tableBytes[:head._head_version_size])
head.majorVersion, head.minorVersion = vals
if head.majorVersion != 1:
raise OTCodecError(f"Unsupported table version: {hhea.majorVersion}.{hhea.minorVersion}")
if len(tableBytes) < head._head_1_0_size:
raise OTCodecError(f"Can't read the version {hhea.majorVersion}.{hhea.minorVersion} hhea table: the table is too short.")
# unpack
vals = struct.unpack(head._head_1_0_format, tableBytes)
head.fontRevision = Fixed(vals[2])
for k, v in zip(head._head_1_0_fields[3:], vals[3:]):
setattr(head, k, v)
# calculate checksum
# Note: a special calculation is required for the head table. We need
# to make sure to include pad bytes from the file.
padded_length = (tableRecord.length + 3) - (tableRecord.length + 3) % 4
tableBytes = fileBytes[offsetInFile : offsetInFile + padded_length]
head.calculatedCheckSum = head._calcHeadCheckSum(tableBytes)
# Calculating the checkSumAdjustment for the font is somewhat costly,
# so don't do it up front; leave it until it's needed.
# head.calculatedCheckSumAdjustment = head._calcCheckSumAdjustment(parentFont)
return head
# End of tryReadFromFile
@staticmethod
def _calcHeadCheckSum(headBytes:bytes):
"""Calculates a checksum for the head table based on the provided data.
Can be called for a head table read from a file or a new head table
created in memory. A version 1.x table is assumed. The length of the
data should be a multiple of four. If not, the checksum will be
calculated after padding with null bytes. If the data is read from a
file, you should include padding bytes from the file.
"""
assert isinstance(headBytes, (bytearray, bytes, memoryview))
assert len(headBytes) >= Table_head._head_1_0_size
"""
The 'head' table requires special handling for calculating a checksum. The
process also involves the head.checksumAdjustment field.
From OT spec (v1.8.3) font file regarding TableRecord.checkSum for 'head':
To calculate the checkSum for the 'head' table which itself includes the
checkSumAdjustment entry for the entire font, do the following:
1. Set the checkSumAdjustment to 0.
2. Calculate the checksum for all the tables including the 'head' table
and enter that value into the table directory.
NOTE: This wording is unclear and can be misleading. The TableRecord.checkSum
for 'head' is calculated using the modified 'head' data only, not the rest of
the file.
From OT spec 'head' table regarding checkSumAdjustment:
To compute it: set it to 0, sum the entire font as uint32,
then store 0xB1B0AFBA - sum.
If the font is used as a component in a font collection file, the value
of this field will be invalidated by changes to the file structure and
font table directory, and must be ignored.
If in a TTC, ignore all that and just set both calculated values to 0.
"""
headCopy = bytearray(headBytes)
headCopy[Table_head._head_1_x_checkSumAdjustment_offset
: Table_head._head_1_x_checkSumAdjustment_offset + 4] = [0,0,0,0]
from ot_file import calcCheckSum
return calcCheckSum(headCopy)
# End _calcHeadCheckSum
def calcCheckSumAdjustment(self):
"""Calculates the checkSumAdjustment for the font containing the head
table. If the font is within a TTC, returns 0.
The checkSumAdjustment value is returned. No font data is changed.
The head table must have a parentFont attribute set to an OTFont
object, and that OT font must have the fileBytes attribute set to a
byte sequence containing the font data. This should only be called for
font data read from a file or for a complete font created in memory.
"""
from ot_font import OTFont, TableRecord
assert hasattr(self, "parentFont")
font = self.parentFont
assert isinstance(font, OTFont)
assert hasattr(font, "fileBytes")
# If within TTC, just return 0
if font.isWithinTtc:
return 0
# get the head TableRecord
head_rec = font.offsetTable.tryGetTableRecord("head")
if head_rec is None:
return None
# To calculate checkSumAdjustment, the font file must be modified by
# setting head.checkSumAdjustment to 0. A checksum is calculated for
# the entire font file with that modification. After computing the
# file checksum, the differnce from 0xB1B0AFBA is taken.
# https://docs.microsoft.com/en-us/typography/opentype/spec/otff#calculating-checksums
#
# To avoid copying the entire font data to make a small change, the
# file checksum can be computed sequentially on three segments:
#
# 1) data before the modified head table (not copied)
# 2) continue with a modified copy of the head table
# 3) continue with the remainder (not copied)
#
# A memoryview will be used to avoid copying.
fontBytesView = memoryview(font.fileBytes)
# All tables offsets (from start of file) are expected to be multiples
# of 4, though that might not be true in some fonts. Checksums must be
# calculated on 4-byte increments. Determine if we need to work around
# any such quirk.
phase = 4 - (head_rec.offset % 4) if (head_rec.offset % 4) != 0 \
else (head_rec.offset % 4)
# phase is the number of extra bytes from the start of the head table
# to include in the first segment
from ot_file import calcCheckSum
# get checksum for the first segment
first_segment_length = head_rec.offset + phase
assert first_segment_length % 4 == 0
first_segment = fontBytesView[:first_segment_length]
checksum = calcCheckSum(first_segment)
# For the second segment, use 12 bytes after the end of the first
# segment, which will include the head.checkSumAdjustment member.
# Get a copy and clear the checkSumAdjustment.
second_segment = bytearray(fontBytesView[first_segment_length : first_segment_length + 12])
csa_offset = Table_head._head_1_x_checkSumAdjustment_offset - phase
second_segment[csa_offset : csa_offset + 4] = [0,0,0,0]
# continue the checksum with the modified second segment
checksum = calcCheckSum(second_segment, leftPrior= checksum)
# finish the checksum with the third segment
third_segment = fontBytesView[first_segment_length + 12:]
checksum = calcCheckSum(third_segment, leftPrior= checksum)
return 0xB1B0AFBA - checksum
# End of _calcCheckSumAdjustment
# End of class Table_head
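# Illustrative usage sketch (added for clarity; not part of the original module):
#
#   head = Table_head.createNew_head()     # version 1.0 table with default values
#   head.unitsPerEm = 1000                 # adjust fields as needed
#
# For a table read from a font file, tryReadFromFile(font, record) also sets
# head.calculatedCheckSum, and head.calcCheckSumAdjustment() recomputes the
# whole-font checkSumAdjustment (returning 0 for fonts inside a TTC).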
|
py | 1a416b72ed5e784376f241cef2ac4aec70c18545 | import numpy as np
import pandas as pd
from scipy.sparse import issparse
from . ItClust import transfer_learning_clf
from . calculate_adj import distance
from . calculate_adj import calculate_adj_matrix
from . utils import find_l
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
class SpaDecon(object):
def __init__(self):
super(SpaDecon, self).__init__()
def deconvolution(self, source_data, target_data, histology_image=None, spatial_locations = None, p=0.5, histology = True, spatial = True, adj_matrix=None, adj = False, technology = 'Visium'):
if technology=='Visium':
threshold = 1/30
elif technology=='ST':
threshold = 1/200
if issparse(target_data.X):
target_data.X=target_data.X.A
target_data.var_names=[i.upper() for i in list(target_data.var_names)]
target_data.var["genename"]=target_data.var.index.astype("str")
if adj:
self.adj = adj_matrix
l = find_l(p, [i*0.1 for i in range(1,20)], self.adj)
adj_sub=np.exp(-1*self.adj/(2*(l**2)))
target_data.X=np.matmul(adj_sub,target_data.X)
elif spatial:
target_data.obs["x1"] = np.array(spatial_locations[1])
target_data.obs["x2"] = np.array(spatial_locations[2])
target_data.obs["x3"] = np.array(spatial_locations[3])
target_data.obs["x4"] = np.array(spatial_locations[4])
target_data.obs["x5"] = np.array(spatial_locations[5])
target_data=target_data[target_data.obs["x1"]==1]
adj=calculate_adj_matrix(x=target_data.obs["x2"].tolist(),y=target_data.obs["x3"].to_list(), x_pixel=target_data.obs["x4"].to_list(), y_pixel=target_data.obs["x5"].to_list(), image=histology_image, histology = histology)
#self.adj = adj
l = find_l(p, [i*0.1 for i in range(1,20)], adj)
adj_sub=np.exp(-1*adj/(2*(l**2)))
target_data.X=np.matmul(adj_sub,target_data.X)
del adj
clf=transfer_learning_clf()
clf.fit(source_data, target_data, tol = [0.01], threshold = threshold)
type_pred = clf.predict(write=False)
spad_props = type_pred[1]
# spad_props.columns = [i[0] for i in type_pred[2].values()]
spad_props.columns = clf.celltypes_final
spad_props.index = [i[0:len(i)-7] for i in spad_props.index]
self.props = spad_props
return spad_props
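# Illustrative usage sketch (an assumption about typical inputs: AnnData objects
# loaded with scanpy, plus a spot-location DataFrame and a histology image, as
# expected by deconvolution() above):
#
#   spd = SpaDecon()
#   props = spd.deconvolution(source_adata, target_adata,
#                             histology_image=img, spatial_locations=locations,
#                             technology="Visium")
#   # props: spots x cell-types proportion matrix (also stored in spd.props)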
|
py | 1a416db23b7f144e75eaa25147df6929f3fd0ee0 | def minmax(data):
min = max = data[0]
for number in data:
if number < min:
min = number
elif number > max:
max = number
return (min, max)
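# Example usage (illustrative). Note that minmax raises an IndexError for an
# empty sequence because it reads data[0] before iterating.
if __name__ == "__main__":
    print(minmax([3, 1, 4, 1, 5, 9, 2, 6]))  # -> (1, 9)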
|
py | 1a416e2249c611e534e5f9fde3c65f7839b80121 | import copy
### struct to keep track of position for error messages
class Position(object):
def __init__(self, ref):
self.ref = ref
self.line = 0
self.top = None
self.uses_pos = None
def __str__(self):
s = self.ref + ':' + str(self.line)
if self.uses_pos is None:
return s
else:
return str(self.uses_pos) + ' (at ' + s + ')'
### Exceptions
class Abort(Exception):
"""used for non-recoverable errors to abort parsing"""
pass
class Eof(Exception):
"""raised by tokenizer when end of file is detected"""
pass
class EmitError(Exception):
"""raised by plugins to fail the emit() function"""
def __init__(self, msg="", exit_code=1):
self.msg = msg
self.exit_code = exit_code
### error codes
## level:
## 1: critical error, can not be made into a warning
## 2: major error, can not be made into a warning
## 3: minor error, can be made into warning with -W
## 4: warning
error_codes = \
{
'READ_ERROR':
(1,
'read error: %s'),
'EOF_ERROR':
(1,
'premature end of file'),
'EXPECTED_QUOTED_STRING':
(1,
'expected quoted string after \'+\' operator'),
'UNKNOWN_KEYWORD':
(1,
'unknown keyword "%s"'),
'INCOMPLETE_STATEMENT':
(1,
'unterminated statement definition for keyword "%s", looking at %s'),
'EXPECTED_KEYWORD':
(1,
'expected keyword "%s"'),
'EXPECTED_KEYWORD_2':
(1,
'expected keyword "%s" as child to "%s"'),
'EXPECTED_DATA_DEF':
(1,
'expected a data definition statement as child to "%s"'),
'UNEXPECTED_KEYWORD':
(1,
'unexpected keyword "%s"'),
'UNEXPECTED_KEYWORD_1':
(1,
'unexpected keyword "%s", expected "%s"'),
'UNEXPECTED_KEYWORD_N':
(1,
'unexpected keyword "%s", expected one of %s'),
'UNEXPECTED_KEYWORD_CANONICAL':
(1,
'keyword "%s" not in canonical order, (See RFC 6020, Section 12)'),
'UNEXPECTED_KEYWORD_CANONICAL_1':
(1,
'keyword "%s" not in canonical order,'
'expected "%s", (See RFC 6020, Section 12)'),
'EXPECTED_ARGUMENT':
(1,
'expected an argument for keyword "%s"'),
'UNEXPECTED_ARGUMENT':
(1,
'did not expect an argument, got "%s"'),
'XML_IDENTIFIER':
(3,
'illegal identifier "%s", must not start with [xX][mM][lL]'),
'TRAILING_GARBAGE':
(2,
'trailing garbage after module'),
'BAD_VALUE':
(1,
'bad value "%s" (should be %s)'),
'CIRCULAR_DEPENDENCY':
(1,
'circular dependency for %s "%s"'),
'MODULE_NOT_FOUND':
(1,
'module "%s" not found in search path'),
'MODULE_NOT_FOUND_REV':
(1,
'module "%s" revision "%s" not found in search path'),
'MODULE_NOT_IMPORTED':
(1,
'no module with the namespace "%s" is imported'),
'BAD_IMPORT':
(1,
'cannot import %s "%s", must be a module'),
'BAD_IMPORT_YANG_VERSION':
(1,
'a version %s module cannot import a version %s module by revision'),
'BAD_INCLUDE':
(1,
'cannot include %s "%s", must be a submodule'),
'BAD_INCLUDE_YANG_VERSION':
(1,
'cannot include a version %s submodule in a version %s module'),
'BAD_MODULE_NAME':
(2,
'unexpected modulename "%s" in %s, should be %s'),
'WBAD_MODULE_NAME':
(4,
'unexpected modulename "%s" in %s, should be %s'),
'BAD_REVISION':
(3,
'unexpected latest revision "%s" in %s, should be %s'),
'WBAD_REVISION':
(4,
'unexpected latest revision "%s" in %s, should be %s'),
'BAD_SUB_BELONGS_TO':
(1,
'module %s includes %s, but %s does not specify a correct belongs-to'),
'MISSING_INCLUDE':
(1,
'submodule %s is included by %s, but not by the module %s'),
'PREFIX_ALREADY_USED':
(1,
'prefix "%s" already used for module %s'),
'PREFIX_NOT_DEFINED':
(1,
'prefix "%s" is not defined (reported only once)'),
'WPREFIX_NOT_DEFINED':
(4,
'prefix "%s" is not defined'),
'NODE_NOT_FOUND':
(1,
'node %s::%s is not found'),
'BAD_NODE_IN_AUGMENT':
(1,
'node %s::%s of type %s cannot be augmented'),
'BAD_NODE_IN_REFINE':
(1,
'node %s::%s cannot be refined'),
'BAD_REFINEMENT':
(1,
'"%s" node "%s::%s" cannot be refined with "%s"'),
'BAD_DEVIATE_KEY':
(2,
'key node "%s::%s" cannot be deviated with "not-supported"'),
'BAD_DEVIATE_ADD':
(2,
'the %s property already exists in node "%s::%s"'),
'BAD_DEVIATE_DEL':
(2,
'the %s property does not exist in node "%s::%s"'),
'BAD_DEVIATE_TYPE':
(2,
'the %s property cannot be added'),
'BAD_DEVIATE_WITH_NOT_SUPPORTED':
(2,
'cannot have other deviate statement together with "not-supported"'),
'EXTENSION_NOT_DEFINED':
(1,
'extension "%s" is not defined in module %s'),
'TYPE_NOT_FOUND':
(1,
'type "%s" not found in module %s'),
'FEATURE_NOT_FOUND':
(1,
'feature "%s" not found in module %s'),
'IDENTITY_NOT_FOUND':
(1,
'identity "%s" not found in module %s'),
'GROUPING_NOT_FOUND':
(1,
'grouping "%s" not found in module %s'),
'DEFAULT_CASE_NOT_FOUND':
(1,
'the default case "%s" is not found"'),
'MANDATORY_NODE_IN_DEFAULT_CASE':
(1,
'mandatory node in default case'),
'MULTIPLE_REFINE':
(1,
'the node "%s" is already refined at %s'),
'RANGE_BOUNDS':
(2,
'range error: "%s" is not larger than "%s"'),
'LENGTH_BOUNDS':
(2,
'length error: "%s" is not larger than "%s"'),
'LENGTH_VALUE':
(2,
'length error: "%s" is too large'),
'TYPE_VALUE':
(2,
'the value "%s" does not match its base type %s- %s'),
'DUPLICATE_ENUM_NAME':
(1,
'the enum name "%s" has already been used for the ' \
'enumeration at %s'),
'DUPLICATE_ENUM_VALUE':
(1,
'the integer value "%d" has already been used for the ' \
'enumeration at %s'),
'ENUM_VALUE':
(1,
'the enumeration value "%s" is not an 32 bit integer'),
'BAD_ENUM_VALUE':
(1,
'the given value "%s" does not match the base enum value "%d"'),
'DUPLICATE_BIT_POSITION':
(1,
'the position "%d" has already been used for the bit at %s'),
'BIT_POSITION':
(1,
'the position value "%s" is not valid'),
'BAD_BIT_POSITION':
(1,
'the given position "%s" does not match the base bit position "%d"'),
'NEED_KEY':
(1,
'the list needs at least one key'),
'NEED_KEY_USES':
(1,
'the list at "%s" needs at least one key because it is used as config'),
'KEY_BAD_CONFIG':
(1,
'the key "%s" does not have same "config" as its list'),
'BAD_KEY':
(1,
'the key "%s" does not reference an existing leaf'),
'BAD_UNIQUE':
(1,
'the unique argument "%s" does not reference an existing leaf'),
'BAD_UNIQUE_PART':
(1,
'the identifier "%s" in the unique argument does not reference '
'an existing container'),
'BAD_UNIQUE_PART_LIST':
(1,
'the identifier "%s" in the unique argument references a list; '
'this is not legal'),
'BAD_UNIQUE_CONFIG':
(1,
'the identifer "%s" has not the same config property as the'
' other nodes in the unique expression'),
'ILLEGAL_ESCAPE':
(1,
'the escape sequence "\%s" is illegal in double quoted strings'),
'ILLEGAL_ESCAPE_WARN':
(4,
'the escape sequence "\%s" is unsafe in double quoted strings' \
' - pass the flag --lax-quote-checks to avoid this warning'),
'UNIQUE_IS_KEY':
(4,
'all keys in the list are redundantly present in the unique statement'),
'DUPLICATE_KEY':
(2,
'the key "%s" must not be listed more than once'),
'DUPLICATE_UNIQUE':
(3,
'the leaf "%s" occurs more than once in the unique expression'),
'PATTERN_ERROR':
(2,
'syntax error in pattern: %s'),
'PATTERN_FAILURE':
(4,
'could not verify pattern: %s'),
'LEAFREF_TOO_MANY_UP':
(1,
'the path for %s at %s has too many ".."'),
'LEAFREF_IDENTIFIER_NOT_FOUND':
(1,
'%s:%s in the path for %s at %s is not found'),
'LEAFREF_IDENTIFIER_BAD_NODE':
(1,
'%s:%s in the path for %s at %s references a %s node'),
'LEAFREF_BAD_PREDICATE':
(1,
'%s:%s in the path for %s at %s has a predicate, '
'but is not a list'),
'LEAFREF_BAD_PREDICATE_PTR':
(1,
'%s:%s in the path\'s predicate for %s at %s is compared '
'with a node that is not a leaf'),
'LEAFREF_NOT_LEAF':
(1,
'the path for %s at %s does not refer to a leaf'),
'LEAFREF_NO_KEY':
(1,
'%s:%s in the path for %s at %s is not the name of a key leaf'),
'LEAFREF_MULTIPLE_KEYS':
(1,
'%s:%s in the path for %s at %s is referenced more than once'),
'LEAFREF_BAD_CONFIG':
(1,
'the path for %s is config but refers to a '
'non-config leaf %s defined at %s'),
'LEAFREF_DEREF_NOT_LEAFREF':
(1,
'the deref argument refers to node %s at %s which is'
' not a leafref leaf'),
'LEAFREF_DEREF_NOT_KEY':
(1,
'the deref argument refers to node %s at %s which'
' does not refer to a key (%s at %s)'),
'LEAFREF_DEREF_NOT_LEAFREF':
(1,
'the deref argument for %s at %s does not refer to a leafref leaf'),
'DUPLICATE_CHILD_NAME':
(1,
'there is already a child node to "%s" at %s with the name "%s" '
'defined at %s'),
'BAD_TYPE_NAME':
(1,
'illegal type name "%s"'),
'TYPE_ALREADY_DEFINED':
(1,
'type name "%s" is already defined at %s'),
'GROUPING_ALREADY_DEFINED':
(1,
'grouping name "%s" is already defined at %s'),
'FEATURE_ALREADY_DEFINED':
(1,
'feature name "%s" is already defined at %s'),
'IDENTITY_ALREADY_DEFINED':
(1,
'identity name "%s" is already defined at %s'),
'EXTENSION_ALREADY_DEFINED':
(1,
'extension name "%s" is already defined at %s'),
'BAD_RESTRICTION':
(1,
'restriction %s not allowed for this base type'),
'BAD_DEFAULT_VALUE':
(1,
'the type "%s" cannot have a default value'),
'MISSING_TYPE_SPEC':
(1,
'a type %s must have at least one %s statement'),
'MISSING_TYPE_SPEC_1':
(1,
'a type %s must have a %s statement'),
'BAD_TYPE_IN_UNION':
(1,
'the type %s (defined at %s) cannot be part of a union'),
'BAD_TYPE_IN_KEY':
(1,
'the type %s cannot be part of a key, used by leaf %s'),
'KEY_BAD_SUBSTMT':
(1,
'the statement %s cannot be given for a key'),
'DEFAULT_AND_MANDATORY':
(1,
'a \'default\' value cannot be given when \'mandatory\' is "true"'),
'DEFAULT_AND_MIN_ELEMENTS':
(1,
'a \'default\' value cannot be given when \'min-elements\' is'
' greater than 0'),
'DUPLICATE_DEFAULT':
(1,
'the default value %s is given twice in the leaf list'),
'CURRENT_USES_DEPRECATED':
(2,
'the %s definition is current, but the %s it references is deprecated'),
'CURRENT_USES_OBSOLETE':
(2,
'the %s definition is current, but the %s it references is obsolete'),
'DEPRECATED_USES_OBSOLETE':
(3,
'the %s definition is deprecated, but the %s it references is obsolete'),
'REVISION_ORDER':
(4,
'the revision statements are not given in reverse chronological order'),
'EXTENSION_ARGUMENT_PRESENT':
(1,
'unexpected argument for extension %s'),
'EXTENSION_NO_ARGUMENT_PRESENT':
(1,
'expected argument for extension %s'),
'SYNTAX_ERROR':
(1,
'syntax error: %s'),
'DUPLICATE_NAMESPACE':
(1,
'duplicate namespace uri %s found in module %s'),
'MISSING_ARGUMENT_ATTRIBUTE':
(1,
'missing argument attribute "%s" for "%s"'),
'MISSING_ARGUMENT_ELEMENT':
(1,
'missing argument element "%s" for "%s"'),
'UNEXPECTED_ATTRIBUTE':
(1,
'unexpected attribute %s'),
'INVALID_CONFIG':
(2,
'config true cannot be set when the parent is config false'),
'XPATH_SYNTAX_ERROR':
(2,
'XPath syntax error: %s'),
'XPATH_VARIABLE':
(2,
'XPath variable "%s" is not defined in the XPath context'),
'XPATH_FUNCTION':
(2,
'XPath function "%s" is not defined in the XPath context'),
'AUGMENT_MANDATORY':
(1,
'cannot augment with mandatory node %s'),
'LONG_IDENTIFIER':
(3,
'identifier %s exceeds %s characters'),
'CONFIG_IGNORED':
(4,
'explicit config statement is ignored'),
'UNUSED_IMPORT':
(4,
'imported module %s not used'),
'UNUSED_TYPEDEF':
(4,
'locally scoped typedef %s not used'),
'UNUSED_GROUPING':
(4,
'locally scoped grouping %s not used'),
'KEY_HAS_DEFAULT':
(4,
'default value for a key leaf is ignored'),
'KEY_HAS_MANDATORY_FALSE':
(4,
'"mandatory" statement for a key leaf is ignored'),
'LONG_LINE':
(4,
'line length %s exceeds %s characters'),
'STRICT_XPATH_FUNCTION':
(2,
'XPath function "%s" is not allowed for strict YANG compliance'),
}
def add_error_code(tag, level, fmt):
"""Add an error code to the framework.
Can be used by plugins to add special errors."""
error_codes[tag] = (level, fmt)
def err_level(tag):
try:
(level, fmt) = error_codes[tag]
return level
except KeyError:
return 0
def err_to_str(tag, args):
try:
(level, fmt) = error_codes[tag]
return fmt % args
except KeyError:
return 'unknown error %s' % tag
def err_add(errors, pos, tag, args):
error = (copy.copy(pos), tag, args)
# skip exact duplicates (same position, tag and arguments)
for (p, t, a) in errors:
if (p.line == pos.line and p.ref == pos.ref and
p.top == pos.top and t == tag and a == args):
return
errors.append(error)
def is_warning(level):
return not is_error(level)
def is_error(level):
return level < 4
def allow_warning(level):
return level > 2
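# Minimal usage sketch (not part of the original module): how a plugin might
# register its own error code and then render and classify it. The tag and
# message below are made up for illustration.
if __name__ == "__main__":
    add_error_code('MY_PLUGIN_BAD_NODE', 3,
                   'the node "%s" violates a plugin-specific rule')
    print(err_to_str('MY_PLUGIN_BAD_NODE', 'interface'))
    # levels 1-3 count as errors, level 4 as a warning; level 3 may be
    # downgraded to a warning (cf. allow_warning)
    print('is error:', is_error(err_level('MY_PLUGIN_BAD_NODE')))
    print('may be downgraded:', allow_warning(err_level('MY_PLUGIN_BAD_NODE')))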
|
py | 1a416ed52bbbe156acb0ae1c29d2ae5e296ed8fd | """
settings module - Subclasses of built-ins with some common accessible attributes
"""
class Setting(object):
pass
class IntSetting(int, Setting):
pass
class FloatSetting(float, Setting):
pass
class ListSetting(list, Setting):
pass
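# Minimal usage sketch (not part of the original module): the subclasses behave
# like the built-in value they wrap but, unlike the bare built-ins, they can
# carry extra attributes. The attribute name below is made up.
if __name__ == "__main__":
    timeout = IntSetting(30)
    timeout.source = "default config"   # a plain int would raise AttributeError here
    assert timeout + 5 == 35
    assert isinstance(timeout, Setting)
    print(timeout, timeout.source)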
|
py | 1a416ee4484d209a88c08650625f20eadcda440b | # Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import mock
from shade import task_manager
from shade.tests.unit import base
class TestException(Exception):
pass
class TaskTest(task_manager.Task):
def main(self, client):
raise TestException("This is a test exception")
class TaskTestGenerator(task_manager.Task):
def main(self, client):
yield 1
class TaskTestInt(task_manager.Task):
def main(self, client):
return int(1)
class TaskTestFloat(task_manager.Task):
def main(self, client):
return float(2.0)
class TaskTestStr(task_manager.Task):
def main(self, client):
return "test"
class TaskTestBool(task_manager.Task):
def main(self, client):
return True
class TaskTestSet(task_manager.Task):
def main(self, client):
return set([1, 2])
class TaskTestAsync(task_manager.Task):
def __init__(self):
super(task_manager.Task, self).__init__()
self.run_async = True
def main(self, client):
pass
class TestTaskManager(base.TestCase):
def setUp(self):
super(TestTaskManager, self).setUp()
self.manager = task_manager.TaskManager(name='test', client=self)
def test_wait_re_raise(self):
"""Test that Exceptions thrown in a Task are reraised correctly
This test is aimed at six.reraise(), called in Task::wait().
Specifically, we test if we get the same behaviour with all the
configured interpreters (e.g. py27, py34, pypy, ...)
"""
self.assertRaises(TestException, self.manager.submit_task, TaskTest())
def test_dont_munchify_int(self):
ret = self.manager.submit_task(TaskTestInt())
self.assertIsInstance(ret, int)
def test_dont_munchify_float(self):
ret = self.manager.submit_task(TaskTestFloat())
self.assertIsInstance(ret, float)
def test_dont_munchify_str(self):
ret = self.manager.submit_task(TaskTestStr())
self.assertIsInstance(ret, str)
def test_dont_munchify_bool(self):
ret = self.manager.submit_task(TaskTestBool())
self.assertIsInstance(ret, bool)
def test_dont_munchify_set(self):
ret = self.manager.submit_task(TaskTestSet())
self.assertIsInstance(ret, set)
@mock.patch.object(concurrent.futures.ThreadPoolExecutor, 'submit')
def test_async(self, mock_submit):
self.manager.submit_task(TaskTestAsync())
self.assertTrue(mock_submit.called)
|
py | 1a416f1815e1bcb1aaadb16b689c5a3a2960edaf | from __future__ import print_function, unicode_literals, division
import os
import re
import codecs
import platform
import logging
from subprocess import check_output
from tempfile import mkdtemp
from functools import partial
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
from pyrouge.utils import log
from pyrouge.utils.file_utils import verify_dir
REMAP = {"-lrb-": "(", "-rrb-": ")", "-lcb-": "{", "-rcb-": "}",
"-lsb-": "[", "-rsb-": "]", "``": '"', "''": '"'}
def clean(x):
return re.sub(
r"-lrb-|-rrb-|-lcb-|-rcb-|-lsb-|-rsb-|``|''",
lambda m: REMAP.get(m.group()), x)
class DirectoryProcessor:
@staticmethod
def process(input_dir, output_dir, function):
"""
Apply function to all files in input_dir and save the resulting output
files in output_dir.
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
logger = log.get_global_console_logger()
logger.info("Processing files in {}.".format(input_dir))
input_file_names = os.listdir(input_dir)
for input_file_name in input_file_names:
input_file = os.path.join(input_dir, input_file_name)
with codecs.open(input_file, "r", encoding="UTF-8") as f:
input_string = f.read()
output_string = function(input_string)
output_file = os.path.join(output_dir, input_file_name)
with codecs.open(output_file, "w", encoding="UTF-8") as f:
f.write(clean(output_string.lower()))
logger.info("Saved processed files to {}.".format(output_dir))
class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_output)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, temp_dir=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
self.temp_dir = temp_dir
self.log = log.get_global_console_logger()
self.log.setLevel(logging.WARNING)
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the settings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames. The regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
def sent_split_to_string(s): return "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
text: The text to convert, containing one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = [model_filename_pattern.replace('#ID#', id)]
# model_filenames = Rouge155.__get_model_filenames_for_id(
# id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp(dir=self.temp_dir)
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command).decode("UTF-8")
return rouge_output
def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def output_to_dict(self, output):
"""
Convert the ROUGE output into python dictionary for further
processing.
"""
# 0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
pattern = re.compile(
r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
results = {}
for line in output.split("\n"):
match = pattern.match(line)
if match:
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
match.groups()
measure = {
'Average_R': 'recall',
'Average_P': 'precision',
'Average_F': 'f_score'
}[measure]
rouge_type = rouge_type.lower().replace("-", '_')
key = "{}_{}".format(rouge_type, measure)
results[key] = float(result)
results["{}_cb".format(key)] = float(conf_begin)
results["{}_ce".format(key)] = float(conf_end)
return results
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
"""
Verify presence of ROUGE-1.5.5.pl and the data folder, and set
those paths.
"""
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.
"""
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp(dir=self.temp_dir)
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
"Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [
f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
options = [
'-e', self._data_dir,   # path to the ROUGE data directory
'-c', 95,               # 95% confidence interval
# '-2',
# '-1',
# '-U',
'-m',                   # apply Porter stemming
# '-v',
'-r', 1000,             # number of bootstrap resampling points
'-n', 2,                # compute ROUGE-N up to N=2
# '-w', 1.2,
'-a',                   # evaluate all systems in the config file
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
if __name__ == "__main__":
import argparse
from utils.argparsers import rouge_path_parser
parser = argparse.ArgumentParser(parents=[rouge_path_parser])
args = parser.parse_args()
rouge = Rouge155(args.rouge_home)
rouge.save_home_dir()
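# Usage sketch (not part of the original module): evaluating plain-text
# summaries end-to-end. The directories and filename patterns below are
# hypothetical; ROUGE-1.5.5 must be installed and its home dir configured.
#
#   rouge = Rouge155()
#   rouge.system_dir = 'summaries/system'          # one plain-text file per document
#   rouge.model_dir = 'summaries/reference'
#   rouge.system_filename_pattern = r'doc.(\d+).txt'
#   rouge.model_filename_pattern = 'doc.#ID#.txt'
#   output = rouge.convert_and_evaluate(split_sentences=True)
#   scores = rouge.output_to_dict(output)          # e.g. scores['rouge_1_f_score']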
|
py | 1a416fa70c799a330e8192c0e60d710fec1f41d5 | from __future__ import print_function
import pprint
from collections import OrderedDict
from nose.tools import assert_equal
from tools import unit
from xdress.doxygen import class_docstr, func_docstr
car_dict = {'file_name': 'Cars.h',
'kls_name': 'util::Car',
'members': {'methods': ['Car',
'Car',
'Car',
'navigate',
'traffic',
'isValid',
'~Car'],
'variables': ['nwheels', 'maxrpm', 'maxspeed', 'manufacturer']},
'namespace': 'util',
'protected-attrib': {'manufacturer': {'briefdescription': '',
'definition': 'str util::Car::manufacturer',
'detaileddescription': 'The manufacturer of the car. This could be anything from Saturn to Porche. ',
'type': 'str'},
'maxrpm': {'briefdescription': '',
'definition': 'double util::Car::maxrmp',
'detaileddescription': 'The maximum rmp this car can attain',
'type': 'double'},
'maxspeed': {'briefdescription': 'The top speed of the car',
'definition': 'double util::Car::maxspeed',
'detaileddescription': '',
'type': 'double'},
'nwheels': {'briefdescription': 'The number of wheels on the car. ',
'definition': 'uint util::Car::nwheels',
'detaileddescription': '',
'type': 'uint'}},
'public-func': {'Car': {'arg_string': '()',
'args': None,
'briefdescription': 'Default constructor. ',
'definition': 'util::Car::Car',
'detaileddescription': 'A very simple car class that can do the basics. This car can navigate, get a traffic report, and verify that it is indeed a valid car. ',
'ret_type': None},
'Car1': {'arg_string': '(const Car &other)',
'args': OrderedDict({'other': {'type': 'const '}}),
'briefdescription': 'Copy constructor. This literally makes a clone of the Car that is passed in.',
'definition': 'util::Car::Car',
'detaileddescription': '',
'ret_type': None},
'Car2': {'arg_string': '(uint nwheels, str manufacturer)',
'args': OrderedDict({'manufacturer': {'type': 'str'},
'nwheels': {'type': 'uint'}}),
'briefdescription': '',
'definition': 'util::Car::Car',
'detaileddescription': 'Construct a car by specifying how many wheels it should have and who the manufacturer is.',
'ret_type': None},
'isValid': {'arg_string': '()',
'args': None,
'briefdescription': 'Checks if the object is really a car. Basically sees that is has all the components of a car.',
'definition': 'bool util::Car::isValid',
'detaileddescription': '',
'ret_type': 'bool'},
'navigate': {'arg_string': '(str where, float32 howFast, Date when)',
'args': OrderedDict([('where', {'type': 'str'}),
('howFast', {'type': 'float32'}),
('when', {'type': 'Date'}),
]),
'briefdescription': 'Has the car drive to a specified location',
'definition': 'std::vector< int32> util::Car::navigate',
'detaileddescription': '',
'ret_type': 'std::vector< uint32 >'},
'traffic': {'arg_string': '(std::vector< int32 > &coord) const',
'args': OrderedDict({'coord': {'type': 'std::vector< unit32 > const &'}}),
'briefdescription': '',
'definition': 'str util::Car::traffic',
'detaileddescription': 'Check the traffic at a given location. The input parameter is a vector of integers specifying the latitude and longitude of the position where the traffic should be checked.',
'ret_type': 'str'},
'~Car': {'arg_string': '()',
'args': None,
'briefdescription': 'A destructor. ',
'definition': 'hbs::Car::~Car',
'detaileddescription': '',
'ret_type': None}}}
@unit
def test_classdocstr():
exp = \
"""A very simple car class that can do the basics. This car can
navigate, get a traffic report, and verify that it is indeed a valid
car.
Attributes
----------
nwheels (uint) : The number of wheels on the car.
maxrpm (double) : The maximum rmp this car can attain
maxspeed (double) : The top speed of the car
manufacturer (str) : The manufacturer of the car. This could be
anything from Saturn to Porche.
Methods
-------
Car
~Car
isValid
navigate
traffic
Notes
-----
This class was defined in Cars.h
The class is found in the "util" namespace
"""
actual = class_docstr(car_dict)
print('-------- Expected Class docstring --------')
print(exp)
print('-------- Actual Class docstring --------')
print(actual)
# Strip whitespace before testing b/c editor config
assert_equal(exp.strip(), actual.strip())
@unit
def test_funcdocstr():
exp = \
"""Has the car drive to a specified location
Parameters
----------
where : str
howFast : float32
when : Date
Returns
-------
res1 : std::vector< uint32 >
"""
actual = func_docstr(car_dict['public-func']['navigate'], is_method=True)
print('-------- Expected Class docstring --------')
print(exp)
print('-------- Actual Class docstring --------')
print(actual)
# Strip whitespace before testing b/c editor config
assert_equal(exp.strip(), actual.strip())
|
py | 1a41705493d711f64624d033d9e4c96bc540a206 | from tqdm import tqdm
import torch
from .utils import get_cosine_schedule
from . import mcmc
import math
from .exp_utils import evaluate_model
class SGLDRunner:
def __init__(self, model, dataloader, dataloader_test, epochs_per_cycle, warmup_epochs,
sample_epochs, learning_rate=1e-2, skip=1, metrics_skip=1,
temperature=1., data_mult=1., momentum=0., sampling_decay=True,
grad_max=1e6, cycles=1, precond_update=None,
metrics_saver=None, model_saver=None, reject_samples=False):
"""Stochastic Gradient Langevin Dynamics for posterior sampling.
On calling `run`, this class runs SGLD for `cycles` sampling cycles. In
each cycle, there are 3 phases: descent, warmup and sampling. The cycle
lasts for `epochs_per_cycle` epochs in total, and the warmup and
sampling phases last for `warmup_epochs` and `sample_epochs` epochs
respectively.
The descent phase performs regular gradient descent with momentum, i.e.
SGLD with temperature=0. The warmup phase raises the temperature to 1.
During the sample phase, samples get stored.
The learning rate keeps decreasing throughout the cycle, following a
cosine schedule whose multiplier goes from 1 at the beginning of the
cycle to 0 at the end.
The preconditioner gets updated every `precond_update` epochs,
regardless of the phase in the cycle.
Args:
model (torch.Module, PriorMixin): BNN model to sample from
num_data (int): Number of datapoints in the training set
warmup_epochs (int): Number of epochs per cycle for warming up the Markov chain, at the beginning.
sample_epochs (int): Number of epochs per cycle where the samples are kept, at the end.
learning_rate (float): Initial learning rate
skip (int): Number of samples to skip between saved samples during the sampling phase. Sometimes called "thinning".
metrics_skip (int): Number of samples to skip between saved metrics of the sampler
temperature (float): Temperature for tempering the posterior
data_mult (float): Effective replication of each datapoint (which is the usual approach to tempering in VI).
momentum (float): Momentum decay parameter for SGLD
sampling_decay (bool): Flag to control whether the learning rate should decay during sampling
grad_max (float): maximum absolute magnitude of an element of the gradient
cycles (int): Number of warmup and sampling cycles to perform
precond_update (int): Number of steps after which the preconditioner should be updated. None disables the preconditioner.
metrics_saver : HDF5Metrics to log metric with a certain name and value
"""
self.model = model
self.dataloader = dataloader
self.dataloader_test = dataloader_test
assert warmup_epochs >= 0
assert sample_epochs >= 0
assert epochs_per_cycle >= warmup_epochs + sample_epochs
self.epochs_per_cycle = epochs_per_cycle
self.descent_epochs = epochs_per_cycle - warmup_epochs - sample_epochs
self.warmup_epochs = warmup_epochs
self.sample_epochs = sample_epochs
self.skip = skip
self.metrics_skip = metrics_skip
# num_samples (int): Number of samples recorded per cycle
self.num_samples = sample_epochs // skip
assert sample_epochs % skip == 0
self.learning_rate = learning_rate
self.temperature = temperature
self.eff_num_data = len(dataloader.dataset) * data_mult
self.momentum = momentum
self.sampling_decay = sampling_decay
self.grad_max = grad_max
self.cycles = cycles
self.precond_update = precond_update
self.metrics_saver = metrics_saver
self.model_saver = model_saver
if model_saver is None:
self._samples = {
name: torch.zeros(torch.Size([self.num_samples*cycles])+p_or_b.shape, dtype=p_or_b.dtype)
for name, p_or_b in model.state_dict().items()}
self._samples["steps"] = torch.zeros(torch.Size([self.num_samples*cycles]), dtype=torch.int64)
self.param_names, self._params = zip(*model.named_parameters())
self.reject_samples = reject_samples
def _make_optimizer(self, params):
assert self.reject_samples is False, "SGLD cannot reject samples"
return mcmc.SGLD(
params=params,
lr=self.learning_rate, num_data=self.eff_num_data,
momentum=self.momentum, temperature=self.temperature)
def _make_scheduler(self, optimizer):
if self.sampling_decay is True or self.sampling_decay == "cosine":
schedule = get_cosine_schedule(
len(self.dataloader) * self.epochs_per_cycle)
return torch.optim.lr_scheduler.LambdaLR(
optimizer=optimizer, lr_lambda=schedule)
elif self.sampling_decay is False or self.sampling_decay == "stairs":
return torch.optim.lr_scheduler.StepLR(
optimizer, 150*len(self.dataloader), gamma=0.1)
elif self.sampling_decay == "flat":
# No-op scheduler
return torch.optim.lr_scheduler.StepLR(optimizer, 2**30, gamma=1.0)
raise ValueError(f"self.sampling_decay={self.sampling_decay}")
def run(self, progressbar=False):
"""
Runs the sampling on the model, drawing minibatches from self.dataloader.
Args:
progressbar (bool): Flag that controls whether a progressbar is printed
"""
self.optimizer = self._make_optimizer(self._params)
self.optimizer.sample_momentum()
self.scheduler = self._make_scheduler(self.optimizer)
self.metrics_saver.add_scalar("test/log_prob", math.nan, step=-1)
self.metrics_saver.add_scalar("test/acc", math.nan, step=-1)
def _is_sampling_epoch(_epoch):
_epoch = _epoch % self.epochs_per_cycle
sampling_epoch = _epoch - (self.descent_epochs + self.warmup_epochs)
return (0 <= sampling_epoch) and (sampling_epoch % self.skip == 0)
step = -1 # used for `self.metrics_saver.add_scalar`, must start at 0 and never reset
postfix = {}
for cycle in range(self.cycles):
if progressbar:
epochs = tqdm(range(self.epochs_per_cycle), position=0,
leave=True, desc=f"Cycle {cycle}, Sampling", mininterval=2.0)
else:
epochs = range(self.epochs_per_cycle)
for epoch in epochs:
for g in self.optimizer.param_groups:
g['temperature'] = 0. if epoch < self.descent_epochs else self.temperature
for i, (x, y) in enumerate(self.dataloader):
step += 1
store_metrics = (
i == 0 # The start of an epoch
or step % self.metrics_skip == 0)
initial_step = (
step == 0 # The very first step
or
# This is the first step after a sampling epoch
(i == 0 and _is_sampling_epoch(epoch-1)))
loss, acc, delta_energy = self.step(
step, x.to(self._params[0].device).detach(), y.to(self._params[0].device).detach(),
store_metrics=store_metrics,
initial_step=initial_step)
if progressbar and store_metrics:
postfix["train/loss"] = loss.item()
postfix["train/acc"] = acc.item()
if delta_energy is not None:
postfix["Δₑ"] = delta_energy
epochs.set_postfix(postfix, refresh=False)
if self.precond_update is not None and epoch % self.precond_update == 0:
self.optimizer.update_preconditioner()
state_dict = self.model.state_dict()
if _is_sampling_epoch(epoch):
self._save_sample(state_dict, cycle, epoch, step)
results = self._evaluate_model(state_dict, step)
if progressbar:
postfix.update(results)
epochs.set_postfix(postfix, refresh=False)
# Important to put here because no new metrics are added
# Write metrics to disk at most every 10 seconds
self.metrics_saver.flush(every_s=10)
# Save metrics for the last sample
(x, y) = next(iter(self.dataloader))
self.step(step+1,
x.to(self._params[0].device),
y.to(self._params[0].device),
store_metrics=True, initial_step=_is_sampling_epoch(-1))
def _save_sample(self, state_dict, cycle, epoch, step):
# TODO: refactor this into two `model_saver` classes
sampling_epoch = epoch - (self.descent_epochs + self.warmup_epochs)
if self.model_saver is None:
for name, param in state_dict.items():
self._samples[name][(self.num_samples*cycle)+(sampling_epoch//self.skip)] = param
else:
self.model_saver.add_state_dict(state_dict, step)
self.model_saver.flush()
def _evaluate_model(self, state_dict, step):
if len(self.dataloader_test) == 0:
return {}
self.model.eval()
state_dict = {k: v.unsqueeze(0) for k, v in state_dict.items()}
results = evaluate_model(
self.model, self.dataloader_test, state_dict,
likelihood_eval=True, accuracy_eval=True, calibration_eval=False)
self.model.train()
results = {"test/loss": -results["lp_last"],
"test/acc": results["acc_last"]}
for k, v in results.items():
self.metrics_saver.add_scalar(k, v, step)
return results
def _model_potential_and_grad(self, x, y):
self.optimizer.zero_grad()
loss, log_prior, potential, accs_batch, _ = self.model.split_potential_and_acc(x, y, self.eff_num_data)
potential.backward()
for p in self.optimizer.param_groups[0]["params"]:
p.grad.clamp_(min=-self.grad_max, max=self.grad_max)
if torch.isnan(potential).item():
raise ValueError("Potential is NaN")
return loss, log_prior, potential, accs_batch.mean()
def step(self, i, x, y, store_metrics, lr_decay=True, initial_step=False):
"""
Perform one step of SGLD on the model.
Args:
x (torch.Tensor): Training input data
y (torch.Tensor): Training labels
lr_decay (bool): Flag that controls whether the learning rate should decay after this step
Returns:
(loss, acc, delta_energy): the loss and accuracy of the model for x and y; delta_energy is None for plain SGLD
"""
loss, log_prior, potential, acc = self._model_potential_and_grad(x, y)
self.optimizer.step(calc_metrics=store_metrics)
lr = self.optimizer.param_groups[0]["lr"]
if lr_decay:
self.scheduler.step()
if store_metrics:
# The metrics are valid for the previous step.
self.store_metrics(i=i-1, loss=loss.item(), log_prior=log_prior.item(),
potential=potential.item(), acc=acc.item(), lr=lr,
corresponds_to_sample=initial_step)
return loss, acc, None
def get_samples(self):
"""
Returns the acquired SGLD samples from the last run.
Returns:
samples (dict): Dictionary of torch.tensors with num_samples*cycles samples for each parameter of the model
"""
if self.model_saver is None:
return {k: v for (k, v) in self._samples.items() if k != "steps"}
return self.model_saver.load_samples(keep_steps=False)
def store_metrics(self, i, loss, log_prior, potential, acc, lr,
corresponds_to_sample: bool,
delta_energy=None, total_energy=None, rejected=None):
est_temperature_all = 0.
est_config_temp_all = 0.
all_numel = 0
add_scalar = self.metrics_saver.add_scalar
for n, p in zip(self.param_names, self.optimizer.param_groups[0]["params"]):
state = self.optimizer.state[p]
add_scalar("preconditioner/"+n, state["preconditioner"], i)
add_scalar("est_temperature/"+n, state["est_temperature"], i)
add_scalar("est_config_temp/"+n, state["est_config_temp"], i)
est_temperature_all += state["est_temperature"] * p.numel()
est_config_temp_all += state["est_config_temp"] * p.numel()
all_numel += p.numel()
add_scalar("est_temperature/all", est_temperature_all / all_numel, i)
add_scalar("est_config_temp/all", est_config_temp_all / all_numel, i)
temperature = self.optimizer.param_groups[0]["temperature"]
add_scalar("temperature", temperature, i)
add_scalar("loss", loss, i)
add_scalar("acc", acc, i)
add_scalar("log_prior", log_prior, i)
add_scalar("potential", potential, i)
add_scalar("lr", lr, i)
add_scalar("acceptance/is_sample", int(corresponds_to_sample), i)
if delta_energy is not None:
add_scalar("delta_energy", delta_energy, i)
add_scalar("total_energy", total_energy, i)
if rejected is not None:
add_scalar("acceptance/rejected", int(rejected), i)
class VerletSGLDRunner(SGLDRunner):
def _make_optimizer(self, params):
return mcmc.VerletSGLD(
params=params,
lr=self.learning_rate, num_data=self.eff_num_data,
momentum=self.momentum, temperature=self.temperature)
def step(self, i, x, y, store_metrics, lr_decay=True, initial_step=False):
loss, log_prior, potential, acc = self._model_potential_and_grad(x, y)
lr = self.optimizer.param_groups[0]["lr"]
rejected = None
delta_energy = None
if i == 0:
# The very first step
if isinstance(self.optimizer, mcmc.HMC):
# momentum should be sampled already, but it does not hurt to
# sample again.
self.optimizer.sample_momentum()
self.optimizer.initial_step(
calc_metrics=True, save_state=self.reject_samples)
if self.reject_samples:
rejected = False # the first sample is what we have.
elif initial_step:
# Calculate metrics using the possible sample's parameter (which is
# not modified), its gradient, and the new momentum as updated by
# `final_step`.
self.optimizer.final_step(calc_metrics=True)
delta_energy = self.optimizer.delta_energy(self._initial_potential, potential)
if self.reject_samples:
rejected, _ = self.optimizer.maybe_reject(delta_energy)
# The first step of an epoch, but not the very first
if isinstance(self.optimizer, mcmc.HMC):
self.optimizer.sample_momentum()
self.optimizer.initial_step(
calc_metrics=False, save_state=self.reject_samples)
else:
# Any intermediate step
self.optimizer.step(calc_metrics=store_metrics)
if i == 0:
# Very first step
store_metrics = True
total_energy = delta_energy = self.optimizer.delta_energy(0., 0.)
self._initial_potential = potential.item()
self._total_energy = 0.
elif initial_step:
# First step of an epoch
store_metrics = True
self._initial_potential = potential.item()
self._total_energy += delta_energy
total_energy = self._total_energy
else:
# Any step
if store_metrics:
delta_energy = self.optimizer.delta_energy(self._initial_potential, loss)
total_energy = self._total_energy + delta_energy
if store_metrics:
# The metrics are valid for the previous step.
self.store_metrics(i=i-1, loss=loss.item(), log_prior=log_prior.item(),
potential=potential.item(), acc=acc.item(), lr=lr,
delta_energy=delta_energy,
total_energy=total_energy, rejected=rejected,
corresponds_to_sample=initial_step)
if lr_decay:
self.scheduler.step()
return loss, acc, delta_energy
class HMCRunner(VerletSGLDRunner):
def _make_optimizer(self, params):
assert self.temperature == 1.0, "HMC only implemented for temperature=1."
assert self.momentum == 1.0, "HMC only works with momentum=1."
assert self.descent_epochs == 0, "HMC not implemented for descent epochs with temp=0."
return mcmc.HMC(
params=params,
lr=self.learning_rate, num_data=self.eff_num_data)
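# Usage sketch (not part of the original module): the runners above expect a
# model exposing split_potential_and_acc(), train/test dataloaders and a
# metrics saver; every name and value below is illustrative only.
#
#   runner = SGLDRunner(model, train_loader, test_loader,
#                       epochs_per_cycle=50, warmup_epochs=10, sample_epochs=30,
#                       learning_rate=1e-2, skip=3, cycles=2,
#                       metrics_saver=metrics, model_saver=None)
#   runner.run(progressbar=True)
#   samples = runner.get_samples()   # dict of tensors, num_samples*cycles entries each
#
# Note that sample_epochs must be divisible by skip, and epochs_per_cycle must
# be at least warmup_epochs + sample_epochs (both are asserted in __init__).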
|
py | 1a417113745b3b243b376631d6d2bafd2d7e0eb8 | from dataclasses import dataclass, field
__NAMESPACE__ = "ElemDecl/valueConstraint"
@dataclass
class Root:
class Meta:
name = "root"
namespace = "ElemDecl/valueConstraint"
value: str = field(
init=False,
default="1.0E-2",
metadata={
"required": True,
"pattern": r"...E..",
}
)
|
py | 1a4171945cec6448d4d9a102dc57ba80337c78cc | """sneh_figma_test_app_22681 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Sneh_Figma_Test_App"
admin.site.site_title = "Sneh_Figma_Test_App Admin Portal"
admin.site.index_title = "Sneh_Figma_Test_App Admin"
# swagger
api_info = openapi.Info(
title="Sneh_Figma_Test_App API",
default_version="v1",
description="API documentation for Sneh_Figma_Test_App App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
|
py | 1a4171c1aa1cf14827d9c11e34a228cc1a2718a9 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
PYTHON_VERSION_COMPATIBILITY = "PY3"
DEPS = [
'build',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/raw_io',
'run',
'vars',
]
def RunSteps(api):
api.vars.setup()
checkout_root = api.vars.cache_dir.join('work')
out_dir = checkout_root.join(
'skia', 'out', api.vars.builder_name, api.vars.configuration)
api.build(checkout_root=checkout_root, out_dir=out_dir)
dst = api.vars.swarming_out_dir.join('out', api.vars.configuration)
api.build.copy_build_products(out_dir=out_dir, dst=dst)
api.run.check_failure()
TEST_BUILDERS = [
'Build-Debian9-Clang-arm-Release-Flutter_Android_Docker',
'Build-Debian10-GCC-x86-Debug-Docker',
'Build-Debian10-GCC-x86_64-Debug-Docker',
'Build-Debian10-GCC-x86_64-Release-NoGPU_Docker',
'Build-Debian10-GCC-x86_64-Release-Shared_Docker',
'Build-Debian10-Clang-arm-Release-Android_API26',
'Build-Debian10-Clang-arm-Release-Android_ASAN',
'Build-Debian10-Clang-arm-Release-Chromebook_GLES',
'Build-Debian10-Clang-arm-Release-Flutter_Android',
'Build-Debian10-Clang-arm64-Debug-Android_HWASAN',
'Build-Debian10-Clang-arm64-Release-Android_Wuffs',
'Build-Debian10-Clang-x86_64-Debug-Chromebook_GLES',
'Build-Debian10-Clang-x86_64-Debug-Coverage',
'Build-Debian10-Clang-x86_64-Debug-MSAN',
'Build-Debian10-Clang-x86_64-Debug-SK_CPU_LIMIT_SSE41',
'Build-Debian10-Clang-x86_64-Debug-SafeStack',
'Build-Debian10-Clang-x86_64-Debug-SwiftShader_MSAN',
'Build-Debian10-Clang-x86_64-Debug-TSAN',
'Build-Debian10-Clang-x86_64-Debug-Tidy',
'Build-Debian10-Clang-x86_64-Debug-Vulkan_TSAN',
'Build-Debian10-Clang-x86_64-Debug-Wuffs',
'Build-Debian10-Clang-x86_64-Release-ANGLE',
'Build-Debian10-Clang-x86_64-Release-ASAN',
'Build-Debian10-Clang-x86_64-Release-CMake',
'Build-Debian10-Clang-x86_64-Release-Fast',
'Build-Debian10-Clang-x86_64-Release-NoDEPS',
'Build-Debian10-Clang-x86_64-Release-Static',
'Build-Debian10-Clang-x86_64-Release-SwiftShader',
'Build-Debian10-Clang-x86_64-Release-Vulkan',
'Build-Debian10-EMCC-asmjs-Debug-PathKit',
'Build-Debian10-EMCC-asmjs-Release-PathKit',
'Build-Debian10-EMCC-wasm-Debug-CanvasKit',
'Build-Debian10-EMCC-wasm-Debug-PathKit',
'Build-Debian10-EMCC-wasm-Release-CanvasKit_CPU',
'Build-Debian10-EMCC-wasm-Release-PathKit',
'Build-Mac-Clang-arm64-Debug-Android_Vulkan',
'Build-Mac-Clang-arm64-Debug-iOS',
"Build-Mac-Clang-arm64-Debug-Graphite",
"Build-Mac-Clang-arm64-Debug-Graphite_NoGpu",
"Build-Mac-Clang-arm64-Release-Graphite",
'Build-Mac-Xcode11.4.1-arm64-Debug-iOS',
'Build-Mac-Clang-x86_64-Debug-ASAN',
'Build-Mac-Clang-x86_64-Debug-CommandBuffer',
'Build-Mac-Clang-x86_64-Debug-Metal',
'Build-Win-Clang-arm64-Release-Android',
'Build-Win-Clang-x86-Debug-Exceptions',
'Build-Win-Clang-x86_64-Debug-ANGLE',
'Build-Win-Clang-x86_64-Release-Direct3D',
'Build-Win-Clang-x86_64-Release-Shared',
"Build-Win-Clang-x86_64-Release-Dawn",
'Build-Win-Clang-x86_64-Release-Vulkan',
'Housekeeper-PerCommit-CheckGeneratedFiles',
]
# Default properties used for TEST_BUILDERS.
defaultProps = lambda buildername: dict(
buildername=buildername,
repository='https://skia.googlesource.com/skia.git',
revision='abc123',
path_config='kitchen',
patch_set=2,
swarm_out_dir='[SWARM_OUT_DIR]'
)
def GenTests(api):
for buildername in TEST_BUILDERS:
test = (
api.test(buildername) +
api.properties(**defaultProps(buildername))
)
if 'Win' in buildername:
test += api.platform('win', 64)
yield test
yield (
api.test('unknown-docker-image') +
api.properties(**defaultProps('Build-Unix-GCC-x86_64-Release-Docker')) +
api.expect_exception('Exception')
)
|
py | 1a4171e04584a6513e286dbe9a69316548dd3a17 | """
This module contains the classes for Nodes, Arcs used in the optimizer with mnetgen format,
for the solvers.
"""
from typing import List
class Arc:
def __init__(
self,
name: int,
from_node: int,
to_node: int,
commodity: int,
cost: float,
capacity: int,
mutual_capacity_id: int,
):
self.name = name
self.from_node = from_node
self.to_node = to_node
self.commodity = commodity
self.cost = cost
self.capacity = capacity
self.mutual_capacity_id = mutual_capacity_id
def __repr__(self):
return self.__str__()
def __str__(self):
return "Arc(%s,%s,%s,%s,%s,%s,%s)" % (
self.name,
self.from_node,
self.to_node,
self.commodity,
self.cost,
self.capacity,
self.mutual_capacity_id,
)
class Node:
def __init__(self, node_id: int, commodity: int, supply: int):
self.node_id = node_id
self.commodity = commodity
self.supply = supply
def __repr__(self):
return self.__str__()
def __str__(self):
return "Node(%s,%s,%s)" % (self.node_id, self.commodity, self.supply)
class MutualCapacity:
def __init__(self, mutual_capacity_id: int, capacity: int):
self.mutual_capacity_id = mutual_capacity_id
self.capacity = capacity
class MnetgenFormatWriter:
__SEP = "\t"
def __init__(
self, nodes: List[Node], arcs: List[Arc], capacities: List[MutualCapacity]
):
self.nodes = nodes
self.arcs = arcs
self.capacities = capacities
def write(self, dir: str, filename: str):
arc_lines = self.__arc_lines()
node_lines = self.__node_lines()
mutual_capacity_lines = self.__mutual_capacity_lines()
summary_lines = self.__nod_lines()
self.__write_lines(arc_lines, f"{dir}/{filename}.arc")
self.__write_lines(node_lines, f"{dir}/{filename}.sup")
self.__write_lines(mutual_capacity_lines, f"{dir}/{filename}.mut")
self.__write_lines(summary_lines, f"{dir}/{filename}.nod")
# Arc file (*.arc):
#
# < arc name > , < from node > , < to node > , < commodity > , < cost > ,
# < capacity > , < mutual capacity pointer >
def __arc_lines(self):
SEP = self.__SEP
arc_lines = []
for a in self.arcs:
arc_lines.append(
f"{a.name}{SEP}{a.from_node}{SEP}{a.to_node}{SEP}{a.commodity}{SEP}{a.cost}{SEP}{a.capacity}{SEP}{a.mutual_capacity_id}"
)
return arc_lines
# Node supply file (*.nod if FOUR_F == 0, *.sup otherwise):
#
# < node > , < commodity > , < supply >
def __node_lines(self):
SEP = self.__SEP
node_lines = []
for n in self.nodes:
node_lines.append(f"{n.node_id}{SEP}{n.commodity}{SEP}{n.supply}")
return node_lines
# Mutual capacity file (*.mut):
#
# < mutual capacity pointer > , < mutual capacity >
def __mutual_capacity_lines(self):
SEP = self.__SEP
mc_lines = []
for mc in self.capacities:
mc_lines.append(f"{mc.mutual_capacity_id}{SEP}{mc.capacity}")
return mc_lines
def __write_lines(self, ds: List[str], _writedir: str):
with open(_writedir, "w+") as f:
for i, line in enumerate(ds):
if i != len(ds) - 1:
f.write("%s\n" % line)
else:
f.write("%s" % line)
def __nod_lines(self):
SEP = self.__SEP
commodities = len(set([node.commodity for node in self.nodes]))
nodes = len(self.nodes)
arcs = len(self.arcs)
capacitated = sum(
[1 for arc in self.arcs if arc.mutual_capacity_id != 0]
) # first bundle is for uncapacitateds.
nod_line = f"{commodities}{SEP}{nodes}{SEP}{arcs}{SEP}{capacitated}"
print(f"nod_line {nod_line}")
return [nod_line]
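if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): a single-commodity
    # instance with two nodes and one capacitated arc. Bundle 0 is reserved for
    # uncapacitated arcs, matching the convention in __nod_lines().
    import tempfile
    nodes = [Node(1, 1, 10), Node(2, 1, -10)]
    arcs = [Arc(1, 1, 2, 1, cost=2.5, capacity=10, mutual_capacity_id=1)]
    capacities = [MutualCapacity(0, 0), MutualCapacity(1, 10)]
    out_dir = tempfile.mkdtemp()
    MnetgenFormatWriter(nodes, arcs, capacities).write(out_dir, "tiny")
    # -> writes tiny.arc, tiny.sup, tiny.mut and tiny.nod under out_dir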
|