blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 3–616 chars) | content_id (string, 40 chars) | detected_licenses (list, 0–112 items) | license_type (string, 2 classes) | repo_name (string, 5–115 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable ⌀) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable ⌀) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable ⌀) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 to 10.2M) | extension (string, 188 classes) | content (string, 3 to 10.2M chars) | authors (list, 1 item) | author_id (string, 1–132 chars)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
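For readability, here is a minimal sketch of how one row of the table above could be modeled in Python. The field names and dtypes are taken from the header; the class name, defaults, and comments are illustrative assumptions, not part of the dataset itself.

```python
from dataclasses import dataclass, field
from datetime import datetime
from typing import List, Optional

@dataclass
class CodeFileRow:
    """One row of the table above (field names/types follow the header; the class itself is illustrative)."""
    blob_id: str                                    # 40-char identifier
    directory_id: str                               # 40-char identifier
    path: str                                       # repo-relative file path
    content_id: str                                 # 40-char identifier
    repo_name: str
    snapshot_id: str
    revision_id: str
    branch_name: str
    content: str                                    # full source file text
    detected_licenses: List[str] = field(default_factory=list)
    license_type: Optional[str] = None              # e.g. "permissive" or "no_license"
    visit_date: Optional[datetime] = None
    revision_date: Optional[datetime] = None
    committer_date: Optional[datetime] = None
    github_id: Optional[int] = None                 # ⌀ marks a missing value
    star_events_count: int = 0
    fork_events_count: int = 0
    gha_license_id: Optional[str] = None
    gha_event_created_at: Optional[datetime] = None
    gha_created_at: Optional[datetime] = None
    gha_language: Optional[str] = None
    src_encoding: str = "UTF-8"
    language: str = "Python"                        # single class in this slice
    is_vendor: bool = False
    is_generated: bool = False
    length_bytes: int = 0
    extension: str = "py"
    authors: List[str] = field(default_factory=list)
    author_id: str = ""
```

Each record below corresponds to one such row, with the multi-line `content` field holding the source file verbatim.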
1b48fa35782b5a093d246a4325888a0a5482ada6
|
780fe51f58008bc901aef74baccacb22a796b308
|
/thrift/perf/py3/py3_server.py
|
8131991cc4b305b002cfb7b0bdf21e4e587a4f74
|
[
"Apache-2.0"
] |
permissive
|
joseroubert08/fbthrift
|
f2e9f3adb9cca4c5248070383310d4573136fbb5
|
8edc86b4c3b991039e110f378cfa1d8a19665b55
|
refs/heads/master
| 2021-01-16T19:00:49.105359 | 2017-01-15T04:42:45 | 2017-01-15T04:47:30 | 79,018,635 | 1 | 0 | null | 2017-01-15T09:15:45 | 2017-01-15T09:15:45 | null |
UTF-8
|
Python
| false | false | 1,603 |
py
|
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from argparse import ArgumentParser
import asyncio
import signal
import sys
from thrift.lib.py3.thrift_server import ThriftServer
from apache.thrift.test.py3.load_handler import LoadTestHandler
def main():
parser = ArgumentParser()
parser.add_argument(
'--port',
default=1234,
type=int,
help='Port to run on'
)
options = parser.parse_args()
loop = asyncio.get_event_loop()
handler = LoadTestHandler(loop)
server = ThriftServer(handler, options.port, loop=loop)
loop.add_signal_handler(signal.SIGINT, server.stop)
loop.add_signal_handler(signal.SIGTERM, server.stop)
print("Running Py3 server on port {}".format(options.port))
loop.run_until_complete(server.serve())
if __name__ == '__main__':
sys.exit(main())
|
[
"[email protected]"
] | |
a333ccba45a929a80bcfc1e647c92d5977fe109c
|
da0a7446122a44887fa2c4f391e9630ae033daa2
|
/python/ray/air/tests/test_resource_changing.py
|
2299c5dabb9a112d5349433b2a4a5c95bfbd96ac
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
whiledoing/ray
|
d8d9ba09b7545e8fd00cca5cfad451278e61fffd
|
9272bcbbcae1630c5bb2db08a8279f0401ce6f92
|
refs/heads/master
| 2023-03-06T16:23:18.006757 | 2022-07-22T02:06:47 | 2022-07-22T02:06:47 | 252,420,044 | 0 | 0 |
Apache-2.0
| 2023-03-04T08:57:20 | 2020-04-02T10:07:23 |
Python
|
UTF-8
|
Python
| false | false | 4,697 |
py
|
from ray.air import session
from ray.air.checkpoint import Checkpoint
from ray.air.config import FailureConfig, RunConfig, ScalingConfig
from ray.air.constants import TRAIN_DATASET_KEY
from ray.tune.tune_config import TuneConfig
from ray.tune.tuner import Tuner
from ray.train.data_parallel_trainer import DataParallelTrainer
from ray.train.xgboost import XGBoostTrainer
from sklearn.datasets import load_breast_cancer
import pandas as pd
import pytest
import ray
from ray import tune
from ray.tune.schedulers.resource_changing_scheduler import (
DistributeResources,
ResourceChangingScheduler,
)
from ray.tune.schedulers.async_hyperband import ASHAScheduler
@pytest.fixture
def ray_start_8_cpus():
address_info = ray.init(num_cpus=8)
yield address_info
# The code after the yield will run as teardown code.
ray.shutdown()
def train_fn(config):
start_epoch = 0
print(session.get_trial_resources())
checkpoint = session.get_checkpoint()
if checkpoint:
# assume that we have run the session.report() example
# and successfully save some model weights
checkpoint_dict = checkpoint.to_dict()
start_epoch = checkpoint_dict.get("epoch", -1) + 1
# wrap the model in DDP
for epoch in range(start_epoch, config["num_epochs"]):
checkpoint = Checkpoint.from_dict(dict(epoch=epoch))
session.report(
{
"metric": config["metric"] * epoch,
"epoch": epoch,
"num_cpus": session.get_trial_resources().required_resources["CPU"],
},
checkpoint=checkpoint,
)
class AssertingDataParallelTrainer(DataParallelTrainer):
def training_loop(self) -> None:
scaling_config = self._validate_scaling_config(self.scaling_config)
pgf = scaling_config.as_placement_group_factory()
tr = session.get_trial_resources()
assert pgf == tr, (pgf, tr)
return super().training_loop()
class AssertingXGBoostTrainer(XGBoostTrainer):
@property
def _ray_params(self):
scaling_config = self._validate_scaling_config(self.scaling_config)
assert (
scaling_config.as_placement_group_factory() == session.get_trial_resources()
)
return super()._ray_params
def test_data_parallel_trainer(ray_start_8_cpus):
num_workers = 2
trainer = AssertingDataParallelTrainer(
train_fn, scaling_config=ScalingConfig(num_workers=num_workers)
)
tuner = Tuner(
trainer,
param_space={
"train_loop_config": {
"num_epochs": 100,
"metric": tune.grid_search([1, 2, 3, 4, 5]),
}
},
tune_config=TuneConfig(
mode="max",
metric="metric",
scheduler=ResourceChangingScheduler(
ASHAScheduler(),
resources_allocation_function=DistributeResources(
add_bundles=True, reserve_resources={"CPU": 1}
),
),
),
run_config=RunConfig(failure_config=FailureConfig(fail_fast=True)),
)
result_grid = tuner.fit()
assert not any(x.error for x in result_grid)
# + 1 for Trainable
assert result_grid.get_dataframe()["num_cpus"].max() > num_workers + 1
def test_gbdt_trainer(ray_start_8_cpus):
data_raw = load_breast_cancer()
dataset_df = pd.DataFrame(data_raw["data"], columns=data_raw["feature_names"])
dataset_df["target"] = data_raw["target"]
train_ds = ray.data.from_pandas(dataset_df).repartition(16)
trainer = AssertingXGBoostTrainer(
datasets={TRAIN_DATASET_KEY: train_ds},
label_column="target",
scaling_config=ScalingConfig(num_workers=2),
params={
"objective": "binary:logistic",
"eval_metric": ["logloss"],
},
)
tuner = Tuner(
trainer,
param_space={
"num_boost_round": 100,
"params": {
"eta": tune.grid_search([0.28, 0.29, 0.3, 0.31, 0.32]),
},
},
tune_config=TuneConfig(
mode="min",
metric="train-logloss",
scheduler=ResourceChangingScheduler(
ASHAScheduler(),
resources_allocation_function=DistributeResources(
add_bundles=True, reserve_resources={"CPU": 1}
),
),
),
run_config=RunConfig(failure_config=FailureConfig(fail_fast=True)),
)
result_grid = tuner.fit()
assert not any(x.error for x in result_grid)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-x", __file__]))
|
[
"[email protected]"
] | |
71dbebb820dbd7f4b8c24505f4b638a923e62c66
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2475/60758/282446.py
|
47d5f768676492e85fb963899c2835b0bc9fed29
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 290 |
py
|
k=int(input())
for qqq in range(0,k):
n=int(input())
num=list(map(int,input().split()))
num.sort()
out=0
count=0
for i in range(n-1):
if(num[i]+1==num[i+1]):
count+=1
out=max(out,count)
else:
count=0
print(out)
|
[
"[email protected]"
] | |
89a87ed681efbb6b832737666b35f803561484c1
|
33226b2cf373cb2d1ceac94fbc2a1558c1fd4f65
|
/Simple_BBS/Regiest_models.py
|
e9072fef27185d2d6b34304fe880859e4fe39411
|
[] |
no_license
|
cnbjljf/simple_project
|
af54c6b494aa7f807e0bcfdd4fabfebf1a15cd76
|
95287682d7406be76c6dcd2974174fc2c1f4a372
|
refs/heads/master
| 2020-04-02T21:02:22.292978 | 2016-07-03T06:35:02 | 2016-07-03T06:35:02 | 62,478,177 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 454 |
py
|
#!/usr/bin/env python
'''
'''
import os
import sys
path = os.path.dirname( os.path.dirname( __file__ ) )
sys.path.append( path )
from django.contrib import admin
# register your models here
from django.apps import apps
from django.contrib.admin.sites import AlreadyRegistered
app_models = apps.get_app_config('Simple_BBS').get_models()
for model in app_models:
try:
admin.site.register(model)
except AlreadyRegistered:
pass
|
[
"[email protected]"
] | |
4831effd489528b9694cd5a1b45cb52a10ed6b7b
|
0f089307fe04a68569fe20bdb78be6ba43a589ea
|
/tests/helpers/test_restore_state.py
|
3a4c058f8534fdffa7677a0ae4d11a4d24057fec
|
[
"Apache-2.0"
] |
permissive
|
wanman/home-alone
|
6983d4e25bbf6b046cc6eaf426816c2a2dca4eea
|
633aaed22b0de0129d1e72e23bcd974b9ce13656
|
refs/heads/master
| 2020-05-21T08:19:00.077989 | 2017-03-10T22:14:31 | 2017-03-10T22:14:31 | 84,603,132 | 1 | 0 | null | 2017-03-10T22:21:20 | 2017-03-10T21:44:26 |
Python
|
UTF-8
|
Python
| false | false | 3,475 |
py
|
"""The tests for the Restore component."""
import asyncio
from datetime import timedelta
from unittest.mock import patch, MagicMock
from homeassistant.bootstrap import setup_component
from homeassistant.const import EVENT_HOMEASSISTANT_START
from homeassistant.core import CoreState, split_entity_id, State
import homeassistant.util.dt as dt_util
from homeassistant.components import input_boolean, recorder
from homeassistant.helpers.restore_state import (
async_get_last_state, DATA_RESTORE_CACHE)
from tests.common import (
get_test_home_assistant, mock_coro, init_recorder_component)
@asyncio.coroutine
def test_caching_data(hass):
"""Test that we cache data."""
hass.config.components.add('recorder')
hass.state = CoreState.starting
states = [
State('input_boolean.b0', 'on'),
State('input_boolean.b1', 'on'),
State('input_boolean.b2', 'on'),
]
with patch('homeassistant.helpers.restore_state.last_recorder_run',
return_value=MagicMock(end=dt_util.utcnow())), \
patch('homeassistant.helpers.restore_state.get_states',
return_value=states), \
patch('homeassistant.helpers.restore_state.async_get_instance',
return_value=mock_coro()):
state = yield from async_get_last_state(hass, 'input_boolean.b1')
assert DATA_RESTORE_CACHE in hass.data
assert hass.data[DATA_RESTORE_CACHE] == {st.entity_id: st for st in states}
assert state is not None
assert state.entity_id == 'input_boolean.b1'
assert state.state == 'on'
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
yield from hass.async_block_till_done()
assert DATA_RESTORE_CACHE not in hass.data
def _add_data_in_last_run(entities):
"""Add test data in the last recorder_run."""
# pylint: disable=protected-access
t_now = dt_util.utcnow() - timedelta(minutes=10)
t_min_1 = t_now - timedelta(minutes=20)
t_min_2 = t_now - timedelta(minutes=30)
recorder_runs = recorder.get_model('RecorderRuns')
states = recorder.get_model('States')
with recorder.session_scope() as session:
run = recorder_runs(
start=t_min_2,
end=t_now,
created=t_min_2
)
recorder._INSTANCE._commit(session, run)
for entity_id, state in entities.items():
dbstate = states(
entity_id=entity_id,
domain=split_entity_id(entity_id)[0],
state=state,
attributes='{}',
last_changed=t_min_1,
last_updated=t_min_1,
created=t_min_1)
recorder._INSTANCE._commit(session, dbstate)
def test_filling_the_cache():
"""Test filling the cache from the DB."""
test_entity_id1 = 'input_boolean.b1'
test_entity_id2 = 'input_boolean.b2'
hass = get_test_home_assistant()
hass.state = CoreState.starting
init_recorder_component(hass)
_add_data_in_last_run({
test_entity_id1: 'on',
test_entity_id2: 'off',
})
hass.block_till_done()
setup_component(hass, input_boolean.DOMAIN, {
input_boolean.DOMAIN: {
'b1': None,
'b2': None,
}})
hass.start()
state = hass.states.get('input_boolean.b1')
assert state
assert state.state == 'on'
state = hass.states.get('input_boolean.b2')
assert state
assert state.state == 'off'
hass.stop()
|
[
"[email protected]"
] | |
98a6e00741c6fdbf71c95bdee694690cc1d91e5d
|
8ebc3925894d4f796efb703cdf3254fc56724c3a
|
/aws-apigateway-py-routes/__main__.py
|
c243fc1ce23aa47798dadf5ad13b3cd563bd256e
|
[
"Apache-2.0"
] |
permissive
|
pulumi/examples
|
8db27b8847f8c05bcc8d99cdec8eb6c7b7ffa2a3
|
26ffb4bb327f00457796c96676e7db5e25e2bbd6
|
refs/heads/master
| 2023-09-04T04:56:53.098380 | 2023-08-31T14:33:12 | 2023-08-31T14:33:12 | 108,589,232 | 2,156 | 974 |
Apache-2.0
| 2023-09-13T23:27:18 | 2017-10-27T19:50:31 |
TypeScript
|
UTF-8
|
Python
| false | false | 5,573 |
py
|
# Copyright 2016-2021, Pulumi Corporation.
import json
import pulumi
import pulumi_aws as aws
import pulumi_aws_apigateway as apigateway
import lambdas
from dns import configure_dns
# Create a Cognito User Pool of authorized users
user_pool = aws.cognito.UserPool("user-pool")
user_pool_client = aws.cognito.UserPoolClient(
"user-pool-client", user_pool_id=user_pool.id, explicit_auth_flows=["ADMIN_NO_SRP_AUTH"])
# Define an endpoint that invokes a lambda to handle requests
api = apigateway.RestAPI('api', routes=[
# Serve an entire directory of static content
apigateway.RouteArgs(path="static", local_path="www"),
# Invoke our Lambda to handle a single route
apigateway.RouteArgs(path="lambda", method="GET",
event_handler=lambdas.hello_handler),
# Proxy requests to another service
apigateway.RouteArgs(path="proxy", target=apigateway.TargetArgs(
uri="https://www.google.com", type="http_proxy")),
# Use Swagger to define an HTTP proxy route
apigateway.RouteArgs(path="swagger", method="GET", data={
"x-amazon-apigateway-integration": {
"httpMethod": "GET",
"passthroughBehavior": "when_no_match",
"type": "http_proxy",
"uri": "https://httpbin.org/uuid",
},
}),
# Authorize requests using Cognito
apigateway.RouteArgs(
path="cognito-authorized",
method="GET",
event_handler=lambdas.hello_handler,
# Define an authorizer which uses Cognito to validate the token from the Authorization header
authorizers=[apigateway.AuthorizerArgs(
parameter_name="Authorization",
identity_source=["method.request.header.Authorization"],
provider_arns=[user_pool.arn]
)]
),
# Authorize requests using a Lambda function
apigateway.RouteArgs(path="lambda-authorized", method="GET", event_handler=lambdas.hello_handler,
authorizers=[apigateway.AuthorizerArgs(
auth_type="custom",
parameter_name="Authorization",
type="request",
identity_source=[
"method.request.header.Authorization"],
handler=lambdas.auth_lambda
)]),
apigateway.RouteArgs(path="key-authorized", method="GET",
event_handler=lambdas.hello_handler,
api_key_required=True)
])
# Define whole API using swagger (OpenAPI)
swagger_api = apigateway.RestAPI("swagger-api",
swagger_string=json.dumps({
"swagger": "2.0",
"info": {
"title": "example",
"version": "1.0",
},
"paths": {
"/": {
"get": {
"x-amazon-apigateway-integration": {
"httpMethod": "GET",
"passthroughBehavior": "when_no_match",
"type": "http_proxy",
"uri": "https://httpbin.org/uuid",
},
},
},
},
"x-amazon-apigateway-binary-media-types": ["*/*"],
})
)
# Create an API key to manage usage
api_key = aws.apigateway.ApiKey("api-key")
# Define usage plan for an API stage
usage_plan = aws.apigateway.UsagePlan("usage-plan",
api_stages=[aws.apigateway.UsagePlanApiStageArgs(
api_id=api.api.id,
stage=api.stage.stage_name)])
# Associate the key to the plan
aws.apigateway.UsagePlanKey('usage-plan-key',
key_id=api_key.id,
key_type="API_KEY",
usage_plan_id=usage_plan.id)
# Set up DNS if a domain name has been configured
config = pulumi.Config()
domain = config.get("domain")
if domain != None:
# Load DNS zone for the domain
zone = aws.route53.get_zone_output(name=config.require("dns-zone"))
# Create SSL Certificate and DNS entries
api_domain_name = configure_dns(domain=domain, zone_id=zone.id)
# Tell API Gateway what to serve on our custom domain
base_path_mapping = aws.apigateway.BasePathMapping("api-domain-mapping",
rest_api=api.api.id,
stage_name=api.stage.stage_name,
domain_name=api_domain_name.domain_name)
pulumi.export(
"custom-url", base_path_mapping.domain_name.apply(lambda domain: f'https://{domain}/'))
pulumi.export("url", api.url)
pulumi.export("user-pool-id", user_pool.id)
pulumi.export("user-pool-client-id", user_pool_client.id)
pulumi.export("swagger-url", swagger_api.url)
pulumi.export("api-key-value", api_key.value)
|
[
"[email protected]"
] | |
88144a5b367992096a70de7bf2ab6edca3aca8a0
|
a83bafc38b514a0339a5991be15870551ac49681
|
/bimdata_api_client/model/patched_unit_request.py
|
fe71e0a5af04aea155fba78e4e1175716c5abd9d
|
[] |
no_license
|
bimdata/python-api-client
|
4ec2f81e404ef88d3a7e4d08e18965b598c567a2
|
c9b6ea0fbb4729b2a1c10522bdddfe08d944739d
|
refs/heads/master
| 2023-08-17T13:38:43.198097 | 2023-08-09T12:48:12 | 2023-08-09T12:48:12 | 131,603,315 | 0 | 4 | null | 2022-10-10T15:21:26 | 2018-04-30T14:06:15 |
Python
|
UTF-8
|
Python
| false | false | 14,548 |
py
|
"""
BIMData API
BIMData API is a tool to interact with your models stored on BIMData’s servers. Through the API, you can manage your projects, the clouds, upload your IFC files and manage them through endpoints. # noqa: E501
The version of the OpenAPI document: v1 (v1)
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from bimdata_api_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from bimdata_api_client.exceptions import ApiAttributeError
def lazy_import():
from bimdata_api_client.model.unit_request import UnitRequest
globals()['UnitRequest'] = UnitRequest
class PatchedUnitRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
('type',): {
'min_length': 1,
},
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'type': (str,), # noqa: E501
'name': (str, none_type,), # noqa: E501
'unit_type': (str, none_type,), # noqa: E501
'prefix': (str, none_type,), # noqa: E501
'dimensions': ([float], none_type,), # noqa: E501
'conversion_factor': (float, none_type,), # noqa: E501
'conversion_baseunit': (UnitRequest,), # noqa: E501
'elements': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type,), # noqa: E501
'is_default': (bool,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'type': 'type', # noqa: E501
'name': 'name', # noqa: E501
'unit_type': 'unit_type', # noqa: E501
'prefix': 'prefix', # noqa: E501
'dimensions': 'dimensions', # noqa: E501
'conversion_factor': 'conversion_factor', # noqa: E501
'conversion_baseunit': 'conversion_baseunit', # noqa: E501
'elements': 'elements', # noqa: E501
'is_default': 'is_default', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""PatchedUnitRequest - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
type (str): IfcDerivedUnit, IfcContextDependentUnit, IfcConversionBasedUnit, IfcSIUnit or IfcMonetaryUnit. [optional] # noqa: E501
name (str, none_type): Name of the unit (ex: DEGREE). [optional] # noqa: E501
unit_type (str, none_type): IFC type of the unit or user defined type (ex: PLANEANGLEUNIT for DEGREE and RADIAN). [optional] # noqa: E501
prefix (str, none_type): Literal prefix for scale (ex: MILLI, KILO, etc..). [optional] # noqa: E501
dimensions ([float], none_type): List of 7 units dimensions. [optional] # noqa: E501
conversion_factor (float, none_type): Factor of conversion and base unit id (ex: DEGREE from RADIAN with factor 0.0174532925199433). [optional] # noqa: E501
conversion_baseunit (UnitRequest): [optional] # noqa: E501
elements ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): List of constitutive unit elements by id with corresponding exponent (ex: [meterID/1, secondID/-1] for velocity). [optional] # noqa: E501
is_default (bool): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""PatchedUnitRequest - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
type (str): IfcDerivedUnit, IfcContextDependentUnit, IfcConversionBasedUnit, IfcSIUnit or IfcMonetaryUnit. [optional] # noqa: E501
name (str, none_type): Name of the unit (ex: DEGREE). [optional] # noqa: E501
unit_type (str, none_type): IFC type of the unit or user defined type (ex: PLANEANGLEUNIT for DEGREE and RADIAN). [optional] # noqa: E501
prefix (str, none_type): Literal prefix for scale (ex: MILLI, KILO, etc..). [optional] # noqa: E501
dimensions ([float], none_type): List of 7 units dimensions. [optional] # noqa: E501
conversion_factor (float, none_type): Factor of conversion and base unit id (ex: DEGREE from RADIAN with factor 0.0174532925199433). [optional] # noqa: E501
conversion_baseunit (UnitRequest): [optional] # noqa: E501
elements ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): List of constitutive unit elements by id with corresponding exponent (ex: [meterID/1, secondID/-1] for velocity). [optional] # noqa: E501
is_default (bool): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
[
"[email protected]"
] | |
99fa9801cdae9f52933c03149abc616dfe5cdadb
|
d418edb92b92d35a32a198d8675defb21448f513
|
/Assignment/4.py
|
4e6f781c3c332785be89906032891ee2fef22146
|
[] |
no_license
|
JAntonioMarin/CursoPython3
|
a75ce49696e23903398fc186c81b5fb7c2116c21
|
ba04888eb6e5495b5180cbc5ed7a5c804ee8dbaf
|
refs/heads/master
| 2020-12-27T04:18:21.010039 | 2020-02-17T12:09:37 | 2020-02-17T12:09:37 | 237,762,726 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 714 |
py
|
def num_max(enteros):
valorInicial = enteros[0]
valorMaximo = valorInicial
for valor in range (1, len(enteros)):
if(valorMaximo<enteros[valor]):
valorMaximo = enteros[valor]
elif(valorMaximo==enteros[valor]):
valorInicial = valorMaximo
if(valorInicial==valorMaximo):
print("Existen 2 o mas máximos")
else:
print("El valor maximo es", valorMaximo)
enteros = input("Por favor introduce enteros separados por espacio:")
cadenas = enteros.split(' ')
cadenaEnteros = []
for cadena in cadenas:
cadenaEnteros.append(int(cadena))
if(len(cadenaEnteros)>1):
num_max(cadenaEnteros)
else:
print("El valor maximo es", cadenaEnteros[0])
|
[
"[email protected]"
] | |
769fdc01c08347489e610c53259a047ffcf4ba3e
|
6d9fbe6e6a2abfd8455e92f6dba67a5f02d87f41
|
/lib/phonenumbers/data/region_HU.py
|
92c290b8f89c02dee52458951bc4d2c5236316bb
|
[] |
no_license
|
JamesBrace/InfluenceUWebLaunch
|
549d0b48ff3259b139cb891a19cb8b5382ffe2c8
|
332d25940e4b1b45a7a2a8200f77c8413543b199
|
refs/heads/master
| 2021-09-04T04:08:47.594900 | 2018-01-15T16:49:29 | 2018-01-15T16:49:29 | 80,778,825 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,133 |
py
|
"""Auto-generated file, do not edit by hand. HU metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_HU = PhoneMetadata(id='HU', country_code=36, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[1-9]\\d{7,8}', possible_number_pattern='\\d{6,9}', possible_length=(8, 9), possible_length_local_only=(6,)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:1\\d|2[2-9]|3[2-7]|4[24-9]|5[2-79]|6[23689]|7[2-9]|8[2-57-9]|9[2-69])\\d{6}', possible_number_pattern='\\d{6,8}', example_number='12345678', possible_length=(8,), possible_length_local_only=(6,)),
mobile=PhoneNumberDesc(national_number_pattern='(?:[257]0|3[01])\\d{7}', possible_number_pattern='\\d{9}', example_number='201234567', possible_length=(9,)),
toll_free=PhoneNumberDesc(national_number_pattern='[48]0\\d{6}', possible_number_pattern='\\d{8}', example_number='80123456', possible_length=(8,)),
premium_rate=PhoneNumberDesc(national_number_pattern='9[01]\\d{6}', possible_number_pattern='\\d{8}', example_number='90123456', possible_length=(8,)),
shared_cost=PhoneNumberDesc(),
personal_number=PhoneNumberDesc(),
voip=PhoneNumberDesc(national_number_pattern='21\\d{7}', possible_number_pattern='\\d{9}', example_number='211234567', possible_length=(9,)),
pager=PhoneNumberDesc(),
uan=PhoneNumberDesc(national_number_pattern='38\\d{7}', possible_number_pattern='\\d{6,9}', example_number='381234567', possible_length=(9,)),
voicemail=PhoneNumberDesc(),
no_international_dialling=PhoneNumberDesc(national_number_pattern='[48]0\\d{6}', possible_number_pattern='\\d{8}', example_number='80123456', possible_length=(8,)),
national_prefix='06',
national_prefix_for_parsing='06',
number_format=[NumberFormat(pattern='(1)(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['1'], national_prefix_formatting_rule='(\\1)'),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['[2-9]'], national_prefix_formatting_rule='(\\1)')],
mobile_number_portable_region=True)
|
[
"[email protected]"
] | |
00fbff804c306f16c0d80cab257452d1511c1d7a
|
f8f6dadfb4215776ee40f022265d6c87ad2cc65b
|
/examples/dev/coord_transform.py
|
b0f390b9fd0a2c8d78a51c7f10bfcf4361189195
|
[] |
no_license
|
oldbay/raster_tools
|
559112431146ccf3e0ed29a4a8ee3e3ac8adf025
|
171ed1313a0805cc2d7e8f8049914848ebee8331
|
refs/heads/master
| 2021-01-10T20:38:49.075348 | 2019-04-08T18:49:17 | 2019-04-08T18:49:17 | 31,228,764 | 11 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 196 |
py
|
from rtools import geom_conv
coords = [
(296153.369,7137678.937),
(296203.959,7137570.986),
(296256.938,7137645.476)
]
conv = geom_conv(32638)
print conv.coords_reproj(4326, *coords)
|
[
"[email protected]"
] | |
807a837d4cf69d4aa7173c4051e4c3c14d413ad2
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/datamgr/metadata/metadata/backend/mysql/replica_base.py
|
3ab330a878a927334f7842a1f2dc037e3d41de13
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661 | 2022-04-02T10:30:55 | 2022-04-02T10:30:55 | 381,257,882 | 101 | 51 |
NOASSERTION
| 2022-04-02T10:30:56 | 2021-06-29T06:10:01 |
Python
|
UTF-8
|
Python
| false | false | 3,555 |
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# ReplicaBase used to generate replica (mapping) table models; MixIn information is merged in automatically.
from collections import Sequence
from sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base
from metadata.db_models.meta_service.replica_conf import replica_mixins
class ReplicaMixIn(object):
"""
Parent MixIn class providing supplementary fields for replica tables.
"""
pass
replica_mixin_classes_info = {}
for module in replica_mixins:
for attr in dir(module):
item = getattr(module, attr)
if isinstance(item, type) and issubclass(item, ReplicaMixIn) and item is not ReplicaMixIn:
replica_mixin_classes_info[attr.split('MixIn')[0]] = item
class ReplicaMeta(DeclarativeMeta):
"""自动生成映射表Model的元类。"""
def __new__(mcs, name, bases, namespace):
# automatically add the mixin
if name == 'Base':
return super(ReplicaMeta, mcs).__new__(mcs, name, bases, namespace)
else:
namespace[str('__abstract__')] = True
table_args = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8', 'mysql_collate': 'utf8_general_ci'}
if namespace.get('__table_args__'):
if isinstance(namespace['__table_args__'], Sequence):
table_args_lst = list(namespace['__table_args__'])
if isinstance(table_args_lst[-1], dict):
table_args_lst[-1].update(table_args)
else:
table_args_lst.append(table_args)
else:
namespace['__table_args__'].update(table_args)
namespace['__table_args__'] = table_args
cls = super(ReplicaMeta, mcs).__new__(mcs, name, tuple(bases), namespace)
mix_bases = [cls]
if name in replica_mixin_classes_info:
mix_bases.insert(0, replica_mixin_classes_info[name])
mixed_cls = super(ReplicaMeta, mcs).__new__(mcs, str('Replica') + name, tuple(mix_bases), {})
return mixed_cls
ReplicaBase = declarative_base(metaclass=ReplicaMeta)
metadata = ReplicaBase.metadata
ReplicaBase.db_name = ReplicaBase._db_name = 'bkdata_meta'
|
[
"[email protected]"
] | |
bf79e6abe3010bd150292be1c8375b3bc68486c1
|
56939ddccf722903ef6cc7ebc6aa85ee6b80b6dc
|
/orders/migrations/0003_alter_ordermodel_user.py
|
ffe024e4fa843e2440cd5c3d6357c752c5ace8d0
|
[] |
no_license
|
Colibri7/felix_shop
|
d519b038446a49fc33cb1ce1c9108291e949c848
|
d9d635ad7797505503b56df2b36b4333bfa86b31
|
refs/heads/master
| 2023-07-21T03:30:36.714548 | 2021-08-30T11:03:53 | 2021-08-30T11:03:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 624 |
py
|
# Generated by Django 3.2.4 on 2021-07-22 09:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('orders', '0002_auto_20210717_2208'),
]
operations = [
migrations.AlterField(
model_name='ordermodel',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
]
|
[
"[email protected]"
] | |
db2dbeaf40c8d5ecc580df89a0eff4a1cd09dca8
|
a3ddd9a1cae3271d285daf272d733b67758d5cc7
|
/award/models.py
|
e54c876cf57587d41ec24e8352cfc90a1e61dccd
|
[] |
no_license
|
peroh/memba-api
|
21edf7757b1dfcd2a42b6f52ed3bc25d0780106a
|
e6e5cc5bbd2afdcba5c2b7e9ee45ff717e4b75f1
|
refs/heads/master
| 2021-09-08T05:09:56.948766 | 2018-03-07T11:00:34 | 2018-03-07T11:00:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 530 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class AwardCategory(models.Model):
title = models.CharField(max_length=128)
description = models.TextField()
def __str__(self):
return self.title
class Award(models.Model):
award_category = models.ForeignKey('award.AwardCategory')
member = models.ForeignKey('member.Member')
attained = models.DateField()
def __str__(self):
return self.award_category.__str__()
|
[
"[email protected]"
] | |
62669ff92a52f59f2282192d2d60739662712041
|
98b76260f5c31563aa40e76c412be514c1844fc2
|
/fHDHR_web/files/__init__.py
|
905566b59642e2809bef5b2c68fc79c8b3236bfc
|
[
"WTFPL"
] |
permissive
|
DanAustinGH/fHDHR_Locast
|
cb54b235200a6123213853a133d6231df3b3a1ea
|
002117b666ad650c523aedb0209f1c996d576169
|
refs/heads/main
| 2023-02-15T20:27:21.141592 | 2021-01-05T19:09:35 | 2021-01-05T19:09:35 | 327,135,296 | 0 | 0 |
WTFPL
| 2021-01-05T22:28:13 | 2021-01-05T22:28:13 | null |
UTF-8
|
Python
| false | false | 314 |
py
|
from .favicon_ico import Favicon_ICO
from .style_css import Style_CSS
from .device_xml import Device_XML
class fHDHR_Files():
def __init__(self, fhdhr):
self.fhdhr = fhdhr
self.favicon = Favicon_ICO(fhdhr)
self.style = Style_CSS(fhdhr)
self.device_xml = Device_XML(fhdhr)
|
[
"[email protected]"
] | |
0b8eb9d2a021eff067d7027dde5cd87766102069
|
a93fe443762b5183c10bfa2ea71dc1b000708f34
|
/spacecode/ibanpy/models.py
|
304714c7bd02f6d91ac422a7999168109ae8e4e2
|
[] |
no_license
|
zoleikha-mousavipak/Django_Advance-Models
|
06d89fe63d1210224c478cbd3e2c0a19987ade0b
|
c238a69134232b7d54c69233c082787913c7eabd
|
refs/heads/master
| 2020-11-25T00:04:22.553911 | 2019-12-16T14:29:31 | 2019-12-16T14:29:31 | 228,400,079 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 634 |
py
|
from django.db import models
class MyIBANField(models.Field):
def db_type(self, connection):
return 'char(25)'
def from_db_value(self, value, expression, connection):
return value
def to_python(self, value):
return value
def value_to_string(self, obj):
value = self.value_from_object(obj)
return self.get_prep_value(value)
class MyIBANModel(models.Model):
_iban = MyIBANField()
@property
def iban(self):
return self._iban
@iban.setter
def iban(self, value):
self._iban = value
def __str__(self):
return self.iban
|
[
"[email protected]"
] | |
1e3302bdc5b0746f1f6287f3c77657b08e6f1931
|
11c8dbe77dce5616e8e7ff5df647192887c279d3
|
/Sodda dasturlar/Sutkani boshidan secungacha necha minut va soat borligini aniqlash.py
|
bb0df7b1b966e4c76ca39f992fc06c7ad76bea7e
|
[] |
no_license
|
extremums/the-simplest-programs
|
1fe9be078dc53260c47e7c1e3b2a2300b287e862
|
71d9be72fac1013c7b8ee8c0f792d24fc54e854a
|
refs/heads/main
| 2023-04-27T07:20:13.836780 | 2021-05-11T11:03:47 | 2021-05-11T11:03:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 376 |
py
|
# Determine how many hours and minutes of the day have elapsed, given a number of seconds since the start of the day
sekund = input("Sekundni kiriting ")
if sekund.isdigit():
sekund = int(sekund)
soat = sekund//3600
sekund %= 3600
minut = sekund//60
sekund %= 60
print("Sutkani boshidan {} soat {} minut va {} secund o'tdi".format(soat,minut,sekund))
else :
print('Natural son kiritish lozim')
|
[
"[email protected]"
] | |
0afdacdf006dd4db28c2c037759bbef9c7385c71
|
ba895ee2765b60ddf2da15307f038c6a884da4ec
|
/month03/month03_django/django/day06/bookMgr/bookMgr/urls.py
|
3d84edec1ad804d943e88e23d695f8c78b14fa50
|
[] |
no_license
|
jay0613/2020-0720-note
|
dc53831b829f7e7437fc57937eef38ab9e3942e9
|
7b2babd30a4dd9897b7853527a07e8a8fe2ba3ea
|
refs/heads/master
| 2022-12-06T17:01:19.542832 | 2020-08-22T10:39:06 | 2020-08-22T10:39:06 | 281,112,932 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 807 |
py
|
"""bookMgr URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('bookstore/',include('bookstore.urls'))
]
|
[
"[email protected]"
] | |
8c96ef0652543325ee68e9afa0303c98f18ad8fb
|
e5c56d6340edd36167f53605e8d766972fe706ca
|
/sen/tui/widgets/graph.py
|
c019a34c1cb52dc740435f933d58b52881a6cbc6
|
[
"MIT"
] |
permissive
|
TomasTomecek/sen
|
47090e7edcbf7f9358cb256d5d9d6885ee662840
|
ec292b5a723cd59818e3a36a7ea5091625fb3258
|
refs/heads/master
| 2023-06-21T14:19:45.138546 | 2023-04-12T07:21:42 | 2023-04-12T07:21:42 | 43,115,475 | 1,011 | 78 |
MIT
| 2023-04-12T07:21:43 | 2015-09-25T07:13:43 |
Python
|
UTF-8
|
Python
| false | false | 1,686 |
py
|
import math
import logging
import urwid
logger = logging.getLogger(__name__)
def find_max(list_of_lists):
list_of_ints = [x[0] for x in list_of_lists]
m = max(list_of_ints)
try:
return 2 ** int(math.log2(m) + 1)
except ValueError:
return 1
class ContainerInfoGraph(urwid.BarGraph):
def __init__(self, fg, bg, graph_bg="graph_bg", bar_width=None):
"""
create a very simple graph
:param fg: attr for smoothing (fg needs to be set)
:param bg: attr for bars (bg needs to be set)
:param graph_bg: attr for graph background
:param bar_width: int, width of bars
"""
# satt smoothes graph lines
satt = {(1, 0): fg}
super().__init__(
[graph_bg, bg],
hatt=[fg],
satt=satt,
)
if bar_width is not None:
# breaks badly when set too high
self.set_bar_width(bar_width)
def render(self, size, focus=False):
data, top, hlines = self._get_data(size)
maxcol, maxrow = size
if len(data) < maxcol:
data += [[0] for x in range(maxcol - len(data))]
self.set_data(data, top, hlines)
logger.debug(data)
return super().render(size, focus)
def rotate_value(self, val, max_val=None, adaptive_max=False):
"""
"""
data, _, _ = self.data
data = data[1:] + [[int(val)]]
if adaptive_max:
max_val = find_max(data)
self.set_data(data, max_val)
return max_val
def set_max(self, value):
data, top, hlines = self.data
self.set_data(data, value, hlines)
|
[
"[email protected]"
] | |
9724ad7aa8b657a2fb8995698be8b31bb74da85b
|
4bfc3c184e736bb68dccbb6d5657f11c950df002
|
/tests/common/test_run/cos_run.py
|
bff15552b892ebb77b63bcedbb20fc31b913efe3
|
[
"Apache-2.0",
"Zlib",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] |
permissive
|
laekov/akg
|
159aa64ef6135222b5af784c408731275dfa9bdb
|
5316b8cb2340bbf71bdc724dc9d81513a67b3104
|
refs/heads/master
| 2022-12-01T04:09:03.548063 | 2020-08-19T08:38:57 | 2020-08-19T08:41:28 | 288,678,192 | 0 | 0 |
Apache-2.0
| 2020-08-19T08:41:30 | 2020-08-19T08:36:53 |
Python
|
UTF-8
|
Python
| false | false | 1,779 |
py
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""cos run function."""
import numpy as np
from tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from test_op import cos
from base import get_rtol_atol
from gen_random import random_gaussian
def cos_run(shape, dtype, attrs):
# Generate data for testing the op
inputs = random_gaussian(shape, miu=0, sigma=0.1).astype(dtype)
expect = np.cos(inputs)
# inputs and output to hold the data
output = np.full(shape, np.nan, dtype)
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(cos.cos, [shape], [dtype], kernel_name=kernel_name, attrs=attrs, tuning=t)
if t:
return mod, expect, (inputs, output)
else:
return mod
else:
mod = utils.op_build_test(cos.cos, [shape], [dtype], kernel_name='cos', attrs=attrs)
# result_tvm
output = utils.mod_launch(mod, (inputs, output))
# compare result
rtol, atol = get_rtol_atol("cos", dtype)
TestCase_Result = compare_tensor(output, expect, rtol=rtol, atol=atol, equal_nan=False)
return inputs, output, expect, TestCase_Result
|
[
"[email protected]"
] | |
1d4f134c14d925405ea4bbb418b726779acaf34c
|
20e4b646c2b17483d5bd298b2a0ae0361534bd75
|
/Book_python3_web_spider/6_Ajax结果提取/test.py
|
ea0c67459127673c8c005fbf760199bfda6a34b5
|
[] |
no_license
|
zzf531/WebSpider
|
fd68080fe9847d0a781024916a09959c58ef0ce3
|
6cd150aeeacd1bc6ec42c80579b2a4f25a39acce
|
refs/heads/master
| 2020-08-09T23:16:05.585782 | 2020-03-01T03:46:58 | 2020-03-01T03:46:58 | 214,198,000 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 537 |
py
|
"""
https://www.toutiao.com/api/search/content/?
aid=24&app_name=web_search
&offset=20&format=json
&keyword=%E8%A1%97%E6%8B%8D
&autoload=true
&count=20
&en_qc=1
&cur_tab=1
&from=search_tab
&pd=synthesis
&timestamp=1582979073578"""
params = {
'aid': '24',
'app_name': 'web_search',
'offset': offset,
'format': 'json',
'keyword': '街拍',
'autoload': 'true',
'count': '20',
'en_qc': '1',
'cur_tab': '1',
'from': 'search_tab',
'pd': 'synthesis',
}
|
[
"[email protected]"
] | |
8d76b7cbe0978b22f6558074ec5235ee0d556c1d
|
ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f
|
/Sourcem8/pirates/world/DistributedGAInterior.py
|
7ecd938ea4f6de9eca964fd400377c2af9d62318
|
[] |
no_license
|
BrandonAlex/Pirates-Online-Retribution
|
7f881a64ec74e595aaf62e78a39375d2d51f4d2e
|
980b7448f798e255eecfb6bd2ebb67b299b27dd7
|
refs/heads/master
| 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 17,111 |
py
|
import random
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.distributed import DistributedObject
from direct.distributed import DistributedCartesianGrid
from direct.showbase.PythonUtil import report
from pirates.audio import SoundGlobals
from pirates.audio.SoundGlobals import loadSfx
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import TODGlobals
from pirates.piratesbase import PLocalizer
from pirates.piratesgui import PiratesGuiGlobals, RadarGui
from pirates.world.LocationConstants import LocationIds, getLocationList
from pirates.world import DistributedIsland
from pirates.world import DistributedGameArea
from pirates.world import GridAreaBuilder
from pirates.world import WorldGlobals
from pirates.map.Minimap import InteriorMap
from pirates.map.Mappable import MappableGrid
from otp.otpbase import OTPRender
class DistributedGAInterior(DistributedGameArea.DistributedGameArea, DistributedCartesianGrid.DistributedCartesianGrid, MappableGrid):
notify = directNotify.newCategory('DistributedGAInterior')
def __init__(self, cr):
DistributedGameArea.DistributedGameArea.__init__(self, cr)
DistributedCartesianGrid.DistributedCartesianGrid.__init__(self, cr)
MappableGrid.__init__(self)
self.intervals = []
self.fadeInTrack = None
self.autoFadeIn = True
self.musicName = None
self.buildingInterior = False
def setBuildingInterior(self, buildingInterior):
self.buildingInterior = buildingInterior
def announceGenerate(self):
DistributedGameArea.DistributedGameArea.announceGenerate(self)
DistributedCartesianGrid.DistributedCartesianGrid.announceGenerate(self)
if not base.cr.activeWorld.worldGrid:
base.cr.activeWorld.setWorldGrid(self)
self.loadModel()
for obj in self.geom.findAllMatches('**/=ignore-lighting'):
obj.setLightOff(1000)
doorPlanes = self.geom.findAllMatches('**/door_collision_planar_*;+s')
doorPlanes.stash()
self.startProcessVisibility(base.localAvatar)
base.worldCreator.registerSpecialNodes(self, self.uniqueId)
self.builder.loadObjects()
self.enableFloors()
self.loadConnectors()
self.initBlockers(self)
self.startCustomEffects()
self.builder.resumeSFX()
self.closeSfx = loadSfx(SoundGlobals.SFX_DOOR_SLAM)
if self.buildingInterior:
self.setZ(100)
def disable(self):
self.stopCustomEffects()
self.builder.pauseSFX()
self.unloadConnectors()
if self.fadeInTrack:
self.fadeInTrack.pause()
self.fadeInTrack = None
self.ignoreAll()
DistributedGameArea.DistributedGameArea.disable(self)
DistributedCartesianGrid.DistributedCartesianGrid.disable(self)
del self.closeSfx
def delete(self):
del self.coll
self.geom.remove_node()
if self.modelPath != 'models/buildings/navy_jail_interior':
self.stopProcessVisibility()
self.handleExitGameArea(None)
self.fadeOutSoundAndMusic()
self.disableFloors()
for anim in self.intervals:
if anim:
anim.pause()
del anim
continue
self.intervals = []
DistributedGameArea.DistributedGameArea.delete(self)
DistributedCartesianGrid.DistributedCartesianGrid.delete(self)
def isGridParent(self):
return 1
def addObjectToGrid(self, av):
DistributedCartesianGrid.DistributedCartesianGrid.addObjectToGrid(self, av)
if av.isLocal():
self.updateAvReturnLocation(av)
def setLinks(self, links):
DistributedGameArea.DistributedGameArea.setLinks(self, links)
self.loadConnectors()
def setConnectorId(self, connectorId):
self.connectorId = connectorId
def enableFloors(self):
floorName = 'floor_interior'
self.uniqueFloorName = self.uniqueName(floorName)
collNodes = self.findAllMatches('**/+CollisionNode')
for collNode in collNodes:
curMask = collNode.node().getIntoCollideMask()
if curMask.hasBitsInCommon(PiratesGlobals.FloorBitmask):
collNode.setName(self.uniqueFloorName)
self.setupCannonballLandColl(collNode, PiratesGlobals.TargetBitmask | curMask, 0)
self.accept('enterFloor' + self.uniqueFloorName, self.handleEnterGameArea)
self.accept('exitFloor' + self.uniqueFloorName, self.handleExitGameArea)
def disableFloors(self):
if self.uniqueFloorName:
self.ignore('enterFloor' + self.uniqueFloorName)
self.ignore('exitFloor' + self.uniqueFloorName)
def handleEnterGameArea(self, collEntry):
localAvatar.interior = self
self.addObjectToGrid(localAvatar)
if self.buildingInterior:
localAvatar.setInterest(self.doId, 2709, ['ga-interior'])
DistributedGameArea.DistributedGameArea.handleEnterGameArea(self, collEntry)
def setLocation(self, parentId, zoneId, teleport = 0):
DistributedObject.DistributedObject.setLocation(self, parentId, zoneId)
self.reparentTo(render)
def getZoneFromXYZ(self, *args):
if self.buildingInterior:
return 2709
return DistributedCartesianGrid.DistributedCartesianGrid.getZoneFromXYZ(self, *args)
def handleExitGameArea(self, collEntry):
if collEntry:
return None
self.removeObjectFromGrid(localAvatar)
self.stopProcessVisibility()
localAvatar.interior = None
localAvatar.clearInterestNamed(None, ['ga-interior'])
DistributedGameArea.DistributedGameArea.handleExitGameArea(self, collEntry)
def loadModelParts(self):
if self.modelPath.startswith('models/islands/pir_m_are_isl_'):
self.geom = loader.loadModel(self.modelPath)
return None
modelBaseName = self.modelPath.split('_zero')[0]
terrainModel = loader.loadModel(modelBaseName + '_terrain', okMissing = True)
if terrainModel:
self.geom = terrainModel
else:
self.geom = loader.loadModel(self.modelPath)
return None
terrainDetailModel = loader.loadModel(modelBaseName + '_terrain_detail', okMissing = True)
if terrainDetailModel:
self.notify.debug('loading _terrain_detail')
terrainDetailModel.getChild(0).reparentTo(self.geom)
pierModel = loader.loadModel(modelBaseName + 'pier', okMissing = True)
if pierModel:
self.notify.debug('loading pier')
pierModel.getChild(0).reparentTo(self.geom)
fortModel = loader.loadModel(modelBaseName + '_fort', okMissing = True)
if fortModel:
self.notify.debug('loading _fort')
fortModel.getChild(0).reparentTo(self.geom)
logModel = loader.loadModel(modelBaseName + '_logs', okMissing = True)
if logModel:
self.notify.debug('loading _logs')
logModel.getChild(0).reparentTo(self.geom)
vegeWallModel = loader.loadModel(modelBaseName + '_nat_wall', okMissing = True)
if vegeWallModel:
self.notify.debug('loading _nat_wall')
vegeWallModel.getChild(0).reparentTo(self.geom)
vegModel = loader.loadModel(modelBaseName + '_veg', okMissing = True)
if vegModel:
self.notify.debug('loading _veg')
vegModel.getChild(0).reparentTo(self.geom)
rockModel = loader.loadModel(modelBaseName + '_rocks', okMissing = True)
if rockModel:
self.notify.debug('loading _rocks')
rockModel.getChild(0).reparentTo(self.geom)
mapNode = self.getMapNode()
if mapNode and not mapNode.isEmpty():
mapNode.hide()
def loadModel(self):
if 'interior' not in self.modelPath:
self.loadModelParts()
else:
self.geom = loader.loadModel(self.modelPath)
self.geom.findAllMatches('**/door_hole*').setColorScale(Vec4(0, 0, 0, 1))
self.geom.reparentTo(self)
self.geom.hide(OTPRender.MainCameraBitmask)
self.geom.showThrough(OTPRender.EnviroCameraBitmask)
coll = self.geom.findAllMatches('**/+CollisionNode')
self.coll = coll
locatorNodes = self.geom.findAllMatches('**/portal_interior_*')
locatorNodes.wrtReparentTo(self)
self.locatorNodes = locatorNodes
self.portalNodes = self.geom.findAllMatches('**/portal_[0-9]')
self.initBlockers(self.geom)
def setName(self, name):
self.name = name
def getTeam(self):
return PiratesGlobals.ISLAND_TEAM
def updateAvReturnLocation(self, av):
if 'Jail' in self.dclass.getName():
av.d_requestReturnLocation(self.doId)
def enterInteriorFromDoor(self, doorIndex):
base.cr.loadingScreen.showTarget(self.uniqueId)
base.cr.loadingScreen.show()
doorIndexStr = ''
if doorIndex > 0:
doorIndexStr = '_' + str(doorIndex + 1)
self.doorLeftStr = '**/door_left' + doorIndexStr
self.doorRightStr = '**/door_right' + doorIndexStr
self.doorLocatorStr = '**/door_locator' + doorIndexStr
doorLeft = self.geom.find(self.doorLeftStr)
doorRight = self.geom.find(self.doorRightStr)
self.openDoorIval = Parallel()
self.closeDoorIval = Parallel()
self.tOpen = 0.5
if not doorLeft.isEmpty():
self.openDoorIval.append(LerpHprInterval(doorLeft, self.tOpen, Vec3(-90, 0, 0)))
self.closeDoorIval.append(LerpHprInterval(doorLeft, self.tOpen, Vec3(0, 0, 0)))
if not doorRight.isEmpty():
self.openDoorIval.append(LerpHprInterval(doorRight, self.tOpen, Vec3(90, 0, 0)))
self.closeDoorIval.append(LerpHprInterval(doorRight, self.tOpen, Vec3(0, 0, 0)))
doorLocator = self.geom.find(self.doorLocatorStr)
if doorLocator.isEmpty():
doorLocator = self.geom.find(self.doorLeftStr)
if doorLocator.isEmpty():
doorLocator = self.geom.find(self.doorRightStr)
localAvatar.reparentTo(doorLocator)
localAvatar.setPos(0, 10, 0)
localAvatar.setHpr(0, 0, 0)
localAvatar.wrtReparentTo(self)
localAvatar.setP(0)
localAvatar.setR(0)
localAvatar.setScale(1)
self.handleEnterGameArea(None)
base.loadingScreen.tick()
messenger.send('doorToInteriorFadeIn', [
self.uniqueId])
base.loadingScreen.tick()
if self.autoFadeIn:
fadeInFunc = Func(base.transitions.fadeIn, self.tOpen)
playerStateFunc = Func(localAvatar.gameFSM.request, 'LandRoam')
else:
def Nothing():
pass
fadeInFunc = Func(Nothing)
if self.autoFadeIn:
sf = Sequence(Func(self.requestDoorInteract), fadeInFunc, self.openDoorIval, self.closeDoorIval, Func(self.closeSfx.play), Func(self.requestPlayerStateFunc))
else:
sf = Sequence(Func(self.requestDoorInteract), fadeInFunc, self.openDoorIval, self.closeDoorIval, Func(self.requestPlayerStateFunc))
self.fadeInTrack = sf
self.fadeInTrack.start()
base.cr.loadingScreen.hide()
def requestPlayerStateFunc(self):
if localAvatar.getGameState() in [
'Injured']:
return None
if self.autoFadeIn:
localAvatar.gameFSM.request('LandRoam')
def requestDoorInteract(self):
if localAvatar.getGameState() in [
'Injured']:
return None
localAvatar.gameFSM.request('DoorInteract')
def handleChildArrive(self, childObj, zoneId):
DistributedGameArea.DistributedGameArea.handleChildArrive(self, childObj, zoneId)
if childObj.isLocal():
self.updateAvReturnLocation(childObj)
self.builder.checkForHolidayObjects()
self.requestSoundAndMusic()
if not self.footstepSound:
localAvatar.setAreaFootstep('Wood')
self.setupMinimap()
if self.minimap and localAvatar.getMinimapObject():
self.minimap.addObject(localAvatar.getMinimapObject())
localAvatar.guiMgr.setMinimap(self.minimap)
def handleChildLeave(self, childObj, zoneId):
DistributedGameArea.DistributedGameArea.handleChildLeave(self, childObj, zoneId)
if childObj.isLocal():
localAvatar.guiMgr.clearMinimap(self.minimap)
self.destroyMinimap()
self.fadeOutSoundAndMusic()
def loadConnectors(self):
if 'interior' in self.modelPath and 'fortCharles_zero' in self.modelPath and 'kingshead_zero' in self.modelPath or 'pir_m_bld_int_tavernA_oneDoor' in self.modelPath:
return None
DistributedGameArea.DistributedGameArea.loadConnectors(self)
def unloadConnectors(self):
if 'interior' in self.modelPath and 'fortCharles_zero' in self.modelPath and 'kingshead_zero' in self.modelPath or 'pir_m_bld_int_tavernA_oneDoor' in self.modelPath:
return None
DistributedGameArea.DistributedGameArea.unloadConnectors(self)
def setAutoFadeInOnEnter(self, autoFadeIn):
self.autoFadeIn = autoFadeIn
def getTeleportDestPosH(self, index = 0):
pt = self._getTunnelSpawnPos(index)
if pt == None:
pt = self._getDoorSpawnPos(index)
return (pt[0], pt[1], pt[2], 0)
def _getDoorSpawnPos(self, index = 0):
doorIndexStr = ''
if index > 0:
            doorIndexStr = '_' + str(index + 1)
doorLocatorStr = '**/door_locator' + doorIndexStr
doorLocator = self.find(doorLocatorStr)
if doorLocator.isEmpty():
doorLocator = self.find(self.doorLeftStr)
if doorLocator.isEmpty():
doorLocator = self.find(self.doorRightStr)
return self.getRelativePoint(doorLocator, Point3(0, 10, 0))
def turnOn(self, av=None):
self.unstash()
if self.buildingInterior:
av = None
DistributedGameArea.DistributedGameArea.turnOn(self, av)
DistributedCartesianGrid.DistributedCartesianGrid.turnOn(self, av)
def turnOff(self):
DistributedGameArea.DistributedGameArea.turnOff(self)
DistributedCartesianGrid.DistributedCartesianGrid.turnOff(self)
self.stash()
def getLevel(self):
return 1
def handleLowTerrainDetail(self):
grids = self.findAllMatches('**/Grid-*')
for dl in self.builder.dynamicLights:
if dl.type != 0:
for gi in xrange(0, grids.getNumPaths()):
geomParent = grids[gi].getChild(0)
geomParent.setLightOff(dl.lightNodePath)
for ci in xrange(0, geomParent.getNumChildren()):
geoms = geomParent.getChild(ci)
geoms.setLightOff(dl.lightNodePath)
def requestSoundAndMusic(self):
self.ambientName = SoundGlobals.getAmbientFromStr(self.modelPath)
if not self.ambientName == SoundGlobals.AMBIENT_JUNGLE and self.ambientName == SoundGlobals.AMBIENT_CAVE or self.ambientName == SoundGlobals.AMBIENT_SWAMP:
base.ambientMgr.requestFadeIn(self.ambientName, finalVolume = PiratesGlobals.DEFAULT_AMBIENT_VOLUME)
if self.musicName:
base.musicMgr.requestFadeOut(self.musicName)
self.musicName = None
if self.uniqueId == LocationIds.RAMBLESHACK_INSIDE and localAvatar.getTutorialState() < 2:
self.musicName = SoundGlobals.MUSIC_COMBAT_A
base.musicMgr.request(self.musicName, priority = 1, volume = 0.3)
elif 'tavern' in self.modelPath:
self.musicName = random.choice((SoundGlobals.MUSIC_TAVERN_A, SoundGlobals.MUSIC_TAVERN_B, SoundGlobals.MUSIC_TAVERN_C))
base.musicMgr.request(self.musicName, priority = 1, volume = 0.5)
def fadeOutSoundAndMusic(self):
if hasattr(self, 'ambientName'):
if not self.ambientName == SoundGlobals.AMBIENT_JUNGLE and self.ambientName == SoundGlobals.AMBIENT_CAVE:
pass
if not (self.ambientName == SoundGlobals.AMBIENT_SWAMP):
base.ambientMgr.requestFadeOut(self.ambientName)
if self.musicName:
base.musicMgr.requestFadeOut(self.musicName)
self.musicName = None
def setupMinimap(self):
if not (self.minimap) and self.getMapNode():
self.minimap = InteriorMap(self)
def destroyMinimap(self):
if self.minimap:
self.minimap.destroy()
self.minimap = None
def getGridParameters(self):
return (self.cellWidth, self.viewingRadius)
def getTunnelNodes(self):
return self.locatorNodes
def isInInvasion(self):
return False
def getArmorScale(self):
return 1.0
|
[
"[email protected]"
] | |
adb73ef3cb0ce41f0e0a48c8f32809f66c3f59e6
|
77f6f49fbb71a51c77b169d2e902507e68e33615
|
/1373_3.py
|
dba9b02d1759cf11ed2e6455eb67e65386094acd
|
[] |
no_license
|
goodsosbva/BOJ_theothers
|
1d39657b34f11cc940f0013011d83f5b3dfdc474
|
cf3da17e488b50d3d7cff65d403ba9dc58b8170e
|
refs/heads/main
| 2023-07-17T22:06:42.964302 | 2023-07-11T09:31:53 | 2023-07-11T09:31:53 | 334,390,930 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 244 |
py
|
print(oct(int(input(), 2))[2:]) # int(input(), 2): the value read is a binary string, converted here to a decimal integer
# the decimal value is converted to octal and printed from index 2 onward ([2:] drops the '0o' prefix)
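# e.g. "1101" -> int("1101", 2) == 13 -> oct(13) == "0o15" -> prints "15"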
|
[
"[email protected]"
] | |
d126ba6aec22a916b3e5a0f5725922602063046c
|
cefa560ae84e4bcb7a8f2828357ffd3ba8a88b49
|
/setup.py
|
9549c17fef0d9acac16f246cb4c2586a92b74d8d
|
[] |
no_license
|
paultag/python-jackpot
|
eb294240925e598873598c96edb65cb191863e67
|
f4a832153a2eb71d2fed4e929b944093181a4d19
|
refs/heads/master
| 2021-04-03T09:07:54.113177 | 2018-03-18T15:59:06 | 2018-03-18T15:59:20 | 124,565,769 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 385 |
py
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from jackpot import __version__
long_description = ''
setup(name='jackpot',
version=__version__,
packages=find_packages(),
description='',
long_description=long_description,
platforms=['any'],
install_requires=[
'jsonschema==2.6.0',
],
extras_require={},
)
|
[
"[email protected]"
] | |
ae518b92f0d9797b906239b9fe24dbbce0d6da8e
|
52c372d83179f7c4506d31ede4dbde97917abc74
|
/bob/db/nivl/models.py
|
d7c0faa9a8a9938e9e522ddd027b66bc01eadeff
|
[
"BSD-3-Clause"
] |
permissive
|
bioidiap/bob.db.nivl
|
af78c64a2b593f5cf9a8c3abe04690887022604d
|
d5f9282894f5e93a77d35f38c6964629f9ea80ab
|
refs/heads/master
| 2023-04-18T18:13:16.461281 | 2020-11-10T10:34:52 | 2020-11-10T10:34:52 | 283,998,033 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,163 |
py
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira<[email protected]>
# @date: Mon Oct 19 17:41:51 CEST 2015
#
# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Table models and functionality for the Near-Infrared and Visible-Light (NIVL) Dataset
"""
import sqlalchemy
from sqlalchemy import Table, Column, Integer, String, Boolean, ForeignKey, or_, and_, not_
from bob.db.base.sqlalchemy_migration import Enum, relationship
from sqlalchemy.orm import backref
from sqlalchemy.ext.declarative import declarative_base
import bob.db.base
import os
Base = declarative_base()
""" Defining protocols. Yes, they are static """
PROTOCOLS = ( 'idiap-comparison_2011-VIS-NIR', \
'idiap-comparison_2012-VIS-NIR', \
'idiap-search_VIS-NIR_split1', \
'idiap-search_VIS-NIR_split2', \
'idiap-search_VIS-NIR_split3', \
'idiap-search_VIS-NIR_split4', \
'idiap-search_VIS-NIR_split5', \
'idiap-search_VIS-VIS_split1', \
'idiap-search_VIS-VIS_split2', \
'idiap-search_VIS-VIS_split3', \
'idiap-search_VIS-VIS_split4', \
'idiap-search_VIS-VIS_split5', \
'original_2011-2012', \
'original_2012-2011')
GROUPS = ('world', 'dev', 'eval')
PURPOSES = ('train', 'enroll', 'probe')
class Client(Base):
"""
    Information about the clients (identities) of the NIVL dataset.
"""
__tablename__ = 'client'
id = Column(String(10), primary_key=True)
def __init__(self, id, group):
self.id = id
self.group = group
def __repr__(self):
return "<Client({0})>".format(self.id)
class Annotation(Base):
"""
    Eye-center annotation attached to a file:
    - file_id
    - le_x, le_y (left eye)
    - re_x, re_y (right eye)
"""
__tablename__ = 'annotation'
file_id = Column(Integer, ForeignKey('file.id'), primary_key=True)
le_x = Column(Integer)
le_y = Column(Integer)
re_x = Column(Integer)
re_y = Column(Integer)
def __init__(self, file_id, le_x, le_y, re_x, re_y):
self.file_id = file_id
self.le_x = le_x
self.le_y = le_y
self.re_x = re_x
self.re_y = re_y
def __repr__(self):
return "<Annotation(file_id:{0}, le_x={1}, le_y={2}), re_x={3}, re_y={4})>".format(self.file_id, self.le_x, self.le_y, self.re_x, self.re_y)
class File(Base, bob.db.base.File):
"""
    Information about the files of the NIVL database.
Each file includes
* the client id
"""
__tablename__ = 'file'
modality_choices = ('VIS', 'NIR')
id = Column(Integer, primary_key=True)
path = Column(String(100), unique=True)
client_id = Column(Integer, ForeignKey('client.id'))
modality = Column(Enum(*modality_choices))
session = Column(Integer)
year = Column(Integer)
# a back-reference from the client class to a list of files
client = relationship("Client", backref=backref("files", order_by=id))
all_annotations = relationship("Annotation", backref=backref("file"), uselist=True)
def __init__(self, file_id, image_name, client_id, modality, session, year):
# call base class constructor
bob.db.base.File.__init__(self, file_id = file_id, path = image_name)
self.client_id = client_id
self.modality = modality
self.session = session
self.year = year
def annotations(self, annotation_type="eyes_center"):
assert len(self.all_annotations)==1
if annotation_type=="eyes_center":
return {'reye' : (self.all_annotations[0].re_y, self.all_annotations[0].re_x ), 'leye' : (self.all_annotations[0].le_y, self.all_annotations[0].le_x) }
else:
raise ValueError("Annotations type {0} invalid. Only 'eyes_center' is allowed".format(annotation_type))
class Protocol_File_Association(Base):
"""
Describe the protocols
"""
__tablename__ = 'protocol_file_association'
protocol = Column('protocol', Enum(*PROTOCOLS), primary_key=True)
group = Column('group', Enum(*GROUPS), primary_key=True)
purpose = Column('purpose', Enum(*PURPOSES), primary_key=True)
file_id = Column('file_id', Integer, ForeignKey('file.id'), primary_key=True)
#client_id = Column('client_id', Integer, ForeignKey('client.id'), primary_key=True)
def __init__(self, protocol, group, purpose, file_id):
self.protocol = protocol
self.group = group
self.purpose = purpose
self.file_id = file_id
#self.client_id = client_id
|
[
"[email protected]"
] | |
d73652512bd558ac8c51d7ab86b3ae12c6a99bbb
|
a1439f8dfaf14e61720dcde463e0c8731e497526
|
/pao/mpr/examples/moore.py
|
0b20dd6de48dd9b40b8632d61a2f6982c475285a
|
[
"BSD-3-Clause"
] |
permissive
|
whart222/pao
|
e5ef57baa073facb9d3ce8dc8e86b80d37aa90f3
|
3a80767ef5082be4dd98dd2f38000ffb96d2327c
|
refs/heads/master
| 2023-06-15T06:25:07.564944 | 2021-07-08T14:18:02 | 2021-07-08T14:18:02 | 265,347,819 | 0 | 0 | null | 2020-05-19T19:43:12 | 2020-05-19T19:43:11 | null |
UTF-8
|
Python
| false | false | 662 |
py
|
#
# Example from
# Moore, J. and J. Bard 1990.
# The mixed integer linear bilevel programming problem.
# Operations Research 38(5), 911–921.
#
from pao.mpr import *
def create():
M = LinearMultilevelProblem()
U = M.add_upper(nxZ=1)
L = U.add_lower(nxZ=1)
U.c[U] = [-1]
U.c[L] = [-10]
L.c[L] = [1]
L.A[U] = [[-25],
[1],
[2],
[-2]]
L.A[L] = [[20],
[2],
[-1],
[-10]]
L.b = [30,10,15,-15]
return M
if __name__ == "__main__": #pragma: no cover
M = create()
M.print()
opt = Solver('pao.mpr.FA')
opt.solve(M)
|
[
"[email protected]"
] | |
56b0ec2eed72c8ac954b8c2725eb4f65f08a5aba
|
a8eb3ee2225bea109985ed463b146ef887e8822e
|
/features/create_features_all4.py
|
43825408ee77553edaec1b446ced3983038a85fc
|
[] |
no_license
|
zakopuro/kaggle_Riiid
|
58148deb1c0f1d3672d44aa9c8ba5acf35180694
|
557caf57dc2460733e64cf24a7b547c7eee27363
|
refs/heads/main
| 2023-02-14T19:50:00.398300 | 2021-01-11T11:30:36 | 2021-01-11T11:30:36 | 302,792,953 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 44,739 |
py
|
import pandas as pd
import numpy as np
import gc
import os
from tqdm import tqdm
from collections import defaultdict
from base_create_features import Feature, get_arguments, generate_features
from pathlib import Path
import pickle
import dill
import sys
sys.path.append('./')
from utils import data_util,logger
import time
from sklearn.decomposition import PCA
Feature.dir = 'features/all_data'
TARGET = 'answered_correctly'
def _label_encoder(data):
l_data,_ =data.factorize(sort=True)
if l_data.max()>32000:
l_data = l_data.astype('int32')
else:
l_data = l_data.astype('int16')
if data.isnull().sum() > 0:
l_data = np.where(l_data == -1,np.nan,l_data)
return l_data
# This leaks the target to some degree, but that is accepted here.
def target_encoding(data,feature_list):
group_feature = feature_list.copy()
    group_feature.append(TARGET)
feature_name = '-'.join(feature_list)
mean_data = data[group_feature].groupby(feature_list).mean()
mean_data.columns = [f'{feature_name}_mean']
return mean_data
class BASE_FIX(Feature):
def create_features(self):
self.train = pd.read_feather('./data/train_valid/cv1_train_all.feather')
self.valid = pd.read_feather('./data/train_valid/cv1_valid_all.feather')
# self.train = self.train.iloc[-40000000:]
self.train = self.train.loc[self.train.content_type_id == False].reset_index(drop = True)
self.valid = self.valid.loc[self.valid.content_type_id == False].reset_index(drop = True)
# Changing dtype to avoid lightgbm error
self.train['prior_question_had_explanation'] = self.train.prior_question_had_explanation.fillna(False).astype('int8')
self.valid['prior_question_had_explanation'] = self.valid.prior_question_had_explanation.fillna(False).astype('int8')
# Fill prior question elapsed time with the mean
# prior_question_elapsed_time_mean = self.train['prior_question_elapsed_time'].dropna().mean()
# self.train['prior_question_elapsed_time'].fillna(prior_question_elapsed_time_mean, inplace = True)
# self.valid['prior_question_elapsed_time'].fillna(prior_question_elapsed_time_mean, inplace = True)
qs = pd.read_csv('./data/input/questions.csv')
tag = qs["tags"].str.split(" ",expand = True)
tag.columns = ['tags1','tags2','tags3','tags4','tags5','tags6']
qs = pd.concat([qs,tag],axis=1)
qs_cmnts = pd.read_csv('./data/input/question_cmnts.csv')
qs = pd.merge(qs,qs_cmnts,on='question_id',how='left')
qs = qs.rename(columns={'question_id':'content_id'})
self.train = pd.merge(self.train,qs,on='content_id',how='left')
self.valid = pd.merge(self.valid,qs,on='content_id',how='left')
for i in range(1,7):
self.train[f'tags{i}'] = self.train[f'tags{i}'].astype(float)
self.valid[f'tags{i}'] = self.valid[f'tags{i}'].astype(float)
self.train['part_community'] = self.train['part'] * 10 + self.train['community']
self.valid['part_community'] = self.valid['part'] * 10 + self.valid['community']
self.train = self.train.sort_values('row_id').reset_index(drop=True)
self.valid = self.valid.sort_values('row_id').reset_index(drop=True)
self.test = pd.DataFrame()
class TAGS(Feature):
def create_features(self):
create_feats = ["tags_pca_0", "tags_pca_1",'tags_nan_count','tags_nan_count_mean','tags1_cut_mean','tags1_cut']
self.train = pd.read_feather(f'./{Feature.dir}/BASE_FIX_train.feather')
self.valid = pd.read_feather(f'./{Feature.dir}/BASE_FIX_valid.feather')
qs = pd.read_csv('./data/input/questions.csv')
lst = []
for tags in qs["tags"]:
ohe = np.zeros(188)
if str(tags) != "nan":
for tag in tags.split():
ohe += np.eye(188)[int(tag)]
lst.append(ohe)
tags_df = pd.DataFrame(lst, columns=[f"tag_{i}" for i in range(188)]).astype(int)
pca = PCA(n_components=2)
X_2d = pca.fit_transform(tags_df.values)
pca_feat_df = pd.DataFrame(X_2d, columns=["tags_pca_0", "tags_pca_1"])
pca_feat_df["content_id"] = qs["question_id"]
self.train = pd.merge(self.train,pca_feat_df,on='content_id',how='left')
self.valid = pd.merge(self.valid,pca_feat_df,on='content_id',how='left')
        # number of NaN values among the tags columns
self.train['tags_nan_count'] = self.train[['tags1','tags2','tags3','tags4','tags5','tags6']].isnull().sum(axis=1)
self.valid['tags_nan_count'] = self.valid[['tags1','tags2','tags3','tags4','tags5','tags6']].isnull().sum(axis=1)
self.train.loc[self.train['tags_nan_count'] >= 5, 'tags_nan_count'] = 5
self.valid.loc[self.valid['tags_nan_count'] >= 5, 'tags_nan_count'] = 5
tags_nan_count_mean = self.train[[TARGET,'tags_nan_count']].groupby('tags_nan_count').mean().reset_index()
tags_nan_count_mean.columns = ['tags_nan_count','tags_nan_count_mean']
self.train = pd.merge(self.train,tags_nan_count_mean,on='tags_nan_count',how='left')
self.valid = pd.merge(self.valid,tags_nan_count_mean,on='tags_nan_count',how='left')
# top tags1
top_tags1 = [143., 73., 79., 96., 131., 1., 10., 80., 133., 8., 123.,151., 53., 23., 9., 62., 136., 74., 157., 55.]
self.train['tags1_cut'] = self.train['tags1']
self.valid['tags1_cut'] = self.valid['tags1']
self.train.loc[~(self.train['tags1_cut'].isin(top_tags1)) , 'tags1_cut'] = 9999
self.valid.loc[~(self.valid['tags1_cut'].isin(top_tags1)) , 'tags1_cut'] = 9999
tags1_cut_mean = self.train[[TARGET,'tags1_cut']].groupby('tags1_cut').mean().reset_index()
tags1_cut_mean.columns = ['tags1_cut','tags1_cut_mean']
self.train = pd.merge(self.train,tags1_cut_mean,on='tags1_cut',how='left')
self.valid = pd.merge(self.valid,tags1_cut_mean,on='tags1_cut',how='left')
self.train = self.train[create_feats]
self.valid = self.valid[create_feats]
pca_feat_df.to_feather(f'./{Feature.dir}/pca_tags.feather')
tags_nan_count_mean.to_feather(f'./{Feature.dir}/tags_nan_count_mean.feather')
tags1_cut_mean.to_feather(f'./{Feature.dir}/tags1_cut_mean.feather')
class GROUP_BY(Feature):
def create_features(self):
create_feats = ['ans_part_mean','ans_community_mean','ans_part_community_mean']
self.train = pd.read_feather(f'./{Feature.dir}/BASE_FIX_train.feather')
self.valid = pd.read_feather(f'./{Feature.dir}/BASE_FIX_valid.feather')
part_mean = self.train[[TARGET,'part']].groupby('part').mean().reset_index()
part_mean.columns = ['part','ans_part_mean']
self.train = pd.merge(self.train,part_mean,on='part',how='left')
self.valid = pd.merge(self.valid,part_mean,on='part',how='left')
part_mean.to_feather(f'./{Feature.dir}/part_mean.feather')
community_mean = self.train[[TARGET,'community']].groupby('community').mean().reset_index()
community_mean.columns = ['community','ans_community_mean']
self.train = pd.merge(self.train,community_mean,on='community',how='left')
self.valid = pd.merge(self.valid,community_mean,on='community',how='left')
community_mean.to_feather(f'./{Feature.dir}/community_mean.feather')
part_community_mean = self.train[[TARGET,'part_community']].groupby('part_community').mean().reset_index()
part_community_mean.columns = ['part_community','ans_part_community_mean']
self.train = pd.merge(self.train,part_community_mean,on='part_community',how='left')
self.valid = pd.merge(self.valid,part_community_mean,on='part_community',how='left')
part_community_mean.to_feather(f'./{Feature.dir}/part_community.feather')
self.train = self.train[create_feats]
self.valid = self.valid[create_feats]
class LOOP_FIX4(Feature):
    # Update user/feature-combination statistics
def update_user_arg_feats(self,user_id,col,target,
features_dicts,
ans_user_args_list_name):
if len(features_dicts[ans_user_args_list_name][user_id][col]) == 0:
features_dicts[ans_user_args_list_name][user_id][col] = [0,0]
features_dicts[ans_user_args_list_name][user_id][col][1] += target
features_dicts[ans_user_args_list_name][user_id][col][0] += 1
    # Create user/feature-combination statistics
def create_user_args_feats(self,num,user_id,col,
features_dicts,feats_np_dic,
ans_user_args_list_name = None,
ans_user_args_count_name = None,ans_user_args_avg_name = None):
if len(features_dicts[ans_user_args_list_name][user_id][col]) == 0:
features_dicts[ans_user_args_list_name][user_id][col] = [0,0]
if features_dicts[ans_user_args_list_name][user_id][col][0] != 0:
feats_np_dic[ans_user_args_avg_name][num] = features_dicts[ans_user_args_list_name][user_id][col][1]/features_dicts[ans_user_args_list_name][user_id][col][0]
else:
feats_np_dic[ans_user_args_avg_name][num] = np.nan
feats_np_dic[ans_user_args_count_name][num] = features_dicts[ans_user_args_list_name][user_id][col][0]
    # TODO: the resulting values may change
def create_first_bundle(self,num,user_id,bundle_id,
features_dicts,
feats_np_dic):
if len(features_dicts['user_list'][user_id]) < 5:
features_dicts['user_list'][user_id].append(bundle_id)
feats_np_dic['first_bundle'][num] = features_dicts['user_list'][user_id][4]
def update_lag_incorrect_feats(self,user_id,timestamp,target,
features_dicts):
if target == 0:
if len(features_dicts['lag_user_incorrect_time'][user_id]) == 1:
features_dicts['lag_user_incorrect_time'][user_id].pop(0)
features_dicts['lag_user_incorrect_time'][user_id].append(timestamp)
else:
features_dicts['lag_user_incorrect_time'][user_id].append(timestamp)
def update_part_lag_incorrect_feats(self,user_id,part,timestamp,target,
features_dicts):
if target == 0:
if len(features_dicts['lag_user_part_incorrect_time'][user_id][part]) == 1:
features_dicts['lag_user_part_incorrect_time'][user_id][part].pop(0)
features_dicts['lag_user_part_incorrect_time'][user_id][part].append(timestamp)
else:
features_dicts['lag_user_part_incorrect_time'][user_id][part].append(timestamp)
def update_args_time_feats(self,col,prior_question_elapsed_time,prior_question_had_explanation,
features_dicts,
feat_list_name):
'''
0 : count
1 : sum
2 : elapsed_time_sum
3 : explanation_sum
'''
if len(features_dicts[feat_list_name][col]) == 0:
features_dicts[feat_list_name][col] = [0 for _ in range(4)]
features_dicts[feat_list_name][col][2] += prior_question_elapsed_time
features_dicts[feat_list_name][col][3] += prior_question_had_explanation
def update_lag_time_feats(self,user_id,timestamp,
features_dicts):
if len(features_dicts['lag_user_time'][user_id]) == 3:
features_dicts['lag_user_time'][user_id].pop(0)
features_dicts['lag_user_time'][user_id].append(timestamp)
else:
features_dicts['lag_user_time'][user_id].append(timestamp)
def update_create_user_arg_count(self,num,user_id,col,
features_dicts,feats_np_dic,
user_args_count_dic_name,
user_args_count_name):
feats_np_dic[user_args_count_name][num] = features_dicts[user_args_count_dic_name][user_id][col]
# update
features_dicts[user_args_count_dic_name][user_id][col] += 1
def update_args_feats(self,col,feat_list_name,
target,
features_dicts,n):
'''
0 : count
1 : sum
2 : elapsed_time_sum
3 : explanation_sum
'''
if len(features_dicts[feat_list_name][col]) == 0:
features_dicts[feat_list_name][col] = [0 for _ in range(n)]
features_dicts[feat_list_name][col][1] += target
features_dicts[feat_list_name][col][0] += 1
def create_arg_feats(self,num,col,
features_dicts,feats_np_dic,
list_name,
ans_avg_name = None, elapsed_time_avg_name = None, explanation_avg_name = None, elapsed_time_sum_feat_name = None):
'''
0 : count
1 : sum
2 : elapsed_time_sum
3 : explanation_sum
'''
if (len(features_dicts[list_name][col]) >= 4):
if (features_dicts[list_name][col][0] >= 1):
feats_np_dic[ans_avg_name][num] = features_dicts[list_name][col][1] / features_dicts[list_name][col][0]
feats_np_dic[elapsed_time_avg_name][num] = features_dicts[list_name][col][2] / features_dicts[list_name][col][0]
feats_np_dic[explanation_avg_name][num] = features_dicts[list_name][col][3] / features_dicts[list_name][col][0]
if elapsed_time_sum_feat_name is not None:
feats_np_dic[elapsed_time_sum_feat_name][num] = features_dicts[list_name][col][2]
else:
feats_np_dic[ans_avg_name][num] = np.nan
feats_np_dic[elapsed_time_avg_name][num] = np.nan
feats_np_dic[explanation_avg_name][num] = np.nan
if elapsed_time_sum_feat_name is not None:
feats_np_dic[elapsed_time_sum_feat_name][num] = np.nan
else:
feats_np_dic[ans_avg_name][num] = np.nan
feats_np_dic[elapsed_time_avg_name][num] = np.nan
feats_np_dic[explanation_avg_name][num] = np.nan
if elapsed_time_sum_feat_name is not None:
feats_np_dic[elapsed_time_sum_feat_name][num] = np.nan
    # Create per-user lag features
def create_lag_time_feats(self,num,user_id,timestamp,
features_dicts,
feats_np_dic):
if len(features_dicts['lag_user_time'][user_id]) == 0:
feats_np_dic['lag_time_1'][num] = np.nan
feats_np_dic['lag_time_2'][num] = np.nan
feats_np_dic['lag_time_3'][num] = np.nan
elif len(features_dicts['lag_user_time'][user_id]) == 1:
feats_np_dic['lag_time_1'][num] = timestamp - features_dicts['lag_user_time'][user_id][0]
feats_np_dic['lag_time_2'][num] = np.nan
feats_np_dic['lag_time_3'][num] = np.nan
elif len(features_dicts['lag_user_time'][user_id]) == 2:
feats_np_dic['lag_time_1'][num] = timestamp - features_dicts['lag_user_time'][user_id][1]
feats_np_dic['lag_time_2'][num] = timestamp - features_dicts['lag_user_time'][user_id][0]
feats_np_dic['lag_time_3'][num] = np.nan
elif len(features_dicts['lag_user_time'][user_id]) == 3:
feats_np_dic['lag_time_1'][num] = timestamp - features_dicts['lag_user_time'][user_id][2]
feats_np_dic['lag_time_2'][num] = timestamp - features_dicts['lag_user_time'][user_id][1]
feats_np_dic['lag_time_3'][num] = timestamp - features_dicts['lag_user_time'][user_id][0]
if len(features_dicts['lag_user_incorrect_time'][user_id]) == 0:
feats_np_dic['lag_incorrect_time'][num] = np.nan
else:
feats_np_dic['lag_incorrect_time'][num] = timestamp - features_dicts['lag_user_incorrect_time'][user_id][0]
# rolling mean
def create_user_ans_rolling_mean(self,num,user_id,timestamp,
features_dicts,
feats_np_dic):
if len(features_dicts['user_past_ans'][user_id]) == 5:
feats_np_dic['user_rolling_ans_mean10'][num] = np.mean(features_dicts['user_past_ans'][user_id])
else:
feats_np_dic['user_rolling_ans_mean10'][num] = np.nan
def update_user_ans_list(self,user_id,target,
features_dicts):
if len(features_dicts['user_past_ans'][user_id]) == 5:
features_dicts['user_past_ans'][user_id].pop(0)
features_dicts['user_past_ans'][user_id].append(target)
else:
features_dicts['user_past_ans'][user_id].append(target)
def update_part_lag_time_feats(self,user_id,part,timestamp,
features_dicts):
if len(features_dicts['lag_user_part_time'][user_id][part]) == 3:
features_dicts['lag_user_part_time'][user_id][part].pop(0)
features_dicts['lag_user_part_time'][user_id][part].append(timestamp)
else:
features_dicts['lag_user_part_time'][user_id][part].append(timestamp)
def create_part_lag_time_feats(self,num,user_id,part,timestamp,
features_dicts,
feats_np_dic):
if len(features_dicts['lag_user_part_time'][user_id][part]) == 0:
feats_np_dic['lag_part_time_1'][num] = np.nan
feats_np_dic['lag_part_time_2'][num] = np.nan
feats_np_dic['lag_part_time_3'][num] = np.nan
elif len(features_dicts['lag_user_part_time'][user_id][part]) == 1:
feats_np_dic['lag_part_time_1'][num] = timestamp - features_dicts['lag_user_part_time'][user_id][part][0]
feats_np_dic['lag_part_time_2'][num] = np.nan
feats_np_dic['lag_part_time_3'][num] = np.nan
elif len(features_dicts['lag_user_part_time'][user_id][part]) == 2:
feats_np_dic['lag_part_time_1'][num] = timestamp - features_dicts['lag_user_part_time'][user_id][part][1]
feats_np_dic['lag_part_time_2'][num] = timestamp - features_dicts['lag_user_part_time'][user_id][part][0]
feats_np_dic['lag_part_time_3'][num] = np.nan
elif len(features_dicts['lag_user_part_time'][user_id][part]) == 3:
feats_np_dic['lag_part_time_1'][num] = timestamp - features_dicts['lag_user_part_time'][user_id][part][2]
feats_np_dic['lag_part_time_2'][num] = timestamp - features_dicts['lag_user_part_time'][user_id][part][1]
feats_np_dic['lag_part_time_3'][num] = timestamp - features_dicts['lag_user_part_time'][user_id][part][0]
# if len(features_dicts['lag_user_part_incorrect_time'][user_id][part]) == 0:
# feats_np_dic['lag_part_incorrect_time'][num] = np.nan
# else:
# feats_np_dic['lag_part_incorrect_time'][num] = timestamp - features_dicts['lag_user_part_incorrect_time'][user_id][part][0]
def update_feats(self,previous_row,features_dicts):
        # cast to smaller types to reduce memory usage
user_id = int(previous_row[0])
target = int(previous_row[1])
content_id = int(previous_row[2])
prior_question_elapsed_time = previous_row[3]
prior_question_had_explanation = int(previous_row[4])
timestamp = int(previous_row[5])
bundle_id = int(previous_row[6])
part = int(previous_row[7])
community = int(previous_row[8])
# lag time
self.update_lag_incorrect_feats(user_id,timestamp,target,
features_dicts)
# User args feats
create_lists = [[part,
'user_part_list', # dic
'user_part_count','ans_user_part_avg'] # np
]
for create_list in create_lists:
self.update_user_arg_feats(user_id,create_list[0],target,
features_dicts,
create_list[1])
# arg feats
create_lists = [[user_id,
'user_list'],
[content_id,
'content_list']]
for create_list in create_lists:
self.update_args_feats(create_list[0],create_list[1],
target,
features_dicts,
n=4)
# # rolling mean
# self.update_user_ans_list(user_id,target,
# features_dicts)
    # Apply updates for the accumulated past rows
def update_previous(self,features_dicts,previous_df):
for previous_row in previous_df:
self.update_feats(previous_row,features_dicts)
    # Create all the numpy arrays that will be stored in the dataframe
def create_datas(self,df):
df_name_float_list = [
# User
'ans_user_avg',
'ans_user_count',
'elapsed_time_user_avg',
'elapsed_time_user_sum',
'explanation_user_avg',
'first_bundle',
# content_id
'ans_content_avg',
'elapsed_time_content_avg',
'explanation_content_avg',
# user lag time
'lag_time_1',
'lag_time_2',
'lag_time_3',
'lag_incorrect_time',
# User Part lag time
'lag_part_time_1',
'lag_part_time_2',
'lag_part_time_3',
# User Part
'ans_user_part_avg'
]
df_name_int_list = [
# User Content
'user_content_count',
# User Part
'user_part_count'
]
feats_np_dic = {}
for name in df_name_float_list:
feats_np_dic[name] = np.zeros(len(df), dtype = np.float32)
for name in df_name_int_list:
feats_np_dic[name] = np.zeros(len(df), dtype = np.int32)
return feats_np_dic
def add_past_feature(self,df, features_dicts,_update = True):
        # create the dict of numpy arrays that will hold the features
feats_np_dic = self.create_datas(df)
previous_bundle_id = None
previous_user_id = None
previous_row = None
update_cnt = 0
previous_df = []
for num, row in enumerate(tqdm(df[['user_id', 'answered_correctly', 'content_id', 'prior_question_elapsed_time',
'prior_question_had_explanation', 'timestamp','bundle_id','part','community']].values)):
            # cast to smaller types to reduce memory usage
user_id = int(row[0])
target = int(row[1])
content_id = int(row[2])
prior_question_elapsed_time = row[3]
prior_question_had_explanation = int(row[4])
timestamp = int(row[5])
bundle_id = int(row[6])
part = int(row[7])
community = int(row[8])
update = _update
            # do not update while the bundle_id matches the previous row (same question bundle)
if (previous_bundle_id == bundle_id) & (previous_user_id == user_id) & (_update):
update = False
if update_cnt == 0:
previous_df.append(previous_row)
previous_df.append(row)
update_cnt += 1
            # once rows have accumulated, update the stored statistics with that past information
if (update) & (len(previous_df) > 0):
self.update_previous(features_dicts,previous_df)
previous_df = []
update_cnt = 0
update = False
if (update) & (previous_row is not None):
self.update_feats(previous_row,features_dicts)
previous_bundle_id = bundle_id
previous_user_id = user_id
previous_row = row
# Args
create_lists = [[user_id,
'user_list', # dic
'ans_user_avg','elapsed_time_user_avg','explanation_user_avg','elapsed_time_user_sum'], # np
[content_id,
'content_list', # dic
'ans_content_avg','elapsed_time_content_avg','explanation_content_avg',None] # np
]
for create_list in create_lists:
self.create_arg_feats(num,create_list[0],
features_dicts,feats_np_dic,
create_list[1],
create_list[2],create_list[3],create_list[4],create_list[5])
                # always update (elapsed time / explanation sums)
self.update_args_time_feats(create_list[0],prior_question_elapsed_time,prior_question_had_explanation,
features_dicts,
create_list[1])
# First bundle
self.create_first_bundle(num,user_id,bundle_id,
features_dicts,
feats_np_dic)
# lag time
self.create_lag_time_feats(num,user_id,timestamp,
features_dicts,
feats_np_dic)
self.create_part_lag_time_feats(num,user_id,part,timestamp,
features_dicts,
feats_np_dic)
# # rolling mean
# self.create_user_ans_rolling_mean(num,user_id,target,
# features_dicts,
# feats_np_dic)
# User args feats
# ------------------------------------------------------------------
create_lists = [[part,
'user_part_list', # dic
'user_part_count','ans_user_part_avg'], # np
]
for create_list in create_lists:
self.create_user_args_feats(num,user_id,create_list[0],
features_dicts,feats_np_dic,
create_list[1],
create_list[2],create_list[3]
)
            # TODO: should this be deferred to the update step?
# User args count
# ------------------------------------------------------------------
create_lists = [[content_id,
'user_content_count', # dic
'user_content_count'] # np
]
for create_list in create_lists:
self.update_create_user_arg_count(num,user_id,create_list[0],
features_dicts,feats_np_dic,
create_list[1],
create_list[2])
            # TODO: should the count be updated on every row?
# User count
feats_np_dic['ans_user_count'][num] = features_dicts['user_list'][user_id][0]
# Update
# ------------------------------------------------------------------
# lag time
# ------------------------------------------------------------------
self.update_lag_time_feats(user_id,timestamp,
features_dicts)
self.update_part_lag_time_feats(user_id,part,timestamp,
features_dicts)
# count
# ------------------------------------------------------------------
# features_dicts['ans_user_count'][user_id] += 1
# features_dicts['ans_content_count'][content_id] += 1
loop_feats_df = pd.DataFrame(feats_np_dic)
df = pd.concat([df, loop_feats_df], axis = 1)
return df,feats_np_dic.keys()
def create_dics(self):
features_dicts = {}
list_name = [
# User
'user_list', # 'ans_user_count','ans_user_sum','elapsed_time_user_sum','explanation_user_sum','user_first_bundle'
# content_id
'content_list', # 'ans_content_count','ans_content_sum','elapsed_time_content_sum','explanation_content_sum'
# User Time
'lag_user_time',
'lag_user_incorrect_time'
]
lambda_int_name = [
# User content_id
'user_content_count',
# User bundle_id
'user_bundle_count'
]
lambda_list_name = [
# User Part Time
'lag_user_part_time',
'lag_user_part_incorrect_time',
# User Part
'user_part_list'
]
for name in list_name:
features_dicts[name] = defaultdict(list)
for name in lambda_int_name:
features_dicts[name] = defaultdict(lambda: defaultdict(int))
for name in lambda_list_name:
features_dicts[name] = defaultdict(lambda: defaultdict(list))
return features_dicts
def create_features(self):
self.train = pd.read_feather(f'./{Feature.dir}/BASE_FIX_train.feather')
self.valid = pd.read_feather(f'./{Feature.dir}/BASE_FIX_valid.feather')
self.train['prior_question_elapsed_time'] = self.train['prior_question_elapsed_time'].fillna(0)
self.valid['prior_question_elapsed_time'] = self.valid['prior_question_elapsed_time'].fillna(0)
features_dicts = self.create_dics()
self.train , _ = self.add_past_feature(self.train, features_dicts)
self.valid , create_feats = self.add_past_feature(self.valid, features_dicts)
self.train = self.train[create_feats]
self.valid = self.valid[create_feats]
with open(f'./features/all_data/loop_feats4.dill','wb') as f:
dill.dump(features_dicts,f)
class BUNDLE_ID(Feature):
def create_features(self):
create_feats = ['first_bundle_cut','first_bundle_cut_mean']
self.train = pd.read_feather(f'./{Feature.dir}/BASE_FIX_train.feather')
self.valid = pd.read_feather(f'./{Feature.dir}/BASE_FIX_valid.feather')
train_loop = pd.read_feather(f'./{Feature.dir}/LOOP_FIX2_train.feather')[['first_bundle']]
valid_loop = pd.read_feather(f'./{Feature.dir}/LOOP_FIX2_valid.feather')[['first_bundle']]
self.train = pd.concat([self.train,train_loop],axis=1)
self.valid = pd.concat([self.valid,valid_loop],axis=1)
top_first_bundle = [7900,128,5692,7876,2063,3363,1278,175,1232,4528]
self.train['first_bundle_cut'] = self.train['first_bundle']
self.valid['first_bundle_cut'] = self.valid['first_bundle']
self.train.loc[~(self.train['first_bundle_cut'].isin(top_first_bundle)) ,'first_bundle_cut'] = 9999
self.valid.loc[~(self.valid['first_bundle_cut'].isin(top_first_bundle)) ,'first_bundle_cut'] = 9999
first_bundle_cut_mean = self.train[[TARGET,'first_bundle_cut']].groupby('first_bundle_cut').mean().reset_index()
first_bundle_cut_mean.columns = ['first_bundle_cut','first_bundle_cut_mean']
first_bundle_cut_mean.to_feather(f'./{Feature.dir}/first_bundle_cut_mean.feather')
self.train = pd.merge(self.train,first_bundle_cut_mean,on='first_bundle_cut',how='left')
self.valid = pd.merge(self.valid,first_bundle_cut_mean,on='first_bundle_cut',how='left')
self.train = self.train[create_feats]
self.valid = self.valid[create_feats]
class ROLLING_MEAN2(Feature):
# rolling mean
def create_user_ans_rolling_mean(self,num,user_id,timestamp,
features_dicts,
feats_np_dic,
n):
if len(features_dicts[f'user_past_ans_{n}'][user_id]) == n:
feats_np_dic[f'rolling_mean_{n}'][num] = features_dicts[f'user_past_ans_{n}'][user_id].count('1')/n
else:
feats_np_dic[f'rolling_mean_{n}'][num] = np.nan
def update_user_ans_list(self,user_id,target,
features_dicts,
n):
if len(features_dicts[f'user_past_ans_{n}'][user_id]) == n:
features_dicts[f'user_past_ans_{n}'][user_id] = features_dicts[f'user_past_ans_{n}'][user_id][1:]
features_dicts[f'user_past_ans_{n}'][user_id] += str(target)
else:
features_dicts[f'user_past_ans_{n}'][user_id] += str(target)
def update_feats(self,previous_row,features_dicts):
        # cast to smaller types to reduce memory usage
user_id = int(previous_row[0])
target = int(previous_row[1])
content_id = int(previous_row[2])
prior_question_elapsed_time = previous_row[3]
prior_question_had_explanation = int(previous_row[4])
timestamp = int(previous_row[5])
bundle_id = int(previous_row[6])
part = int(previous_row[7])
community = int(previous_row[8])
# rolling mean
self.update_user_ans_list(user_id,target,
features_dicts,
n=10)
self.update_user_ans_list(user_id,target,
features_dicts,
n=3)
    # Apply updates for the accumulated past rows
def update_previous(self,features_dicts,previous_df):
for previous_row in previous_df:
self.update_feats(previous_row,features_dicts)
    # Create all the numpy arrays that will be stored in the dataframe
def create_datas(self,df):
df_name_float_list = [
# User
'rolling_mean_10',
'rolling_mean_3'
]
feats_np_dic = {}
for name in df_name_float_list:
feats_np_dic[name] = np.zeros(len(df), dtype = np.float32)
return feats_np_dic
def add_past_feature(self,df, features_dicts,_update = True):
        # create the dict of numpy arrays that will hold the features
feats_np_dic = self.create_datas(df)
previous_bundle_id = None
previous_user_id = None
previous_row = None
update_cnt = 0
previous_df = []
for num, row in enumerate(tqdm(df[['user_id', 'answered_correctly', 'content_id', 'prior_question_elapsed_time',
'prior_question_had_explanation', 'timestamp','bundle_id','part','community']].values)):
            # cast to smaller types to reduce memory usage
user_id = int(row[0])
target = int(row[1])
content_id = int(row[2])
prior_question_elapsed_time = row[3]
prior_question_had_explanation = int(row[4])
timestamp = int(row[5])
bundle_id = int(row[6])
part = int(row[7])
community = int(row[8])
update = _update
            # do not update while the bundle_id matches the previous row (same question bundle)
if (previous_bundle_id == bundle_id) & (previous_user_id == user_id) & (_update):
update = False
if update_cnt == 0:
previous_df.append(previous_row)
previous_df.append(row)
update_cnt += 1
            # once rows have accumulated, update the stored statistics with that past information
if (update) & (len(previous_df) > 0):
self.update_previous(features_dicts,previous_df)
previous_df = []
update_cnt = 0
update = False
if (update) & (previous_row is not None):
self.update_feats(previous_row,features_dicts)
previous_bundle_id = bundle_id
previous_user_id = user_id
previous_row = row
self.create_user_ans_rolling_mean(num,user_id,target,
features_dicts,
feats_np_dic,
n=10)
self.create_user_ans_rolling_mean(num,user_id,target,
features_dicts,
feats_np_dic,
n=3)
loop_feats_df = pd.DataFrame(feats_np_dic)
df = pd.concat([df, loop_feats_df], axis = 1)
return df,feats_np_dic.keys()
def create_dics(self):
features_dicts = {}
str_name = [
# User rolling
'user_past_ans_10',
'user_past_ans_3'
]
for name in str_name:
features_dicts[name] = defaultdict(str)
return features_dicts
def create_features(self):
self.train = pd.read_feather(f'./{Feature.dir}/BASE_FIX_train.feather')
self.valid = pd.read_feather(f'./{Feature.dir}/BASE_FIX_valid.feather')
self.train['prior_question_elapsed_time'] = self.train['prior_question_elapsed_time'].fillna(0)
self.valid['prior_question_elapsed_time'] = self.valid['prior_question_elapsed_time'].fillna(0)
features_dicts = self.create_dics()
self.train , _ = self.add_past_feature(self.train, features_dicts)
self.valid , create_feats = self.add_past_feature(self.valid, features_dicts)
self.train = self.train[create_feats]
self.valid = self.valid[create_feats]
with open(f'./features/all_data/loop_feats_rolling_mean2.dill','wb') as f:
dill.dump(features_dicts,f)
class ROLLING_PART_MEAN2(Feature):
# rolling mean
def create_user_ans_rolling_part_mean(self,num,user_id,part,
features_dicts,
feats_np_dic,
n):
if len(features_dicts[f'user_past_part_ans_{n}'][user_id][part]) == n:
feats_np_dic[f'rolling_part_mean_{n}'][num] = features_dicts[f'user_past_part_ans_{n}'][user_id][part].count('1')/n
else:
feats_np_dic[f'rolling_part_mean_{n}'][num] = np.nan
def update_user_part_ans_list(self,user_id,target,part,
features_dicts,
n):
        if len(features_dicts[f'user_past_part_ans_{n}'][user_id][part]) == n:
features_dicts[f'user_past_part_ans_{n}'][user_id][part] = features_dicts[f'user_past_part_ans_{n}'][user_id][part][1:]
features_dicts[f'user_past_part_ans_{n}'][user_id][part] += str(target)
else:
features_dicts[f'user_past_part_ans_{n}'][user_id][part] += str(target)
def update_feats(self,previous_row,features_dicts):
        # cast to smaller types to reduce memory usage
user_id = int(previous_row[0])
target = int(previous_row[1])
content_id = int(previous_row[2])
prior_question_elapsed_time = previous_row[3]
prior_question_had_explanation = int(previous_row[4])
timestamp = int(previous_row[5])
bundle_id = int(previous_row[6])
part = int(previous_row[7])
community = int(previous_row[8])
# rolling mean
self.update_user_part_ans_list(user_id,target,part,
features_dicts,
n=10)
self.update_user_part_ans_list(user_id,target,part,
features_dicts,
n=3)
    # Apply updates for the accumulated past rows
def update_previous(self,features_dicts,previous_df):
for previous_row in previous_df:
self.update_feats(previous_row,features_dicts)
    # Create all the numpy arrays that will be stored in the dataframe
def create_datas(self,df):
df_name_float_list = [
# User
'rolling_part_mean_10',
'rolling_part_mean_3'
]
feats_np_dic = {}
for name in df_name_float_list:
feats_np_dic[name] = np.zeros(len(df), dtype = np.float32)
return feats_np_dic
def add_past_feature(self,df, features_dicts,_update = True):
        # create the dict of numpy arrays that will hold the features
feats_np_dic = self.create_datas(df)
previous_bundle_id = None
previous_user_id = None
previous_row = None
update_cnt = 0
previous_df = []
for num, row in enumerate(tqdm(df[['user_id', 'answered_correctly', 'content_id', 'prior_question_elapsed_time',
'prior_question_had_explanation', 'timestamp','bundle_id','part','community']].values)):
            # cast to smaller types to reduce memory usage
user_id = int(row[0])
target = int(row[1])
content_id = int(row[2])
prior_question_elapsed_time = row[3]
prior_question_had_explanation = int(row[4])
timestamp = int(row[5])
bundle_id = int(row[6])
part = int(row[7])
community = int(row[8])
update = _update
            # do not update while the bundle_id matches the previous row (same question bundle)
if (previous_bundle_id == bundle_id) & (previous_user_id == user_id) & (_update):
update = False
if update_cnt == 0:
previous_df.append(previous_row)
previous_df.append(row)
update_cnt += 1
            # once rows have accumulated, update the stored statistics with that past information
if (update) & (len(previous_df) > 0):
self.update_previous(features_dicts,previous_df)
previous_df = []
update_cnt = 0
update = False
if (update) & (previous_row is not None):
self.update_feats(previous_row,features_dicts)
previous_bundle_id = bundle_id
previous_user_id = user_id
previous_row = row
self.create_user_ans_rolling_part_mean(num,user_id,part,
features_dicts,
feats_np_dic,
n=10)
self.create_user_ans_rolling_part_mean(num,user_id,part,
features_dicts,
feats_np_dic,
n=3)
loop_feats_df = pd.DataFrame(feats_np_dic)
df = pd.concat([df, loop_feats_df], axis = 1)
return df,feats_np_dic.keys()
def create_dics(self):
features_dicts = {}
str_name = [
# User rolling
'user_past_part_ans_10',
'user_past_part_ans_3'
]
for name in str_name:
features_dicts[name] = defaultdict(lambda: defaultdict(str))
return features_dicts
def create_features(self):
self.train = pd.read_feather(f'./{Feature.dir}/BASE_FIX_train.feather')
self.valid = pd.read_feather(f'./{Feature.dir}/BASE_FIX_valid.feather')
self.train['prior_question_elapsed_time'] = self.train['prior_question_elapsed_time'].fillna(0)
self.valid['prior_question_elapsed_time'] = self.valid['prior_question_elapsed_time'].fillna(0)
features_dicts = self.create_dics()
self.train , _ = self.add_past_feature(self.train, features_dicts)
self.valid , create_feats = self.add_past_feature(self.valid, features_dicts)
self.train = self.train[create_feats]
self.valid = self.valid[create_feats]
with open(f'./features/all_data/loop_feats_rolling_part_mean2.dill','wb') as f:
dill.dump(features_dicts,f)
if __name__ == "__main__":
args = get_arguments()
generate_features(globals(),args.force)
|
[
"[email protected]"
] | |
ced8a84d60c79476996223a6ce4c035cde0cec50
|
e1b3816615cce62ebe2b6c59b0eb3fbd3693d73b
|
/solutions/606-construct-string-from-binary-tree/construct-string-from-binary-tree.py
|
392a31dd693a4f747d9d30c1798437fa800fed15
|
[] |
no_license
|
fagan2888/leetcode-6
|
1fb18979ffacb82d5db77988b38ecd7371b428b9
|
14176f1752e2bb94dec51bd90dfd412896ed84de
|
refs/heads/master
| 2022-01-10T03:27:51.388066 | 2019-06-15T14:13:48 | 2019-06-15T14:13:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,655 |
py
|
# -*- coding:utf-8 -*-
# You need to construct a string consisting of parentheses and integers from a binary tree, using the preorder traversal.
#
# The null node needs to be represented by empty parenthesis pair "()". And you need to omit all the empty parenthesis pairs that don't affect the one-to-one mapping relationship between the string and the original binary tree.
#
# Example 1:
#
# Input: Binary tree: [1,2,3,4]
# 1
# / \
# 2 3
# /
# 4
#
# Output: "1(2(4))(3)"
# Explanation: Originally it needs to be "1(2(4)())(3()())", but you need to omit all the unnecessary empty parenthesis pairs. And it will be "1(2(4))(3)".
#
#
#
# Example 2:
#
# Input: Binary tree: [1,2,3,null,4]
# 1
# / \
# 2 3
# \
# 4
#
# Output: "1(2()(4))(3)"
# Explanation: Almost the same as the first example, except we can't omit the first parenthesis pair to break the one-to-one mapping relationship between the input and the output.
#
#
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def tree2str(self, t):
"""
:type t: TreeNode
:rtype: str
"""
r=""
if t==None:
return r
else:
r+=str(t.val)
if t.left!=None:
r+="("+Solution.tree2str(self,t.left)+")"
elif t.right!=None:
r+="()"
if t.right!=None:
r+="("+Solution.tree2str(self,t.right)+")"
return r
|
[
"[email protected]"
] | |
c0fc556ea5948aed614ee57063c5122ab7b17078
|
ce0f8956c4c308c67bd700d31fe8d5a17b16ac08
|
/Python3/src/22 JSON and XML/XML/etree/03_createFile.py
|
2640cfed91f9383eba24796f1662d88f4bca71eb
|
[] |
no_license
|
seddon-software/python3
|
795ae8d22a172eea074b71d6cd49d79e388d8cc6
|
d5e6db1509a25c1a3040d5ae82d757539a2ff730
|
refs/heads/master
| 2021-07-10T15:48:31.893757 | 2020-07-16T20:29:22 | 2020-07-16T20:29:22 | 175,872,757 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,349 |
py
|
############################################################
#
# createFile.py
#
############################################################
from xml.etree.ElementTree import Element, ElementTree
from xml.etree.ElementTree import _namespace_map
"""
Code to generate the following document:
<book:book xmlns:book="http://www.demo.com/book">
<book:title>XMLBeans</book:title>
<book:author first="John" last="Smith" />
<book:publisher>Wiley</book:publisher>
<book:pubdate>2007-06+01:00</book:pubdate>
<book:cost>23.79</book:cost>
</book:book>
"""
# setup namespace and alias
ns = "http://www.demo.com/book"
uri = "{" + ns + "}"
_namespace_map[ns] = 'book'
# define elements
root = Element(uri + "book")
title = Element(uri + "title")
author = Element(uri + "author")
publisher = Element(uri + "publisher")
pubdate = Element(uri + "pubdate")
cost = Element(uri + "cost")
# add attributes
author.attrib["first"] = "John"
author.attrib["last"] = "Smith"
# add text
title.text = "XMLBeans"
publisher.text = "Wiley"
pubdate.text = "2007-06+01:00"
cost.text = "23.79"
# build tree
root.append(title)
root.append(author)
root.append(publisher)
root.append(pubdate)
root.append(cost)
# write to file
tree = ElementTree(root)
tree.write("xml/new_book.xml")
|
[
"[email protected]"
] | |
ba9181dae8856bb6fc00c53e168e202b8f15e7ea
|
697af415566ba649502bd18751a6521ac526892c
|
/2020_VERSIONS/get_hrrr_plots.py
|
7e56cb6022afdc1b32c5b2d2a320ab4ae25b9cd6
|
[] |
no_license
|
srbrodzik/impacts-scripts
|
df44c8f34746499b8397b5b1a4ad09859b4cc8d4
|
263c7545bbb912bbcea563a21d0619e5112b1788
|
refs/heads/master
| 2023-05-31T05:01:09.558641 | 2023-05-22T23:24:52 | 2023-05-22T23:24:52 | 215,638,568 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,693 |
py
|
import os
import sys
import time
from datetime import timedelta
from datetime import datetime
import requests
from bs4 import BeautifulSoup
import shutil
def listFD(url, ext=''):
page = requests.get(url).text
#print page
soup = BeautifulSoup(page, 'html.parser')
return [url + '/' + node.get('href') for node in soup.find_all('a') if node.get('href').endswith(ext)]
# User inputs
debug = 1
secsPerDay = 86400
pastSecs = secsPerDay/4 # 6 hours
secsPerRun = secsPerDay/24
deltaBetweenForecastHours = 1
lastForecastHour = 6
hrrrUrl = 'https://tropicaltidbits.com/analysis/models/hrrr'
targetDirBase = '/home/disk/bob/impacts/model/hrrr_03km'
products = ['ref_frzn_us','ir_us','T2m_us']
has_anal_prod = [0,1,1]
catalogBaseDir = '/home/disk/funnel/impacts-website/archive/model/hrrr_03km'
# get model date and time closest to current time
nowTime = time.gmtime()
now = datetime(nowTime.tm_year, nowTime.tm_mon, nowTime.tm_mday,
nowTime.tm_hour, nowTime.tm_min, nowTime.tm_sec)
nowDateStr = now.strftime("%Y%m%d")
nowHourStr = now.strftime("%H")
lastModelDateTimeStr = nowDateStr+nowHourStr
if debug:
print >>sys.stderr, "lastModelDateTimeStr = ", lastModelDateTimeStr
# compute start time
pastDelta = timedelta(0, pastSecs)
lastModelDateTime = datetime.strptime(lastModelDateTimeStr,'%Y%m%d%H')
startTime = lastModelDateTime - pastDelta
startDateHourStr = startTime.strftime("%Y%m%d%H")
startDateStr = startTime.strftime("%Y%m%d")
if debug:
print >>sys.stderr, "startDateHourStr = ", startDateHourStr
# set up list of model runs to be checked
nRuns = (pastSecs / secsPerRun) + 1
dateStrList = []
dateHourStrList = []
for iRun in range(0, nRuns):
deltaSecs = timedelta(0, iRun * secsPerRun)
dayTime = lastModelDateTime - deltaSecs
dateStr = dayTime.strftime("%Y%m%d")
dateHourStr = dayTime.strftime("%Y%m%d%H")
dateStrList.append(dateStr)
dateHourStrList.append(dateHourStr)
if debug:
print >>sys.stderr, "dateHourStrList = ", dateHourStrList
for t in range(0,nRuns):
currentModelRun = dateHourStrList[t]
for i in range(0,len(products)):
if debug:
print >>sys.stderr, "Processing ", currentModelRun, " run for ", products[i], " data"
# get list of files on server for this run and this product
# only interested in forecasts up to and including 'lastForecastHour'
urlFileList = []
#urlDateList = []
#urlDateTimeList = []
url = hrrrUrl+'/'+dateHourStrList[t]+'/'
ext = 'png'
for file in listFD(url, ext):
tmp = os.path.basename(file)
(base,ext) = os.path.splitext(tmp)
parts = base.split('_')
forecast_num = parts[-1]
if len(forecast_num) < 2:
forecast_num = '0'+forecast_num
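            # products with an hour-0 analysis frame have one extra image per run, so allow one more index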
if has_anal_prod[i]:
last_forecast_num = str(lastForecastHour/deltaBetweenForecastHours + 1)
else:
last_forecast_num = str(lastForecastHour/deltaBetweenForecastHours)
if products[i] in tmp and int(forecast_num) <= int(last_forecast_num):
urlFileList.append(tmp)
#if debug:
# print >>sys.stderr, "urlFileList = ", urlFileList
if len(urlFileList) == 0:
if debug:
print >>sys.stderr, "WARNING: ignoring run and product - no data on server"
print >>sys.stderr, " for model run time: ", currentModelRun
print >>sys.stderr, " for product : ", products[i]
else:
# make target directory, if necessary, and cd to it
#targetDir = targetDirBase+'/'+dateHourStrList[i]+'/'+products[i]
targetDir = targetDirBase+'/'+currentModelRun
if not os.path.exists(targetDir):
os.makedirs(targetDir)
os.chdir(targetDir)
# get local file list - i.e. those which have already been downloaded
localFileList = os.listdir('.')
#localFileList.reverse()
#if debug:
# print >>sys.stderr, " localFileList: ", localFileList
# get url file list (not sure I need this)
#urlFileList.sort()
#urlFileList.reverse()
# loop through the url file list, downloading those that have
# not yet been downloaded
if debug:
print >>sys.stderr, "Starting to loop through url file list"
for idx,urlFileName in enumerate(urlFileList,0):
if debug:
print >>sys.stderr, " idx = ", idx
print >>sys.stderr, " urlFileName = ", urlFileName
#print >>sys.stderr, " urlDateList[",idx,"] = ", urlDateList[idx]
#print >>sys.stderr, " dateStr = ", dateStr
if urlFileName not in localFileList:
if debug:
print >>sys.stderr, urlFileName," not in localFileList -- get file"
try:
command = 'wget '+hrrrUrl+'/'+currentModelRun+'/'+urlFileName
os.system(command)
except Exception as e:
                        print >>sys.stderr, " wget failed, exception: ", e
continue
# rename file and move to web server
# first get forecast_hour
(base,ext) = os.path.splitext(urlFileName)
parts = base.split('_')
if has_anal_prod[i]:
forecast_hour = str( (int(parts[-1])-1) * deltaBetweenForecastHours)
else:
forecast_hour = str(int(parts[-1])*deltaBetweenForecastHours)
if len(forecast_hour) == 1:
forecast_hour = '0'+forecast_hour
if debug:
print >>sys.stderr, " forecast_hour = ", forecast_hour
# create full file name
newFileName = 'model.hrrr_03km.'+currentModelRun+'00.'+forecast_hour+'_'+products[i]+'.png'
if debug:
print >>sys.stderr, " newFileName = ", newFileName
# check to make sure that web server path exists
catalogDir = catalogBaseDir+'/'+dateStrList[t]
if not os.path.exists(catalogDir):
os.makedirs(catalogDir)
# copy file to web server
shutil.copy(targetDir+'/'+urlFileName,catalogDir+'/'+newFileName)
|
[
"[email protected]"
] | |
ec0874d853f78a0f15d0b9d998d6f76eec5ea4d5
|
85fc4fcd841226c30b1a5824468eae95e6da3cd1
|
/grass.py
|
16b07bf614dc8a4d3288eed474e10b56cb855a1c
|
[] |
no_license
|
a5vh/kattis
|
1676060acfc6eef1d7c558299063646f3b7fcbf3
|
093cbeba31149fa0182ecc1bc8a43c60cdb1fa36
|
refs/heads/master
| 2020-08-17T19:54:11.754205 | 2019-11-26T01:34:29 | 2019-11-26T01:34:29 | 215,705,247 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 205 |
py
|
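# reads a price per square unit and L rectangular lawns, then prints the total price (sum of width * length * price)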
cost = float(input())
L = int(input())
sum = 0.0
for i in range(L):
width, length = input().split()
width = float(width)
length = float(length)
sum += (width*length)*cost
print(float(sum))
|
[
"[email protected]"
] | |
10451e6a52200948649ca456a7e8d0ab43de0548
|
4fa785f727f8bd223e6a5a5ede9f8f642c67b4be
|
/tests/server/rest/task_stats_test.py
|
46b9084353e5f85d694203ea5176dd4360833a20
|
[
"MIT"
] |
permissive
|
carlwitt/iceprod
|
d020ff56f233f4a6312cdfa1cb09b2781c630f0a
|
bd00be6051dd847bdbadfec276fbe7d8e3fef81a
|
refs/heads/master
| 2020-04-18T22:35:53.053632 | 2019-01-27T06:19:35 | 2019-01-27T06:19:35 | 167,798,458 | 0 | 0 | null | 2019-01-27T10:55:30 | 2019-01-27T10:55:30 | null |
UTF-8
|
Python
| false | false | 5,430 |
py
|
"""
Test script for REST/task_stats
"""
import logging
logger = logging.getLogger('rest_task_stats_test')
import os
import sys
import time
import random
import shutil
import tempfile
import unittest
import subprocess
import json
from functools import partial
from unittest.mock import patch, MagicMock
from tests.util import unittest_reporter, glob_tests
import ldap3
import tornado.web
import tornado.ioloop
from tornado.httpclient import AsyncHTTPClient, HTTPError
from tornado.testing import AsyncTestCase
from rest_tools.server import Auth, RestServer
from iceprod.server.modules.rest_api import setup_rest
class rest_task_stats_test(AsyncTestCase):
def setUp(self):
super(rest_task_stats_test,self).setUp()
self.test_dir = tempfile.mkdtemp(dir=os.getcwd())
def cleanup():
shutil.rmtree(self.test_dir)
self.addCleanup(cleanup)
try:
self.port = random.randint(10000,50000)
self.mongo_port = random.randint(10000,50000)
dbpath = os.path.join(self.test_dir,'db')
os.mkdir(dbpath)
dblog = os.path.join(dbpath,'logfile')
m = subprocess.Popen(['mongod', '--port', str(self.mongo_port),
'--dbpath', dbpath, '--smallfiles',
'--quiet', '--nounixsocket',
'--logpath', dblog])
self.addCleanup(partial(time.sleep, 0.05))
self.addCleanup(m.terminate)
config = {
'auth': {
'secret': 'secret'
},
'rest': {
'task_stats': {
'database': {'port':self.mongo_port},
}
},
}
routes, args = setup_rest(config)
self.server = RestServer(**args)
for r in routes:
self.server.add_route(*r)
self.server.startup(port=self.port)
self.token = Auth('secret').create_token('foo', type='user', payload={'role':'admin','username':'admin'})
except Exception:
logger.info('failed setup', exc_info=True)
@unittest_reporter(name='REST POST /tasks/<task_id>/task_stats')
def test_100_task_stats(self):
client = AsyncHTTPClient()
data = {
'dataset_id': 'foo',
'bar': 1.23456,
'baz': [1,2,3,4],
}
r = yield client.fetch('http://localhost:%d/tasks/%s/task_stats'%(self.port,'bar'),
method='POST', body=json.dumps(data),
headers={'Authorization': b'bearer '+self.token})
self.assertEqual(r.code, 201)
ret = json.loads(r.body)
task_stat_id = ret['result']
@unittest_reporter(name='REST GET /datasets/<dataset_id>/tasks/<task_id>/task_stats')
def test_200_task_stats(self):
client = AsyncHTTPClient()
data = {
'dataset_id': 'foo',
'bar': 1.23456,
'baz': [1,2,3,4],
}
task_id = 'bar'
r = yield client.fetch('http://localhost:%d/tasks/%s/task_stats'%(self.port,task_id),
method='POST', body=json.dumps(data),
headers={'Authorization': b'bearer '+self.token})
self.assertEqual(r.code, 201)
ret = json.loads(r.body)
task_stat_id = ret['result']
r = yield client.fetch('http://localhost:%d/datasets/%s/tasks/%s/task_stats'%(self.port,'foo',task_id),
headers={'Authorization': b'bearer '+self.token})
self.assertEqual(r.code, 200)
ret = json.loads(r.body)
self.assertEqual(len(ret), 1)
self.assertIn(task_stat_id, ret)
self.assertIn('task_id', ret[task_stat_id])
self.assertEqual(task_id, ret[task_stat_id]['task_id'])
self.assertEqual(data, ret[task_stat_id]['stats'])
# note: the name is so long it needs a break to wrap correctly
@unittest_reporter(name='REST GET /datasets/<dataset_id>/tasks/<task_id>/task_stats/<task_stat_id>')
def test_210_task_stats(self):
client = AsyncHTTPClient()
data = {
'dataset_id': 'foo',
'bar': 1.23456,
'baz': [1,2,3,4],
}
task_id = 'bar'
r = yield client.fetch('http://localhost:%d/tasks/%s/task_stats'%(self.port,task_id),
method='POST', body=json.dumps(data),
headers={'Authorization': b'bearer '+self.token})
self.assertEqual(r.code, 201)
ret = json.loads(r.body)
task_stat_id = ret['result']
r = yield client.fetch('http://localhost:%d/datasets/%s/tasks/%s/task_stats/%s'%(self.port,'foo',task_id,task_stat_id),
headers={'Authorization': b'bearer '+self.token})
self.assertEqual(r.code, 200)
ret = json.loads(r.body)
self.assertEqual(task_stat_id, ret['task_stat_id'])
self.assertEqual(task_id, ret['task_id'])
self.assertEqual(data, ret['stats'])
def load_tests(loader, tests, pattern):
suite = unittest.TestSuite()
alltests = glob_tests(loader.getTestCaseNames(rest_task_stats_test))
suite.addTests(loader.loadTestsFromNames(alltests,rest_task_stats_test))
return suite
|
[
"[email protected]"
] | |
e65dc5a581a15d57eca06f9512858a6938fe718e
|
b049a961f100444dde14599bab06a0a4224d869b
|
/sdk/python/pulumi_azure_native/apimanagement/v20180101/__init__.py
|
fc73bf57d9496c3fbc71a455e1a1eea44a2efc5b
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-azure-native
|
b390c88beef8381f9a71ab2bed5571e0dd848e65
|
4c499abe17ec6696ce28477dde1157372896364e
|
refs/heads/master
| 2023-08-30T08:19:41.564780 | 2023-08-28T19:29:04 | 2023-08-28T19:29:04 | 172,386,632 | 107 | 29 |
Apache-2.0
| 2023-09-14T13:17:00 | 2019-02-24T20:30:21 |
Python
|
UTF-8
|
Python
| false | false | 764 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .api_diagnostic import *
from .api_diagnostic_logger import *
from .backend import *
from .diagnostic import *
from .diagnostic_logger import *
from .get_api_diagnostic import *
from .get_backend import *
from .get_diagnostic import *
from .get_logger import *
from .get_subscription import *
from .get_user import *
from .group_user import *
from .logger import *
from .notification_recipient_user import *
from .subscription import *
from .user import *
from ._inputs import *
from . import outputs
|
[
"[email protected]"
] | |
5eee1f0c6972dcc67edd83dc03bce66c5ec25a2f
|
503f5089422a97dc6f496cb7ecdaaf711611e5c0
|
/ki/remote.py
|
c854b82580334b9df169d91e404712852fbe6ab3
|
[] |
no_license
|
jd/ki
|
b3e782ed176ea38099aff8ba0aea4e1c06ba754b
|
343eeee119e2167a52e882d7772ecf3fe8f04d3a
|
refs/heads/main
| 2023-05-06T07:06:47.694980 | 2012-03-13T16:17:26 | 2012-03-13T16:17:26 | 363,116,328 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,452 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ki.remote -- Git based file system storage remote access
#
# Copyright © 2011 Julien Danjou <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
from .config import Configurable, Config, BUS_INTERFACE
from .objects import FileBlock, FetchError
from dulwich.client import get_transport_and_path
from dulwich.errors import HangupException
import dbus.service
import uuid
class Remote(dbus.service.Object, Configurable):
_id_ref = "refs/tags/id"
def __init__(self, storage, name, url, weight=100):
self.url = url
self.weight = weight
self.storage = storage
self.name = name
self.client, self.path = get_transport_and_path(url)
super(Remote, self).__init__(storage.bus,
"%s/remotes/%s" % (storage.__dbus_object_path__, name))
@dbus.service.method(dbus_interface="%s.Remote" % BUS_INTERFACE,
out_signature='s')
def GetURL(self):
return self.url
@dbus.service.method(dbus_interface="%s.Remote" % BUS_INTERFACE,
out_signature='s')
def GetName(self):
return self.name
@dbus.service.method(dbus_interface="%s.Remote" % BUS_INTERFACE,
out_signature='a{ss}')
def GetRefs(self):
return self.refs
@dbus.service.method(dbus_interface="%s.Remote" % BUS_INTERFACE,
out_signature='i')
def GetWeight(self):
return self.weight
@dbus.service.method(dbus_interface="%s.Remote" % BUS_INTERFACE,
out_signature='s')
def GetID(self):
return self.id
def fetch_sha1s(self, sha1s):
return self.fetch(lambda refs: sha1s)
@property
def id(self):
"""Fetch remote id."""
try:
return self._id
except AttributeError:
try:
self._id = str(FileBlock(self.storage, self.refs[Remote._id_ref]))
except KeyError:
f = FileBlock(self.storage)
f.data = str(uuid.uuid4())
def determine_wants(refs):
newrefs = refs.copy()
newrefs[Remote._id_ref] = f.store()
return newrefs
self.push(determine_wants)
self._id = str(f)
return self._id
@property
def config(self):
"""Fetch configuration from the remote."""
try:
return Config(self.storage, self.on_config_store, self.storage[self.refs[Config.ref]])
except KeyError:
return Config(self.storage, self.on_config_store)
def on_config_store(self, sha1):
"""Store the config on the remote."""
def determine_wants(oldrefs):
newrefs = oldrefs.copy()
newrefs[Config.ref] = sha1
return newrefs
self.push(determine_wants)
@property
def refs(self):
"""Connect to the remote and returns all the refs it has."""
return self.fetch(lambda refs: [])
@dbus.service.signal(dbus_interface="%s.Remote" % BUS_INTERFACE,
signature='as')
def FetchProgress(self, status):
pass
def fetch(self, determine_wants=None):
"""Fetch data from the remote.
        The function passed in determine_wants is called with the refs dict as first and only argument:
{ "refs/heads/master": "08a1c9f9742bcbd27c44fb84b662c68fabd995e1",
… }
The determine_wants function should returns a list of SHA1 to fetch."""
return self.client.fetch(self.path, self.storage, determine_wants, self.FetchProgress)
def push(self, determine_wants):
"""Push data to the remote.
The function passed in determine_wants is called with the refs dict as first and only argument:
{ "refs/heads/master": "08a1c9f9742bcbd27c44fb84b662c68fabd995e1",
… } """
return self.client.send_pack(self.path,
determine_wants,
self.storage.object_store.generate_pack_contents)
def __le__(self, other):
if isinstance(other, Remote):
return self.weight <= other.weight
return self.weight <= other
def __lt__(self, other):
if isinstance(other, Remote):
return self.weight < other.weight
return self.weight < other
def __ge__(self, other):
if isinstance(other, Remote):
return self.weight >= other.weight
return self.weight >= other
def __gt__(self, other):
if isinstance(other, Remote):
return self.weight > other.weight
return self.weight > other
class Syncer(threading.Thread):
def __init__(self, storage):
self.storage = storage
super(Syncer, self).__init__()
self.daemon = True
self.name = "Syncer for %s" % self.storage.path
def run(self):
while True:
# XXX configure timeout
print "WAIT"
self.storage.must_be_sync.wait(5)
self.storage.must_be_sync.clear()
print "END WAIT"
print "PUSH"
try:
self.storage.push()
except HangupException as e:
print "> Unable to push: %s" % str(e)
print "FETCH"
try:
self.storage.fetch()
except HangupException as e:
print "> Unable to fetch: %s" % str(e)
print "FETCH BLOBS"
try:
self.storage.fetch_blobs()
except FetchError as e:
print "> Unable to fetch blobs: %s" % str(e)
print "UPDATE FROM REMOTES"
self.storage.update_from_remotes()
|
[
"[email protected]"
] | |
ff42f8804ec94ff50d0a4adaeef22c31c4a5ffae
|
a0c31af5f4bfbe22b70144c10f7d86cf1184643f
|
/SWEA/D2/swea4869.py
|
a4bdcaf424186dbc110410f40ec353f88ab0c5dd
|
[] |
no_license
|
ejolie/problem-solving
|
63a8eb39de11f7ea0525976c9c03b7f7675075c5
|
b51fbf71b72d837897db3c04cbc4037b6f6c11f7
|
refs/heads/master
| 2021-12-08T00:00:02.155359 | 2021-12-05T14:16:43 | 2021-12-05T14:16:43 | 138,751,091 | 5 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 312 |
py
|
'''
4869. 종이붙이기
'''
def solve(n):
memo = [0] * 1000
memo[0], memo[1], memo[2], = 0, 1, 3
for i in range(3, n+1):
memo[i] = memo[i-1] + 2 * memo[i-2]
return memo[n]
tc = int(input())
for t in range(1, tc+1):
n = int(input()) // 10
res = solve(n)
print(f'#{t} {res}')
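# Recurrence check (illustrative): an input of 30 gives n = 3 and
# memo[3] = memo[2] + 2*memo[1] = 3 + 2 = 5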
|
[
"[email protected]"
] | |
b97544b29ef011a1bb3f7e272cad39405a3fb6fe
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-artifact-registry/samples/generated_samples/artifactregistry_v1_generated_artifact_registry_update_repository_async.py
|
b63934b97c65c789e9fed24aaf4c1f93d927cd44
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 |
Apache-2.0
| 2023-09-14T21:45:18 | 2014-01-28T15:51:47 |
Python
|
UTF-8
|
Python
| false | false | 1,900 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateRepository
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-artifact-registry
# [START artifactregistry_v1_generated_ArtifactRegistry_UpdateRepository_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
async def sample_update_repository():
# Create a client
client = artifactregistry_v1.ArtifactRegistryAsyncClient()
# Initialize request argument(s)
request = artifactregistry_v1.UpdateRepositoryRequest(
)
# Make the request
response = await client.update_repository(request=request)
# Handle the response
print(response)
# [END artifactregistry_v1_generated_ArtifactRegistry_UpdateRepository_async]
|
[
"[email protected]"
] | |
48c3584ee392fe27f7544c8d6fee6e955d5afa00
|
282d0a84b45b12359b96bbf0b1d7ca9ee0cb5d19
|
/Malware1/venv/Lib/site-packages/sklearn/ensemble/_hist_gradient_boosting/grower.py
|
eb2c27208662d5721e07c901e969a66e7645e995
|
[] |
no_license
|
sameerakhtar/CyberSecurity
|
9cfe58df98495eac6e4e2708e34e70b7e4c055d3
|
594973df27b4e1a43f8faba0140ce7d6c6618f93
|
refs/heads/master
| 2022-12-11T11:53:40.875462 | 2020-09-07T23:13:22 | 2020-09-07T23:13:22 | 293,598,094 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 130 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:1fde0343be64a238fb4b62a8dce7854c50385b5236eeb3f1d75b38b1778d4544
size 19176
|
[
"[email protected]"
] | |
e16387b80fa3552314a78fda622b70dc3aebb153
|
de3f3575121df3188754145a43b7c10499305f37
|
/testCsv.py
|
1d577e4fb3c6d01b0fe7e1bc61ae52baa313c03f
|
[] |
no_license
|
bobosky/GeoLifeDataMining
|
cbd24422959887575c3f15415988e4e43e6ed4b4
|
54a3d1727633bc4e7c43893b14e570fd8fce2068
|
refs/heads/master
| 2020-05-03T09:40:17.872102 | 2018-11-08T17:57:18 | 2018-11-08T17:57:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 132 |
py
|
import numpy as np
import pandas as pd
path = 'data/data.csv'
df = pd.read_csv(path)
print(df.head())
print(df.shape)
|
[
"[email protected]"
] | |
6fd61b04aee45278a3fe0c68198e4cb3b20772d6
|
44ddd25c6aa008cc0a814f9f49b2344c6a59aedb
|
/lib/coloraide/spaces/a98_rgb_linear.py
|
c5f0c249dad74d33c454e08de369aeef8a33f908
|
[
"MIT"
] |
permissive
|
facelessuser/ColorHelper
|
eb757896fa6e4a9029090188fad789587dc2ed06
|
ad4d779bff57a65b7c77cda0b79c10cf904eb817
|
refs/heads/master
| 2023-08-31T20:51:30.390633 | 2023-08-28T15:53:39 | 2023-08-28T15:53:39 | 31,641,842 | 279 | 41 |
MIT
| 2023-09-06T23:37:41 | 2015-03-04T06:27:11 |
Python
|
UTF-8
|
Python
| false | false | 1,610 |
py
|
"""Linear A98 RGB color class."""
from ..cat import WHITES
from .srgb import sRGB
from .. import algebra as alg
from ..types import Vector
RGB_TO_XYZ = [
[0.5766690429101307, 0.1855582379065463, 0.18822864623499472],
[0.2973449752505361, 0.6273635662554661, 0.07529145849399789],
[0.027031361386412343, 0.07068885253582723, 0.9913375368376389]
]
XYZ_TO_RGB = [
[2.0415879038107456, -0.5650069742788595, -0.34473135077832956],
[-0.9692436362808795, 1.8759675015077202, 0.0415550574071756],
[0.013444280632031147, -0.11836239223101837, 1.0151749943912054]
]
def lin_a98rgb_to_xyz(rgb: Vector) -> Vector:
"""
Convert an array of linear-light a98-rgb values to CIE XYZ using D50.D65.
(so no chromatic adaptation needed afterwards)
http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
which has greater numerical precision than section 4.3.5.3 of
https://www.adobe.com/digitalimag/pdfs/AdobeRGB1998.pdf
"""
return alg.dot(RGB_TO_XYZ, rgb, dims=alg.D2_D1)
def xyz_to_lin_a98rgb(xyz: Vector) -> Vector:
"""Convert XYZ to linear-light a98-rgb."""
return alg.dot(XYZ_TO_RGB, xyz, dims=alg.D2_D1)
class A98RGBLinear(sRGB):
"""Linear A98 RGB class."""
BASE = "xyz-d65"
NAME = "a98-rgb-linear"
SERIALIZE = ('--a98-rgb-linear',)
WHITE = WHITES['2deg']['D65']
def to_base(self, coords: Vector) -> Vector:
"""To XYZ from A98 RGB."""
return lin_a98rgb_to_xyz(coords)
def from_base(self, coords: Vector) -> Vector:
"""From XYZ to A98 RGB."""
return xyz_to_lin_a98rgb(coords)
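# Illustrative usage (not part of the original module): a round trip through
# XYZ should recover a linear-light a98-rgb triple to within floating point
# error, e.g.
#
#   xyz = lin_a98rgb_to_xyz([0.25, 0.5, 0.75])
#   rgb = xyz_to_lin_a98rgb(xyz)   # approximately [0.25, 0.5, 0.75]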
|
[
"[email protected]"
] | |
14a6e111047115fb194beef463f65d4a8c6f9c42
|
7d9d3d5ce2ac19221163d54a94c025993db0af4f
|
/autotest/ogr/ogr_as_sqlite_extension.py
|
196c19f4dacaaa9d6eaa16bd697cd2fcec173aff
|
[
"MIT"
] |
permissive
|
dcgull/gdal
|
5408adad77d001db32173bba547b447220b5e9a2
|
a5e2a7b54db955bd061ebfc6d69aa2dd752b120c
|
refs/heads/master
| 2020-04-03T13:30:40.013172 | 2013-10-11T12:07:57 | 2013-10-11T12:07:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,417 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id: ogr_as_sqlite_extension.py 25408 2012-12-30 21:41:43Z rouault $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test GDAL as a SQLite3 dynamically loaded extension
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2012, Even Rouault <even dot rouault at mines dash paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
# This file is meant at being run by ogr_virtualogr_3()
# This is a bit messy with heavy use of ctypes. The sqlite3 python module
# is rarely compiled with support of extension loading, so we just simulate
# what a tiny C program would do
import sys
def do(sqlite3name, gdalname):
try:
import ctypes
except:
print('skip')
sys.exit(0)
sqlite_handle = ctypes.cdll.LoadLibrary(sqlite3name)
if sqlite_handle is None:
print('skip')
sys.exit(0)
db = ctypes.c_void_p(0)
pdb = ctypes.pointer(db)
if hasattr(sqlite_handle, 'sqlite3_open'):
ret = sqlite_handle.sqlite3_open(':memory:', pdb)
elif hasattr(sqlite_handle, 'SPLite3_open'):
ret = sqlite_handle.SPLite3_open(':memory:', pdb)
else:
print('skip')
sys.exit(0)
if ret != 0:
print('Error sqlite3_open ret = %d' % ret)
sys.exit(1)
if hasattr(sqlite_handle, 'sqlite3_enable_load_extension'):
ret = sqlite_handle.sqlite3_enable_load_extension(db, 1)
elif hasattr(sqlite_handle, 'SPLite3_enable_load_extension'):
ret = sqlite_handle.SPLite3_enable_load_extension(db, 1)
else:
print('skip')
sys.exit(0)
if ret != 0:
print('skip')
sys.exit(0)
gdalname = gdalname.encode('ascii')
if hasattr(sqlite_handle, 'sqlite3_load_extension'):
ret = sqlite_handle.sqlite3_load_extension(db, gdalname, None, None)
else:
ret = sqlite_handle.SPLite3_load_extension(db, gdalname, None, None)
if ret != 0:
print('Error sqlite3_load_extension ret = %d' % ret)
sys.exit(1)
tab = ctypes.c_void_p()
ptab = ctypes.pointer(tab)
nrow = ctypes.c_int(0)
pnrow = ctypes.pointer(nrow)
ncol = ctypes.c_int(0)
pncol = ctypes.pointer(ncol)
if hasattr(sqlite_handle, 'sqlite3_get_table'):
ret = sqlite_handle.sqlite3_get_table(db, 'SELECT ogr_version()'.encode('ascii'), ptab, pnrow, pncol, None)
else:
ret = sqlite_handle.SPLite3_get_table(db, 'SELECT ogr_version()'.encode('ascii'), ptab, pnrow, pncol, None)
if ret != 0:
print('Error sqlite3_get_table ret = %d' % ret)
sys.exit(1)
cast_tab = ctypes.cast(tab, ctypes.POINTER(ctypes.c_char_p))
sys.stdout.write(cast_tab[1].decode('ascii'))
sys.stdout.flush()
if hasattr(sqlite_handle, 'sqlite3_close'):
ret = sqlite_handle.sqlite3_close(db)
else:
ret = sqlite_handle.SPLite3_close(db)
if ret != 0:
sys.exit(1)
gdaltest_list = []
if __name__ == '__main__':
if len(sys.argv) != 3:
print('python ogr_as_sqlite_extension name_of_libsqlite3 name_of_libgdal')
sys.exit(1)
do(sys.argv[1], sys.argv[2])
|
[
"[email protected]"
] | |
0dcad145c3a9be191489a298a432ae4d0bec4600
|
619b9aae66e9787f335b2a31362bfee8e76672a7
|
/fa/jquery/templates/admin/edit.pt.py
|
b4da5d538f39f2d1845c335dd7f3f386c88e0f4c
|
[] |
no_license
|
pgiraud/fa.jquery
|
150d8653875477c8fb4152a811a9dafd81cbd709
|
793154f287cd1ed339ffad2d46dcb90e37c319af
|
refs/heads/master
| 2020-12-30T17:50:14.830285 | 2011-05-13T16:17:52 | 2011-05-13T16:17:52 | 1,691,472 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,699 |
py
|
registry = dict(version=0)
def bind():
from cPickle import loads as _loads
_lookup_attr = _loads('cchameleon.core.codegen\nlookup_attr\np1\n.')
_attrs_4362945488 = _loads('(dp1\nVclass\np2\nVui-icon ui-icon-circle-arrow-w\np3\ns.')
_init_scope = _loads('cchameleon.core.utils\necontext\np1\n.')
_re_amp = _loads("cre\n_compile\np1\n(S'&(?!([A-Za-z]+|#[0-9]+);)'\np2\nI0\ntRp3\n.")
_attrs_4362944592 = _loads('(dp1\nVname\np2\nV_method\np3\nsVtype\np4\nVhidden\np5\nsVvalue\np6\nVPUT\np7\ns.')
_attrs_4362944656 = _loads('(dp1\nVclass\np2\nVfa_controls\np3\ns.')
_attrs_4362923984 = _loads('(dp1\n.')
_attrs_4362923856 = _loads('(dp1\nVaction\np2\nV\nsVmethod\np3\nVPOST\np4\nsVenctype\np5\nVmultipart/form-data\np6\ns.')
_attrs_4362944784 = _loads('(dp1\nVname\np2\nVnext\np3\nsVvalue\np4\nV\nsVtype\np5\nVhidden\np6\nsVid\np7\nVnext\np8\ns.')
_attrs_4362944976 = _loads('(dp1\nVclass\np2\nVui-icon ui-icon-check\np3\ns.')
_init_stream = _loads('cchameleon.core.generation\ninitialize_stream\np1\n.')
_attrs_4362923728 = _loads('(dp1\n.')
_init_default = _loads('cchameleon.core.generation\ninitialize_default\np1\n.')
_attrs_4362945104 = _loads('(dp1\nVtype\np2\nVsubmit\np3\ns.')
_attrs_4362945360 = _loads('(dp1\nVtype\np2\nVsubmit\np3\ns.')
_attrs_4362944912 = _loads("(dp1\nVhref\np2\nV#\nsVonclick\np3\nVvar f = jQuery(this).parents('form'); f.attr('action', window.location.href.replace('/edit', '/delete'));f.submit();\np4\nsVclass\np5\nVui-widget-header ui-widget-link ui-widget-button ui-state-error ui-corner-all\np6\ns.")
_attrs_4362944848 = _loads("(dp1\nVhref\np2\nV#\nsVonclick\np3\nVjQuery(this).parents('form').submit();\np4\nsVclass\np5\nVui-widget-header ui-widget-link ui-widget-button ui-corner-all\np6\ns.")
_attrs_4362945168 = _loads('(dp1\nVclass\np2\nVfa_controls ui-widget-header ui-widget-link ui-corner-all\np3\ns.')
_init_tal = _loads('cchameleon.core.generation\ninitialize_tal\np1\n.')
_attrs_4362945232 = _loads('(dp1\nVclass\np2\nVui-icon ui-icon-trash\np3\ns.')
def render(econtext, rcontext=None):
macros = econtext.get('macros')
_translate = econtext.get('_translate')
_slots = econtext.get('_slots')
target_language = econtext.get('target_language')
u'_init_stream()'
(_out, _write, ) = _init_stream()
u'_init_tal()'
(_attributes, repeat, ) = _init_tal()
u'_init_default()'
_default = _init_default()
u'None'
default = None
u'None'
_domain = None
u"main.macros['master']"
_metal = _lookup_attr(econtext['main'], 'macros')['master']
def _callback_main(econtext, _repeat, _out=_out, _write=_write, _domain=_domain, **_ignored):
if _repeat:
repeat.update(_repeat)
attrs = _attrs_4362923728
_write(u'<div>\n ')
attrs = _attrs_4362923856
u"''"
_write(u'<form action="" method="POST" enctype="multipart/form-data">\n ')
_default.value = default = ''
u'fs.render()'
_content = _lookup_attr(econtext['fs'], 'render')()
attrs = _attrs_4362923984
u'_content'
_write(u'<div>')
_tmp1 = _content
_tmp = _tmp1
if (_tmp.__class__ not in (str, unicode, int, float, )):
try:
_tmp = _tmp.__html__
except:
_tmp = _translate(_tmp, domain=_domain, mapping=None, target_language=target_language, default=None)
else:
_tmp = _tmp()
_write(_tmp)
_tmp = None
if (_tmp is not None):
if not isinstance(_tmp, unicode):
_tmp = str(_tmp)
_write(_tmp)
_write(u'</div>\n ')
attrs = _attrs_4362944592
_write(u'<input type="hidden" name="_method" value="PUT" />\n ')
attrs = _attrs_4362944656
_write(u'<div class="fa_controls">\n ')
attrs = _attrs_4362944784
_write(u'<input type="hidden" id="next" name="next" value="" />\n ')
attrs = _attrs_4362944848
_write(u'<a class="ui-widget-header ui-widget-link ui-widget-button ui-corner-all" href="#" onclick="jQuery(this).parents(\'form\').submit();">\n ')
attrs = _attrs_4362944976
u"''"
_write(u'<span class="ui-icon ui-icon-check"></span>\n ')
_default.value = default = ''
u'Save'
_content = u'Save'
u'_content'
_tmp1 = _content
_tmp = _tmp1
if (_tmp.__class__ not in (str, unicode, int, float, )):
try:
_tmp = _tmp.__html__
except:
_tmp = _translate(_tmp, domain=_domain, mapping=None, target_language=target_language, default=None)
else:
_tmp = _tmp()
_write(_tmp)
_tmp = None
if (_tmp is not None):
if not isinstance(_tmp, unicode):
_tmp = str(_tmp)
if ('&' in _tmp):
if (';' in _tmp):
_tmp = _re_amp.sub('&', _tmp)
else:
_tmp = _tmp.replace('&', '&')
if ('<' in _tmp):
_tmp = _tmp.replace('<', '<')
if ('>' in _tmp):
_tmp = _tmp.replace('>', '>')
_write(_tmp)
_write(u'\n ')
attrs = _attrs_4362945104
_write(u'<input type="submit" />\n </a>\n ')
attrs = _attrs_4362944912
_write(u'<a class="ui-widget-header ui-widget-link ui-widget-button ui-state-error ui-corner-all" href="#" onclick="var f = jQuery(this).parents(\'form\'); f.attr(\'action\', window.location.href.replace(\'/edit\', \'/delete\'));f.submit();">\n ')
attrs = _attrs_4362945232
u"''"
_write(u'<span class="ui-icon ui-icon-trash"></span>\n ')
_default.value = default = ''
u'Delete'
_content = u'Delete'
u'_content'
_tmp1 = _content
_tmp = _tmp1
if (_tmp.__class__ not in (str, unicode, int, float, )):
try:
_tmp = _tmp.__html__
except:
_tmp = _translate(_tmp, domain=_domain, mapping=None, target_language=target_language, default=None)
else:
_tmp = _tmp()
_write(_tmp)
_tmp = None
if (_tmp is not None):
if not isinstance(_tmp, unicode):
_tmp = str(_tmp)
if ('&' in _tmp):
if (';' in _tmp):
_tmp = _re_amp.sub('&', _tmp)
else:
_tmp = _tmp.replace('&', '&')
if ('<' in _tmp):
_tmp = _tmp.replace('<', '<')
if ('>' in _tmp):
_tmp = _tmp.replace('>', '>')
_write(_tmp)
_write(u'\n ')
attrs = _attrs_4362945360
_write(u'<input type="submit" />\n </a>\n ')
attrs = _attrs_4362945168
u'request.fa_url(request.model_name)'
_write(u'<a class="fa_controls ui-widget-header ui-widget-link ui-corner-all"')
_tmp1 = _lookup_attr(econtext['request'], 'fa_url')(_lookup_attr(econtext['request'], 'model_name'))
if (_tmp1 is _default):
_tmp1 = None
if ((_tmp1 is not None) and (_tmp1 is not False)):
if (_tmp1.__class__ not in (str, unicode, int, float, )):
_tmp1 = unicode(_translate(_tmp1, domain=_domain, mapping=None, target_language=target_language, default=None))
else:
if not isinstance(_tmp1, unicode):
_tmp1 = str(_tmp1)
if ('&' in _tmp1):
if (';' in _tmp1):
_tmp1 = _re_amp.sub('&', _tmp1)
else:
_tmp1 = _tmp1.replace('&', '&')
if ('<' in _tmp1):
_tmp1 = _tmp1.replace('<', '<')
if ('>' in _tmp1):
_tmp1 = _tmp1.replace('>', '>')
if ('"' in _tmp1):
_tmp1 = _tmp1.replace('"', '"')
_write(((' href="' + _tmp1) + '"'))
_write(u'>\n ')
attrs = _attrs_4362945488
u"''"
_write(u'<span class="ui-icon ui-icon-circle-arrow-w"></span>\n ')
_default.value = default = ''
u'Cancel'
_content = u'Cancel'
u'_content'
_tmp1 = _content
_tmp = _tmp1
if (_tmp.__class__ not in (str, unicode, int, float, )):
try:
_tmp = _tmp.__html__
except:
_tmp = _translate(_tmp, domain=_domain, mapping=None, target_language=target_language, default=None)
else:
_tmp = _tmp()
_write(_tmp)
_tmp = None
if (_tmp is not None):
if not isinstance(_tmp, unicode):
_tmp = str(_tmp)
if ('&' in _tmp):
if (';' in _tmp):
_tmp = _re_amp.sub('&', _tmp)
else:
_tmp = _tmp.replace('&', '&')
if ('<' in _tmp):
_tmp = _tmp.replace('<', '<')
if ('>' in _tmp):
_tmp = _tmp.replace('>', '>')
_write(_tmp)
_write(u'\n </a>\n </div>\n </form>\n </div>\n')
u"{'main': _callback_main}"
_tmp = {'main': _callback_main, }
u"main.macros['master']"
_metal.render(_tmp, _out=_out, _write=_write, _domain=_domain, econtext=econtext)
return _out.getvalue()
return render
__filename__ = '/Users/gawel/py/formalchemy_project/fa.jquery/fa/jquery/templates/admin/edit.pt'
registry[(None, True, '1488bdb950901f8f258549439ef6661a49aae984')] = bind()
|
[
"[email protected]"
] | |
08f8458a8dd2f4c2231f5131f93e9e29971d471a
|
52b5fa23f79d76883728d8de0bfd202c741e9c43
|
/kubernetes/test/test_v1beta1_replica_set.py
|
18fd0a6b9e4f4ded18a2d959d94bdc11e6ac95c0
|
[] |
no_license
|
kippandrew/client-python-tornado
|
5d00810f57035825a84e37ff8fc89a7e79aed8da
|
d479dfeb348c5dd2e929327d800fe033b5b3b010
|
refs/heads/master
| 2021-09-04T13:01:28.275677 | 2018-01-18T23:27:34 | 2018-01-18T23:27:34 | 114,912,995 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 971 |
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.8.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1beta1_replica_set import V1beta1ReplicaSet # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1beta1ReplicaSet(unittest.TestCase):
"""V1beta1ReplicaSet unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1ReplicaSet(self):
"""Test V1beta1ReplicaSet"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v1beta1_replica_set.V1beta1ReplicaSet() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
7a3ac6320527067a9046da167e160e4d3cf4874b
|
056879eddb853dbf8c96954d212d862282a0dee7
|
/basic/text2num.py
|
e2d87139aa40b95eb976dee89a871e1df1deacc5
|
[] |
no_license
|
pytutorial/samples
|
fed3965bc7ff3a81913bf24db7212dfbf6ab9411
|
850cdf87867ec4ac568405ab055ae9e40b636479
|
refs/heads/master
| 2022-08-14T23:26:31.606502 | 2022-07-26T02:55:16 | 2022-07-26T02:55:16 | 210,514,946 | 9 | 13 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,714 |
py
|
"""
Program that converts the Vietnamese reading of a 3-digit number into its numeric value
- Input : the Vietnamese words for a number in the range 1 to 999
- Output: the numeric value of that number
"""
bang_so1 = {'một' : 1, 'hai' : 2, 'ba' : 3, 'bốn' : 4, 'năm' : 5, 'sáu' : 6, 'bảy' : 7, 'tám' : 8, 'chín' : 9, 'mười' : 10}
bang_so2 = {'một' : 1, 'hai' : 2, 'ba' : 3, 'bốn' : 4, 'lăm' : 5, 'sáu' : 6, 'bảy' : 7, 'tám' : 8, 'chín' : 9}
bang_so3 = {'mươi' : 0, 'mốt' : 1, 'hai' : 2, 'ba' : 3, 'bốn' : 4, 'tư' : 4, 'lăm' : 5, 'sáu' : 6, 'bảy' : 7, 'tám' : 8, 'chín' : 9}
def convert2digits(words):
N = len(words)
if N == 1:
return bang_so1.get(words[0], -1)
chuc, donvi = -1, -1
if (N == 3 and words[1] == 'mươi') or N == 2:
chuc = bang_so1.get(words[0], -1)
donvi = bang_so3.get(words[-1], -1)
if N == 2 and words[0] == 'mười':
chuc = 1
donvi = bang_so2.get(words[1], -1)
if chuc >= 0 and donvi >= 0:
return 10 * chuc + donvi
return -1
def convert3digits(words):
N = len(words)
if N <= 1 or words[1] != 'trăm':
return convert2digits(words)
tram = bang_so1.get(words[0], -1)
if N == 2 and tram >= 0:
return 100*tram
if N == 4 and words[2] == 'lẻ':
donvi = bang_so1.get(words[3], -1)
if tram >= 0 and donvi >= 0:
return 100*tram + donvi
x = convert2digits(words[2:])
if tram >= 0 and x >= 0:
return 100*tram + x
return -1
def text2num(text):
return convert3digits(text.lower().split())
print(text2num('tám trăm năm mươi tư'))
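# Additional checks (illustrative); the expected values follow from the tables above:
print(text2num('mười lăm'))        # 15
print(text2num('hai trăm lẻ ba'))  # 203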
|
[
"[email protected]"
] | |
c1968fe6cfbb5444113b80f3730a3e5aeeba6f4b
|
dd7764ea75adb44bd3c51e862c9113c8da239b95
|
/quarry.wsgi
|
ac51a551dbbccfcf99e07b89e775f82a9ccf654d
|
[
"MIT"
] |
permissive
|
ToAruShiroiNeko/analytics-quarry-web
|
29a498c3a4cb49d85323a879a17996ed9a66ca92
|
e2346c5ec47d63f9514b8aef9190211fa5ff0e45
|
refs/heads/master
| 2020-12-11T03:49:07.429616 | 2015-05-15T14:23:37 | 2015-05-15T21:47:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 46 |
wsgi
|
from quarry.web.app import app as application
|
[
"[email protected]"
] | |
6f940feba72d8b1ff8c1ca3a405ead22e64a3171
|
bd17e9fc0e5978cb664037bffdcf618a893e0523
|
/python/dataio/reader/discrete_sequence_reader.py
|
0991026d62042e0bee801ae9d215617255917c4c
|
[] |
no_license
|
kedz/ntg
|
598513fb2c6e910ad11f40f031675a587eb7ec79
|
34f13b23a6850eb0c8a727a51e7aa49fd6aec098
|
refs/heads/master
| 2020-12-07T15:29:10.305416 | 2017-11-07T03:07:52 | 2017-11-07T03:07:52 | 95,521,368 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,243 |
py
|
import torch
from dataio.reader.reader_base import ReaderBase
from preprocessor import TextPreprocessor
from vocab import Vocab
class DiscreteSequenceReader(ReaderBase):
def __init__(self, field=0, strip=True, lowercase=True,
replace_digits=True, tokenizer=None,
unknown_token="_UNK_", special_tokens=None,
top_k=10000000, at_least=1, left_pad=None, right_pad=None,
offset_output=False):
if isinstance(special_tokens, str):
special_tokens = [special_tokens]
elif special_tokens is None:
special_tokens = []
if isinstance(left_pad, str):
left_pad = [left_pad]
elif left_pad is None:
left_pad = []
if isinstance(right_pad, str):
right_pad = [right_pad]
elif right_pad is None:
right_pad = []
for token in left_pad + right_pad:
if token not in special_tokens:
special_tokens.append(token)
self.left_pad_ = left_pad
self.right_pad_ = right_pad
self.offset_output_ = offset_output
v = Vocab(
unknown_token=unknown_token, special_tokens=special_tokens,
at_least=at_least, top_k=top_k)
pp = TextPreprocessor(
strip=strip, lowercase=lowercase, replace_digits=replace_digits,
tokenizer=tokenizer)
super(DiscreteSequenceReader, self).__init__(field, pp, v)
self.register_data("data_")
self.register_data("length_")
if self.offset_output:
self.register_data("data_offset_")
def process(self, string):
tokens = self.left_pad + self.preprocess(string) + self.right_pad
indices = [self.vocab.index(token) for token in tokens]
return indices
def save_data(self, datum):
if self.offset_output:
self.data_.append(datum[:-1])
self.data_offset_.append(datum[1:])
self.length_.append(len(datum) - 1)
else:
self.data_.append(datum)
self.length_.append(len(datum))
def info(self):
total = sum(v for k, v in self.vocab.count.items())
unique = len(self.vocab.count)
msg = "DiscreteSequenceReader found {} tokens with " \
"{} unique labels.\n".format(total, unique)
msg += "After pruning, vocabulary has {} unique tokens.\n".format(
self.vocab.size)
for i in range(1, min(self.vocab.size, 11)):
token = self.vocab.token(i)
count = self.vocab.count.get(token, 0)
msg += "{}) {} ({})\n".format(i, token, count)
if i < self.vocab.size:
msg += ":\n:\n:\n"
for i in range(self.vocab.size - 11, self.vocab.size):
token = self.vocab.token(i)
count = self.vocab.count.get(token, 0)
msg += "{}) {} ({})\n".format(i, token, count)
return msg
def finish(self, reset=True):
data_size = len(self.length_)
max_len = max(self.length_)
zed = tuple([0])
if self.offset_output:
for i in range(data_size):
if self.length_[i] < max_len:
self.data_[i] += zed * (max_len - self.length_[i])
self.data_offset_[i] += zed * (max_len - self.length_[i])
input = torch.LongTensor(self.data_)
output = torch.LongTensor(self.data_offset_)
length = torch.LongTensor(self.length_)
            finished_data = (input, output, length)
else:
for i in range(data_size):
if self.length_[i] < max_len:
self.data_[i] += zed * (max_len - self.length_[i])
data = torch.LongTensor(self.data_)
length = torch.LongTensor(self.length_)
            finished_data = (data, length)
if reset:
self.reset()
        return finished_data
@property
def offset_output(self):
return self.offset_output_
@property
def left_pad(self):
return self.left_pad_
@property
def right_pad(self):
return self.right_pad_
|
[
"[email protected]"
] | |
e70451a24f542e96fed0a9b76822a4bdadd5842e
|
3f6088cf1aaaddc18ca1c6f2d5bfc69590941d60
|
/Xianyang_vmd/projects/tempCodeRunnerFile.py
|
bf13b6244bf4469a0f975dfc031366cad3270751
|
[
"MIT"
] |
permissive
|
YX577/MonthlyRunoffForecastByAutoReg
|
80038b1b0401d0dbe9b4b67cf531298090815cf7
|
2d66c628141f001e4ffb3dc3b7520a0f0f0ff239
|
refs/heads/master
| 2022-03-30T10:48:30.165288 | 2020-01-17T02:36:47 | 2020-01-17T02:36:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3 |
py
|
vmd
|
[
"[email protected]"
] | |
f885ec0af4413843f7eeaa5784e43cc759c6288f
|
c1e4c5ee80eb8c820bbc0319f2803123ee4ab781
|
/misc/hashfiles.py
|
af58a723def1a6e16a757c8e9883af9b88906013
|
[] |
no_license
|
pstrinkle/thesis-source
|
f6a2835e2464ea7294b35bbfdfec1f586196fc90
|
91ed3b5a4230864d20db38f4f9b22a7c0a73f1ec
|
refs/heads/master
| 2021-01-17T17:50:09.448806 | 2016-06-13T21:15:53 | 2016-06-13T21:15:53 | 61,070,103 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,005 |
py
|
#! /usr/bin/python
__author__ = '[email protected]'
##
# @author: Patrick Trinkle
# Summer 2011
#
# @summary: Stuff.
#
import os
import sys
import hashlib
import misc
def usage():
"""Parameters."""
sys.stderr.write("usage: %s path\n" % sys.argv[0])
def main():
if len(sys.argv) != 2:
usage()
sys.exit(-1)
startpoint = sys.argv[1]
file_hashes = {}
for path in misc.get_file(startpoint):
        # use a separate name for the handle so the original path is not shadowed
        with open(path, "r") as infile:
            contents = infile.read()
hash = hashlib.sha512(contents).hexdigest()
try:
file_hashes[hash].append(path)
except KeyError:
file_hashes[hash] = []
file_hashes[hash].append(path)
for hash in file_hashes:
if len(file_hashes[hash]) > 1:
print "found possible duplicates"
for path in file_hashes[hash]:
print "\t%s" % path
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
b73546084cc6476dceee5b63449f2da885256011
|
5dc7dc7e33122e8c588eb6e13f23bf032c704d2e
|
/econ_platform_core/extensions/__init__.py
|
24d3751da8aa85d909e3378dd18c6abaa1729eb8
|
[
"Apache-2.0"
] |
permissive
|
brianr747/platform
|
a3319e84858345e357c1fa9a3916f92122775b30
|
84b1bd90fc2e35a51f32156a8d414757664b4b4f
|
refs/heads/master
| 2022-01-23T16:06:26.855556 | 2022-01-12T18:13:22 | 2022-01-12T18:13:22 | 184,085,670 | 3 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,506 |
py
|
"""
Platform extensions.
Place all modules that extend the platform (including monkey-patching the base code).
This module creates a load_extensions() function that imports *all* python source (*.py) modules in this directory.
More options (a user-configurable list?) will be added later.
Obviously, use at own risk!
Copyright 2019 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import importlib
import os
import econ_platform_core
# This function will be replaced with straight import statements. Only leaving this dynamic
# since the design is changing rapidly at this stage. Once the core has stabilised, we will just import
# the "core extensions". As a result, no point in figuring out unit test techniques.
# Also: may create a "ExtensionManager" class to do this work.
class ExtensionManager(econ_platform_core.PlatformEntity):
"""
Class to handle extension loading and status. Currently non-functional; code will migrate to using this.
This class just offers the interface (for code completion purposes; the real extension manager will be
defined in extensions.__init__.py
"""
def __init__(self):
super().__init__()
self.LoadedExtensions = []
self.FailedExtensions = []
self.DecoratedFailedExtensions = []
def load_extensions(): # pragma: nocover
"""
Imports all *.py files in this directory (in alphabetical order).
    Since the order of import will eventually matter, something will be needed to force an order of import operations.
    For now, not an issue (can just use the alphabetical order rule to fix problems).
    All errors are caught and largely ignored (other than listing the module that failed, and a text dump on the
    console).
Returns [loaded_extensions, failed_extensions]
The operations on import of an extension:
(1) The import itself. If you wish, you can just put a script that is executed.
(2) If the module has a variable (hopefully a string) with the name 'extension_name', that is used as the extension
name for display, otherwise it is the name of the text file.
(3) If the module has a main() function, it is called.
Since logging is not yet initialised, things are dumped to console rather than logged. (If you really need logging
for debugging purposes, you could turn on logging in the extension.)
:return: list
"""
# There might be some iteration tools in importlib, but no time to read documentation...
this_dir = os.path.dirname(__file__)
flist = os.listdir(this_dir)
# Do alphabetical order
flist.sort()
exclusion_list = ['__init__']
loaded_extensions = []
failed_extensions = []
decorated_fails = []
use_monkey_example = econ_platform_core.PlatformConfiguration['Options'].getboolean('UseMonkeyPatchExample')
use_example_provider = econ_platform_core.PlatformConfiguration['Options'].getboolean('UseExampleProvider')
if not use_monkey_example:
exclusion_list.append('monkey_patch_example')
if not use_example_provider:
exclusion_list.append('hook_provider_example')
for fname in flist:
fname = fname.lower()
if not fname.endswith('.py'):
continue
fname = fname[:-3]
if fname in exclusion_list:
continue
# Import it!
try:
mod = importlib.import_module('econ_platform_core.extensions.' + fname)
if hasattr(mod, 'extension_name'):
fname = str(mod.extension_name)
# Try running main()
if hasattr(mod, 'main'):
mod.main()
print('Extension {0} loaded.'.format(fname))
loaded_extensions.append(fname)
except Exception as ex:
print('Failure loading extension:', fname)
print(type(ex), str(ex))
failed_extensions.append(fname)
decorated_fails.append((fname, str(ex)))
return (loaded_extensions, failed_extensions, decorated_fails)
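# Hypothetical example (not part of this package): a module dropped into this
# directory is picked up by load_extensions(); the names below are illustrative.
#
#   # econ_platform_core/extensions/my_extension.py
#   extension_name = 'My Example Extension'
#
#   def main():
#       # one-time initialisation, e.g. registering a provider
#       pass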
|
[
"[email protected]"
] | |
90e67e8f27903d86d9ceb8579ee7c679d3dfeaae
|
4d59015f3392952d3b969cd46289974f4ed625cc
|
/machines/rasppi01/current_loop_logger.py
|
513a8ace41ef71fc9224b526309d518a429499f5
|
[] |
no_license
|
jlopezBolt/PyExpLabSys
|
766d6eae909c10db1783c31f9c0bb9478d22cd74
|
14d2a24c3031a78da0d2d686c42bc01ffe18faca
|
refs/heads/master
| 2021-01-19T23:57:52.297666 | 2016-04-19T08:20:02 | 2016-04-19T08:21:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,385 |
py
|
""" Argon pressure measuring """
from __future__ import print_function
import threading
import logging
import time
from PyExpLabSys.common.value_logger import ValueLogger
from PyExpLabSys.common.database_saver import ContinuousDataSaver
from PyExpLabSys.common.sockets import DateDataPullSocket
from PyExpLabSys.common.sockets import LiveSocket
from ABE_helpers import ABEHelpers
from ABE_ADCPi import ADCPi
import credentials
class PressureReader(threading.Thread):
""" Read argon pressure """
def __init__(self, adc):
threading.Thread.__init__(self)
self.adc = adc
self.waterpressure = -1
self.quit = False
def value(self):
""" Return the value of the reader """
return self.waterpressure
def run(self):
while not self.quit:
time.sleep(1)
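            # Convert the 4-20 mA loop reading (148 ohm shunt) to a pressure;
            # the 0.068947 factor suggests a psi-to-bar conversion (assumption).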
current = (self.adc.read_voltage(1) / 148) * 1000
self.waterpressure = (current - 4) * (500 / 16) * 0.068947
def main():
""" Main function """
logging.basicConfig(filename="logger.txt", level=logging.ERROR)
logging.basicConfig(level=logging.ERROR)
i2c_helper = ABEHelpers()
bus = i2c_helper.get_smbus()
adc_instance = ADCPi(bus, 0x68, 0x69, 18)
pressurereader = PressureReader(adc_instance)
pressurereader.daemon = True
pressurereader.start()
logger = ValueLogger(pressurereader, comp_val=0.5)
logger.start()
socket = DateDataPullSocket('hall_n5_argon_pressure',
['n5_argon_pressure'], timeouts=[1.0])
socket.start()
live_socket = LiveSocket('hall_n5_argon_pressure', ['n5_argon_pressure'], 2)
live_socket.start()
db_logger = ContinuousDataSaver(continuous_data_table='dateplots_hall',
username=credentials.user,
password=credentials.passwd,
measurement_codenames=['n5_argon_pressure'])
db_logger.start()
time.sleep(2)
while True:
time.sleep(0.25)
value = logger.read_value()
socket.set_point_now('n5_argon_pressure', value)
live_socket.set_point_now('n5_argon_pressure', value)
if logger.read_trigged():
print(value)
db_logger.save_point_now('n5_argon_pressure', value)
logger.clear_trigged()
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
831d89d9010f5aa0c82cbaac8e30bf85c06391ab
|
364ce434984eedab1ed491ad3e12bb245eeddf8b
|
/Fwd Converter Tool and Script/parse.py
|
9217746b601771a526fa995f78a529361d9bfb3e
|
[] |
no_license
|
anirudhdahiya9/sentence-type-identification
|
c532dffb14efcfb44444e2f737ddaa10d3673953
|
b4271a5b4f5c214fdcbe10582220c2cf3300c826
|
refs/heads/master
| 2021-01-19T05:03:53.533891 | 2016-07-05T10:47:20 | 2016-07-05T10:47:20 | 61,596,642 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,271 |
py
|
#!/usr/bin/python
import os
import sys
import codecs
inp = sys.argv[1]
out = sys.argv[2]
count = 1
for line in codecs.open(inp, 'r', 'utf-8'):
if line.strip() != '':
codecs.open('temp.out', 'w', 'utf-8').write(line)
os.system("sh $SHALLOW_PARSER_HIN/bin/sl/tokenizer/tokenizer.sh temp.out > temp1.out")
os.system('perl -C ~/convertor-indic-1.5.2/convertor_indic.pl -f=ssf -l=hin -s=utf -t=wx -i=temp1.out -o=temp2.out')
os.system('sh $SHALLOW_PARSER_HIN/bin/sl/morph/hin/morph.sh temp2.out>>temp3.out')
os.system('perl -C ~/convertor-indic-1.5.2/convertor_indic.pl -f=ssf -l=hin -s=utf -t=wx -i=temp3.out -o=temp4.out')
os.system("sh $SHALLOW_PARSER_HIN/bin/sl/postagger/hin/postagger.sh temp4.out > temp5.out")
#os.system('perl -C ~/convertor-indic-1.5.2/convertor_indic.pl -f=ssf -l=hin -s=utf -t=wx -i=temp5.out -o=temp6.out')
#os.system("sh $SHALLOW_PARSER_HIN/bin/sl/chunker/hin/chunker.sh temp6.out > temp7.out")
#os.system(" perl $SHALLOW_PARSER_HIN/bin/sl/pruning/pruning.pl --path=$SHALLOW_PARSER_HIN/bin/sl/pruning/ --resource=$SHALLOW_PARSER_HIN/data_bin/sl/pruning/mapping.dat < temp8.out | perl $SHALLOW_PARSER_HIN/bin/sl/pickonemorph/pickonemorph.pl --path=$SHALLOW_PARSER_HIN/bin/sl/pickonemorph/ | perl $SHALLOW_PARSER_HIN/bin/sl/headcomputation/headcomputation.pl --path=$SHALLOW_PARSER_HIN/bin/sl/headcomputation/ | perl $SHALLOW_PARSER_HIN/bin/sl/vibhakticomputation/vibhakticomputation.pl --path=$SHALLOW_PARSER_HIN/bin/sl/vibhakticomputation/ | perl $SHALLOW_PARSER_HIN/bin/sl/vibhakticomputation/printinput.pl")
#os.system('perl -C ~/convertor-indic-1.5.2/convertor_indic.pl -f=ssf -l=hin -s=utf -t=wx -i=temp9.out -o=temp10.out')
os.system('perl -C ~/convertor-indic-1.5.2/convertor_indic.pl -f=ssf -l=hin -s=wx -t=utf -i=temp5.out>>' + out)
os.system('rm temp.out')
os.system('rm temp1.out')
os.system('rm temp2.out')
os.system('rm temp3.out')
os.system('rm temp4.out')
os.system('rm temp5.out')
#os.system('rm temp6.out')
#os.system('rm temp7.out')
#os.system('rm temp8.out')
#os.system('rm temp9.out')
print("Processed Line no " + str(count))
count += 1
|
[
"[email protected]"
] | |
7f74138e0d9edcce5ccd2899fbd27e9087eee765
|
f22efea488f85d3ce88f9a5be7b8b6a0f589f747
|
/widgets/img/ex2_img.py
|
b536c2186829a2a491f5c328edc5f7ea5334d031
|
[
"MIT"
] |
permissive
|
maozhifeng/lv_mpy_examples
|
c96cf97db58a93a65d2c3944a759694e9d7bd54d
|
9c5abcf562b3eb4ace65e658cb5a5ca5443347ba
|
refs/heads/main
| 2023-02-01T18:27:28.496638 | 2020-12-08T15:54:18 | 2020-12-08T15:54:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,346 |
py
|
#!/opt/bin/lv_micropython
import time
import lvgl as lv
import init_gui
from lv_colors import lv_colors
from imagetools import get_png_info, open_png
SLIDER_WIDTH=15
# Register PNG image decoder
decoder = lv.img.decoder_create()
decoder.info_cb = get_png_info
decoder.open_cb = open_png
with open('img_cogwheel_argb.png','rb') as f:
png_data = f.read()
png_img_dsc = lv.img_dsc_t({
'data_size': len(png_data),
'data': png_data
})
def slider_event_cb(slider,event):
if event == lv.EVENT.VALUE_CHANGED:
# Recolor the image based on the sliders' values
color = lv.color_make(red_slider.get_value(), green_slider.get_value(), blue_slider.get_value())
intense = intense_slider.get_value()
img1.set_style_local_image_recolor_opa(lv.img.PART.MAIN, lv.STATE.DEFAULT, intense)
img1.set_style_local_image_recolor(lv.img.PART.MAIN, lv.STATE.DEFAULT, color)
# Create a set of RGB sliders
# Use the red one as a base for all the settings
red_slider = lv.slider(lv.scr_act(), None)
red_slider.set_range(0, 255)
red_slider.set_size(SLIDER_WIDTH, 200) # Be sure it's a vertical slider
red_slider.set_style_local_bg_color(lv.slider.PART.INDIC, lv.STATE.DEFAULT, lv_colors.RED)
red_slider.set_event_cb(slider_event_cb)
# Copy it for the other three sliders
green_slider = lv.slider(lv.scr_act(), red_slider)
green_slider.set_style_local_bg_color(lv.slider.PART.INDIC, lv.STATE.DEFAULT, lv_colors.LIME)
green_slider.set_event_cb(slider_event_cb)
blue_slider = lv.slider(lv.scr_act(), red_slider)
blue_slider.set_style_local_bg_color(lv.slider.PART.INDIC, lv.STATE.DEFAULT, lv_colors.BLUE)
blue_slider.set_event_cb(slider_event_cb)
intense_slider = lv.slider(lv.scr_act(), red_slider)
intense_slider.set_style_local_bg_color(lv.slider.PART.INDIC, lv.STATE.DEFAULT, lv_colors.GRAY)
intense_slider.set_value(255, lv.ANIM.OFF)
intense_slider.set_event_cb(slider_event_cb)
red_slider.align(None, lv.ALIGN.IN_LEFT_MID, 15, 0)
green_slider.align(red_slider, lv.ALIGN.OUT_RIGHT_MID, 15, 0)
blue_slider.align(green_slider, lv.ALIGN.OUT_RIGHT_MID, 15, 0)
intense_slider.align(blue_slider, lv.ALIGN.OUT_RIGHT_MID, 15, 0)
img1 = lv.img(lv.scr_act(),None)
lv.img.cache_set_size(2)
img1.align(lv.scr_act(), lv.ALIGN.CENTER, 50, -30)
img1.set_src(png_img_dsc)
while True:
lv.task_handler()
time.sleep_ms(10)
|
[
"[email protected]"
] | |
3b4c5a53b2b3f98002af33023a574713c44a007d
|
8e07f5f06452f9566640d2130a5c1bcefcebd745
|
/peter/completecrm/cases/forms.py
|
9a207afbdaa4159c453ba6a4cc06593941fbc2e9
|
[
"MIT"
] |
permissive
|
bot242/djangocrm
|
65dbe42a814fd538d77ec9c0cc5626a7d6ce19b4
|
6f5e64b4f65dbb13583d68ef5f6a3feaea51befb
|
refs/heads/main
| 2023-01-23T10:29:32.338620 | 2020-12-02T06:43:35 | 2020-12-02T06:43:35 | 317,773,468 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,996 |
py
|
from django import forms
from cases.models import Case
from common.models import Comment, Attachments
from teams.models import Teams
# import phonenumbers
import datetime
class CaseForm(forms.ModelForm):
teams_queryset = []
teams = forms.MultipleChoiceField(choices=teams_queryset)
def __init__(self, *args, **kwargs):
casecount = Case.objects.all().count()
print("CASECOUNT:",casecount)
c = casecount+1
assigned_users = kwargs.pop('assigned_to', [])
case_accounts = kwargs.pop('account', [])
case_contacts = kwargs.pop('contacts', [])
super(CaseForm, self).__init__(*args, **kwargs)
for field in self.fields.values():
field.widget.attrs = {"class": "form-control"}
self.fields['description'].widget.attrs.update({
'rows': '4'})
self.fields['address'].widget.attrs.update({
'rows': '4'})
self.fields['action_items'].widget.attrs.update({
'rows': '4'})
self.fields['parent_description'].widget.attrs.update({
'rows': '3'})
if assigned_users:
self.fields['assigned_to'].queryset = assigned_users
self.fields['assigned_to'].required = False
self.fields['assigned_date'].required = False
self.fields['assigned_date'].widget.attrs['readonly'] = True
self.fields['assigned_date' ].input_formats = [ '%d-%m-%Y %H:%M:%S' ]
self.fields['assigned_date'].initial = datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S')
self.fields['account'].queryset = case_accounts
self.fields['contacts'].queryset = case_contacts
self.fields['contacts'].required = False
self.fields['case_number'].required = True
self.fields['case_number'].initial = "C_00"+str(c)
self.fields['case_number'].widget.attrs['readonly'] = True
self.fields['creation_date'].required = True
self.fields['creation_date'].widget.attrs['readonly'] = True
self.fields['creation_date'].input_formats = [ '%d-%m-%Y %H:%M:%S' ]
self.fields['creation_date'].initial = datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S')
self.fields['case_type'].required = True
self.fields['closed_on'].required = False
self.fields['closed_on'].widget.attrs['readonly'] = True
self.fields['closed_on' ].input_formats = [ '%d-%m-%Y %H:%M:%S']
self.fields['closed_on'].initial = datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S')
# self.fields['assigned_to'].required = True
# self.fields['assigned_date'].required = True
self.fields['sla'].widget.attrs.update({'class' :'sla'})
# self.fields['sla'].widget.attrs['placeholder'] = "00:00:00"
for key, value in self.fields.items():
value.widget.attrs['placeholder'] = value.label
self.fields['parent_case'].widget.attrs['placeholder'] ="Related Case"
self.fields['name'].widget.attrs['placeholder'] = "Contact name"
self.fields['phone1'].widget.attrs['placeholder'] = "Phone/Mobile"
self.fields["teams"].choices = [(team.get('id'), team.get('name')) for team in Teams.objects.all().values('id', 'name')]
self.fields["teams"].required = False
class Meta:
model = Case
fields = ('assigned_to','phone1', 'name', 'status',
'priority', 'case_type', 'account','remark',
'contacts', 'closed_on', 'description', 'sla',
'case_number', 'email', 'address', 'action_items', 'creation_date','assigned_date','parent_case','parent_description')
widgets = {
'phone1': forms.NumberInput(attrs={'class': 'form-control','type': 'number'})
# widget=forms.TextInput(attrs={'min':1,'max': '5','type': 'number'}))
}
# def clean_name(self):
# name = self.cleaned_data['name']
# case = Case.objects.filter(
# name__iexact=name).exclude(id=self.instance.id)
# if case:
# raise forms.ValidationError("Case Already Exists with this Name")
# else:
# return name
# def clean_phone1(self):
# phone1 = self.cleaned_data.get("phone1")
# z = phonenumbers.parse(phone1)
# if not phonenumbers.is_valid_number(z):
# raise forms.ValidationError("Number not in valid")
# return phone1
class CaseCommentForm(forms.ModelForm):
comment = forms.CharField(max_length=255, required=True)
class Meta:
model = Comment
fields = ('comment', 'case', 'commented_by', )
class CaseAttachmentForm(forms.ModelForm):
attachment = forms.FileField(max_length=1001, required=True)
class Meta:
model = Attachments
fields = ('attachment', 'case')
# class SlaForm(forms.ModelForm):
# class Meta:
# time = forms.TimeField(input_formats=["%H:%M"])
# model = Sla
# fields = ('status','time')
|
[
"[email protected]"
] | |
742778cfbe8961fcfb828688a65fe536e706c2ac
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/ACL_PyTorch/contrib/cv/image_process/DnCNN/data_preprocess.py
|
729ade7a02e497f94c8c0476117cbb5214c790b1
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 |
Apache-2.0
| 2022-10-15T09:29:12 | 2022-04-20T04:11:18 |
Python
|
UTF-8
|
Python
| false | false | 2,544 |
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import sys
import os
import os.path
import numpy as np
import random
import torch
import cv2
import glob
infer_data = 'Set68'
infer_noiseL = 15
def normalize(data):
return data / 255.
def preprocess(data_path, ISource_bin, INoisy_bin):
# load data info
print('Loading data info ...\n')
files = glob.glob(os.path.join(data_path, infer_data, '*.png'))
files.sort()
# process data
for i in range(len(files)):
# image
filename = os.path.basename(files[i])
img = cv2.imread(files[i])
img = normalize(np.float32(img[:, :, 0]))
img_padded = np.full([481, 481], 0, dtype=np.float32)
width_offset = (481 - img.shape[1]) // 2
height_offset = (481 - img.shape[0]) // 2
img_padded[height_offset:height_offset + img.shape[0], width_offset:width_offset + img.shape[1]] = img
img = img_padded
img = np.expand_dims(img, 0)
img = np.expand_dims(img, 1)
ISource = torch.Tensor(img)
# noise
noise = torch.FloatTensor(ISource.size()).normal_(mean=0, std=infer_noiseL / 255.)
# noisy image
INoisy = ISource + noise
# save ISource_bin
ISource = ISource.numpy()
print("ISource shape is", ISource.shape)
ISource.tofile(os.path.join(ISource_bin, filename.split('.')[0] + '.bin'))
# save INoisy_bin
INoisy = INoisy.numpy()
print("INoisy shape is", INoisy.shape)
INoisy.tofile(os.path.join(INoisy_bin, filename.split('.')[0] + '.bin'))
if __name__ == '__main__':
data_path = sys.argv[1]
ISource_bin = sys.argv[2]
INoisy_bin = sys.argv[3]
if os.path.exists(ISource_bin) is False:
os.mkdir(ISource_bin)
if os.path.exists(INoisy_bin) is False:
os.mkdir(INoisy_bin)
    preprocess(data_path, ISource_bin, INoisy_bin)
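# Illustrative invocation (directory names are placeholders, not from the
# original repo):
#     python3 data_preprocess.py ./data ./ISource_bin ./INoisy_bin
# The script pads each Set68 image to 481x481, adds Gaussian noise with
# sigma = 15/255, and writes the clean/noisy pairs as .bin files for
# offline inference.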
|
[
"[email protected]"
] | |
f133d16d6fe109ffa3206420b69fd288981df2cb
|
2af6a5c2d33e2046a1d25ae9dd66d349d3833940
|
/res_bw/scripts/common/lib/compiler/pyassem.py
|
e3100e515519b439a978b7a01d0545e12b960d99
|
[] |
no_license
|
webiumsk/WOT-0.9.12-CT
|
e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2
|
2506e34bd6634ad500b6501f4ed4f04af3f43fa0
|
refs/heads/master
| 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null |
WINDOWS-1250
|
Python
| false | false | 21,220 |
py
|
# 2015.11.10 21:34:37 Central Europe (standard time)
# Embedded file name: scripts/common/Lib/compiler/pyassem.py
"""A flow graph representation for Python bytecode"""
import dis
import types
import sys
from compiler import misc
from compiler.consts import CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS
class FlowGraph():
def __init__(self):
self.current = self.entry = Block()
self.exit = Block('exit')
self.blocks = misc.Set()
self.blocks.add(self.entry)
self.blocks.add(self.exit)
def startBlock(self, block):
if self._debug:
if self.current:
print 'end', repr(self.current)
print ' next', self.current.next
print ' prev', self.current.prev
print ' ', self.current.get_children()
print repr(block)
self.current = block
def nextBlock(self, block = None):
if block is None:
block = self.newBlock()
self.current.addNext(block)
self.startBlock(block)
return
def newBlock(self):
b = Block()
self.blocks.add(b)
return b
def startExitBlock(self):
self.startBlock(self.exit)
_debug = 0
def _enable_debug(self):
self._debug = 1
def _disable_debug(self):
self._debug = 0
def emit(self, *inst):
if self._debug:
print '\t', inst
if len(inst) == 2 and isinstance(inst[1], Block):
self.current.addOutEdge(inst[1])
self.current.emit(inst)
def getBlocksInOrder(self):
"""Return the blocks in reverse postorder
i.e. each node appears before all of its successors
"""
order = order_blocks(self.entry, self.exit)
return order
def getBlocks(self):
return self.blocks.elements()
def getRoot(self):
"""Return nodes appropriate for use with dominator"""
return self.entry
def getContainedGraphs(self):
l = []
for b in self.getBlocks():
l.extend(b.getContainedGraphs())
return l
def order_blocks(start_block, exit_block):
"""Order blocks so that they are emitted in the right order"""
order = []
remaining = set()
todo = [start_block]
while todo:
b = todo.pop()
if b in remaining:
continue
remaining.add(b)
for c in b.get_children():
if c not in remaining:
todo.append(c)
dominators = {}
for b in remaining:
        if __debug__ and b.next:
            assert b is b.next[0].prev[0], (b, b.next)
dominators.setdefault(b, set())
for c in b.get_followers():
while 1:
dominators.setdefault(c, set()).add(b)
if c.prev and c.prev[0] is not b:
c = c.prev[0]
else:
break
def find_next():
for b in remaining:
for c in dominators[b]:
if c in remaining:
break
else:
return b
        raise AssertionError('circular dependency, cannot find next block')
b = start_block
while 1:
order.append(b)
remaining.discard(b)
if b.next:
b = b.next[0]
continue
elif b is not exit_block and not b.has_unconditional_transfer():
order.append(exit_block)
if not remaining:
break
b = find_next()
return order
class Block():
_count = 0
def __init__(self, label = ''):
self.insts = []
self.outEdges = set()
self.label = label
self.bid = Block._count
self.next = []
self.prev = []
Block._count = Block._count + 1
def __repr__(self):
if self.label:
return '<block %s id=%d>' % (self.label, self.bid)
else:
return '<block id=%d>' % self.bid
def __str__(self):
insts = map(str, self.insts)
return '<block %s %d:\n%s>' % (self.label, self.bid, '\n'.join(insts))
def emit(self, inst):
op = inst[0]
self.insts.append(inst)
def getInstructions(self):
return self.insts
def addOutEdge(self, block):
self.outEdges.add(block)
def addNext(self, block):
self.next.append(block)
        assert len(self.next) == 1, map(str, self.next)
block.prev.append(self)
        assert len(block.prev) == 1, map(str, block.prev)
_uncond_transfer = ('RETURN_VALUE', 'RAISE_VARARGS', 'JUMP_ABSOLUTE', 'JUMP_FORWARD', 'CONTINUE_LOOP')
def has_unconditional_transfer(self):
"""Returns True if there is an unconditional transfer to an other block
at the end of this block. This means there is no risk for the bytecode
executer to go past this block's bytecode."""
try:
op, arg = self.insts[-1]
except (IndexError, ValueError):
return
return op in self._uncond_transfer
def get_children(self):
return list(self.outEdges) + self.next
def get_followers(self):
"""Get the whole list of followers, including the next block."""
followers = set(self.next)
for inst in self.insts:
if inst[0] in PyFlowGraph.hasjrel:
followers.add(inst[1])
return followers
def getContainedGraphs(self):
"""Return all graphs contained within this block.
For example, a MAKE_FUNCTION block will contain a reference to
the graph for the function body.
"""
contained = []
for inst in self.insts:
if len(inst) == 1:
continue
op = inst[1]
if hasattr(op, 'graph'):
contained.append(op.graph)
return contained
RAW = 'RAW'
FLAT = 'FLAT'
CONV = 'CONV'
DONE = 'DONE'
class PyFlowGraph(FlowGraph):
super_init = FlowGraph.__init__
def __init__(self, name, filename, args = (), optimized = 0, klass = None):
self.super_init()
self.name = name
self.filename = filename
self.docstring = None
self.args = args
self.argcount = getArgCount(args)
self.klass = klass
if optimized:
self.flags = CO_OPTIMIZED | CO_NEWLOCALS
else:
self.flags = 0
self.consts = []
self.names = []
self.freevars = []
self.cellvars = []
self.closure = []
self.varnames = list(args) or []
for i in range(len(self.varnames)):
var = self.varnames[i]
if isinstance(var, TupleArg):
self.varnames[i] = var.getName()
self.stage = RAW
return
def setDocstring(self, doc):
self.docstring = doc
def setFlag(self, flag):
self.flags = self.flags | flag
if flag == CO_VARARGS:
self.argcount = self.argcount - 1
def checkFlag(self, flag):
if self.flags & flag:
return 1
def setFreeVars(self, names):
self.freevars = list(names)
def setCellVars(self, names):
self.cellvars = names
def getCode(self):
"""Get a Python code object"""
        assert self.stage == RAW
        self.computeStackDepth()
        self.flattenGraph()
        assert self.stage == FLAT
        self.convertArgs()
        assert self.stage == CONV
        self.makeByteCode()
        assert self.stage == DONE
return self.newCodeObject()
def dump(self, io = None):
if io:
save = sys.stdout
sys.stdout = io
pc = 0
for t in self.insts:
opname = t[0]
if opname == 'SET_LINENO':
print
if len(t) == 1:
print '\t', '%3d' % pc, opname
pc = pc + 1
else:
print '\t', '%3d' % pc, opname, t[1]
pc = pc + 3
if io:
sys.stdout = save
def computeStackDepth(self):
"""Compute the max stack depth.
Approach is to compute the stack effect of each basic block.
Then find the path through the code with the largest total
effect.
"""
depth = {}
exit = None
for b in self.getBlocks():
depth[b] = findDepth(b.getInstructions())
seen = {}
def max_depth(b, d):
if b in seen:
return d
seen[b] = 1
d = d + depth[b]
children = b.get_children()
if children:
return max([ max_depth(c, d) for c in children ])
elif not b.label == 'exit':
return max_depth(self.exit, d)
else:
return d
self.stacksize = max_depth(self.entry, 0)
return
def flattenGraph(self):
"""Arrange the blocks in order and resolve jumps"""
        assert self.stage == RAW
self.insts = insts = []
pc = 0
begin = {}
end = {}
for b in self.getBlocksInOrder():
begin[b] = pc
for inst in b.getInstructions():
insts.append(inst)
if len(inst) == 1:
pc = pc + 1
elif inst[0] != 'SET_LINENO':
pc = pc + 3
end[b] = pc
pc = 0
for i in range(len(insts)):
inst = insts[i]
if len(inst) == 1:
pc = pc + 1
elif inst[0] != 'SET_LINENO':
pc = pc + 3
opname = inst[0]
if opname in self.hasjrel:
oparg = inst[1]
offset = begin[oparg] - pc
insts[i] = (opname, offset)
elif opname in self.hasjabs:
insts[i] = (opname, begin[inst[1]])
self.stage = FLAT
hasjrel = set()
for i in dis.hasjrel:
hasjrel.add(dis.opname[i])
hasjabs = set()
for i in dis.hasjabs:
hasjabs.add(dis.opname[i])
def convertArgs(self):
"""Convert arguments from symbolic to concrete form"""
        assert self.stage == FLAT
self.consts.insert(0, self.docstring)
self.sort_cellvars()
for i in range(len(self.insts)):
t = self.insts[i]
if len(t) == 2:
opname, oparg = t
conv = self._converters.get(opname, None)
if conv:
self.insts[i] = (opname, conv(self, oparg))
self.stage = CONV
return
def sort_cellvars(self):
"""Sort cellvars in the order of varnames and prune from freevars.
"""
cells = {}
for name in self.cellvars:
cells[name] = 1
self.cellvars = [ name for name in self.varnames if name in cells ]
for name in self.cellvars:
del cells[name]
self.cellvars = self.cellvars + cells.keys()
self.closure = self.cellvars + self.freevars
def _lookupName(self, name, list):
"""Return index of name in list, appending if necessary
This routine uses a list instead of a dictionary, because a
dictionary can't store two different keys if the keys have the
same value but different types, e.g. 2 and 2L. The compiler
must treat these two separately, so it does an explicit type
comparison before comparing the values.
"""
t = type(name)
for i in range(len(list)):
if t == type(list[i]) and list[i] == name:
return i
end = len(list)
list.append(name)
return end
_converters = {}
def _convert_LOAD_CONST(self, arg):
if hasattr(arg, 'getCode'):
arg = arg.getCode()
return self._lookupName(arg, self.consts)
def _convert_LOAD_FAST(self, arg):
self._lookupName(arg, self.names)
return self._lookupName(arg, self.varnames)
_convert_STORE_FAST = _convert_LOAD_FAST
_convert_DELETE_FAST = _convert_LOAD_FAST
def _convert_LOAD_NAME(self, arg):
if self.klass is None:
self._lookupName(arg, self.varnames)
return self._lookupName(arg, self.names)
def _convert_NAME(self, arg):
if self.klass is None:
self._lookupName(arg, self.varnames)
return self._lookupName(arg, self.names)
_convert_STORE_NAME = _convert_NAME
_convert_DELETE_NAME = _convert_NAME
_convert_IMPORT_NAME = _convert_NAME
_convert_IMPORT_FROM = _convert_NAME
_convert_STORE_ATTR = _convert_NAME
_convert_LOAD_ATTR = _convert_NAME
_convert_DELETE_ATTR = _convert_NAME
_convert_LOAD_GLOBAL = _convert_NAME
_convert_STORE_GLOBAL = _convert_NAME
_convert_DELETE_GLOBAL = _convert_NAME
def _convert_DEREF(self, arg):
self._lookupName(arg, self.names)
self._lookupName(arg, self.varnames)
return self._lookupName(arg, self.closure)
_convert_LOAD_DEREF = _convert_DEREF
_convert_STORE_DEREF = _convert_DEREF
def _convert_LOAD_CLOSURE(self, arg):
self._lookupName(arg, self.varnames)
return self._lookupName(arg, self.closure)
_cmp = list(dis.cmp_op)
def _convert_COMPARE_OP(self, arg):
return self._cmp.index(arg)
for name, obj in locals().items():
if name[:9] == '_convert_':
opname = name[9:]
_converters[opname] = obj
del name
del obj
del opname
def makeByteCode(self):
        assert self.stage == CONV
self.lnotab = lnotab = LineAddrTable()
for t in self.insts:
opname = t[0]
if len(t) == 1:
lnotab.addCode(self.opnum[opname])
else:
oparg = t[1]
if opname == 'SET_LINENO':
lnotab.nextLine(oparg)
continue
hi, lo = twobyte(oparg)
try:
lnotab.addCode(self.opnum[opname], lo, hi)
except ValueError:
print opname, oparg
print self.opnum[opname], lo, hi
raise
self.stage = DONE
opnum = {}
for num in range(len(dis.opname)):
opnum[dis.opname[num]] = num
del num
def newCodeObject(self):
        assert self.stage == DONE
if self.flags & CO_NEWLOCALS == 0:
nlocals = 0
else:
nlocals = len(self.varnames)
argcount = self.argcount
        if self.flags & CO_VARKEYWORDS:
            argcount = argcount - 1
return types.CodeType(argcount, nlocals, self.stacksize, self.flags, self.lnotab.getCode(), self.getConsts(), tuple(self.names), tuple(self.varnames), self.filename, self.name, self.lnotab.firstline, self.lnotab.getTable(), tuple(self.freevars), tuple(self.cellvars))
def getConsts(self):
"""Return a tuple for the const slot of the code object
Must convert references to code (MAKE_FUNCTION) to code
objects recursively.
"""
l = []
for elt in self.consts:
if isinstance(elt, PyFlowGraph):
elt = elt.getCode()
l.append(elt)
return tuple(l)
def isJump(opname):
if opname[:4] == 'JUMP':
return 1
class TupleArg():
"""Helper for marking func defs with nested tuples in arglist"""
def __init__(self, count, names):
self.count = count
self.names = names
def __repr__(self):
return 'TupleArg(%s, %s)' % (self.count, self.names)
def getName(self):
return '.%d' % self.count
def getArgCount(args):
argcount = len(args)
if args:
for arg in args:
if isinstance(arg, TupleArg):
numNames = len(misc.flatten(arg.names))
argcount = argcount - numNames
return argcount
def twobyte(val):
"""Convert an int argument into high and low bytes"""
    assert isinstance(val, int)
return divmod(val, 256)
class LineAddrTable():
"""lnotab
This class builds the lnotab, which is documented in compile.c.
Here's a brief recap:
For each SET_LINENO instruction after the first one, two bytes are
added to lnotab. (In some cases, multiple two-byte entries are
added.) The first byte is the distance in bytes between the
instruction for the last SET_LINENO and the current SET_LINENO.
The second byte is offset in line numbers. If either offset is
greater than 255, multiple two-byte entries are added -- see
compile.c for the delicate details.
"""
def __init__(self):
self.code = []
self.codeOffset = 0
self.firstline = 0
self.lastline = 0
self.lastoff = 0
self.lnotab = []
def addCode(self, *args):
for arg in args:
self.code.append(chr(arg))
self.codeOffset = self.codeOffset + len(args)
def nextLine(self, lineno):
if self.firstline == 0:
self.firstline = lineno
self.lastline = lineno
else:
addr = self.codeOffset - self.lastoff
line = lineno - self.lastline
if line >= 0:
push = self.lnotab.append
while addr > 255:
push(255)
push(0)
addr -= 255
while line > 255:
push(addr)
push(255)
line -= 255
addr = 0
if addr > 0 or line > 0:
push(addr)
push(line)
self.lastline = lineno
self.lastoff = self.codeOffset
def getCode(self):
return ''.join(self.code)
def getTable(self):
return ''.join(map(chr, self.lnotab))
class StackDepthTracker():
def findDepth(self, insts, debug = 0):
depth = 0
maxDepth = 0
for i in insts:
opname = i[0]
if debug:
print i,
delta = self.effect.get(opname, None)
if delta is not None:
depth = depth + delta
else:
for pat, pat_delta in self.patterns:
if opname[:len(pat)] == pat:
delta = pat_delta
depth = depth + delta
break
if delta is None:
meth = getattr(self, opname, None)
if meth is not None:
depth = depth + meth(i[1])
if depth > maxDepth:
maxDepth = depth
if debug:
print depth, maxDepth
return maxDepth
effect = {'POP_TOP': -1,
'DUP_TOP': 1,
'LIST_APPEND': -1,
'SET_ADD': -1,
'MAP_ADD': -2,
'SLICE+1': -1,
'SLICE+2': -1,
'SLICE+3': -2,
'STORE_SLICE+0': -1,
'STORE_SLICE+1': -2,
'STORE_SLICE+2': -2,
'STORE_SLICE+3': -3,
'DELETE_SLICE+0': -1,
'DELETE_SLICE+1': -2,
'DELETE_SLICE+2': -2,
'DELETE_SLICE+3': -3,
'STORE_SUBSCR': -3,
'DELETE_SUBSCR': -2,
'PRINT_ITEM': -1,
'RETURN_VALUE': -1,
'YIELD_VALUE': -1,
'EXEC_STMT': -3,
'BUILD_CLASS': -2,
'STORE_NAME': -1,
'STORE_ATTR': -2,
'DELETE_ATTR': -1,
'STORE_GLOBAL': -1,
'BUILD_MAP': 1,
'COMPARE_OP': -1,
'STORE_FAST': -1,
'IMPORT_STAR': -1,
'IMPORT_NAME': -1,
'IMPORT_FROM': 1,
'LOAD_ATTR': 0,
'SETUP_EXCEPT': 3,
'SETUP_FINALLY': 3,
'FOR_ITER': 1,
'WITH_CLEANUP': -1}
patterns = [('BINARY_', -1), ('LOAD_', 1)]
def UNPACK_SEQUENCE(self, count):
return count - 1
def BUILD_TUPLE(self, count):
return -count + 1
def BUILD_LIST(self, count):
return -count + 1
def BUILD_SET(self, count):
return -count + 1
def CALL_FUNCTION(self, argc):
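        # In CPython 2 bytecode, argc packs the positional argument count in
        # the low byte and the keyword argument count in the high byte; each
        # keyword argument occupies two stack slots (name and value), hence
        # the hi * 2 term below.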
hi, lo = divmod(argc, 256)
return -(lo + hi * 2)
def CALL_FUNCTION_VAR(self, argc):
return self.CALL_FUNCTION(argc) - 1
def CALL_FUNCTION_KW(self, argc):
return self.CALL_FUNCTION(argc) - 1
def CALL_FUNCTION_VAR_KW(self, argc):
return self.CALL_FUNCTION(argc) - 2
def MAKE_FUNCTION(self, argc):
return -argc
def MAKE_CLOSURE(self, argc):
return -argc
def BUILD_SLICE(self, argc):
if argc == 2:
return -1
if argc == 3:
return -2
def DUP_TOPX(self, argc):
return argc
findDepth = StackDepthTracker().findDepth
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\compiler\pyassem.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:34:38 Central Europe (standard time)
|
[
"[email protected]"
] | |
5b2880d4193c0aca32d56ce78f67f59b0a7be22d
|
9d0195aa83cc594a8c61f334b90375961e62d4fe
|
/JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano1082.py
|
b8f95efbdc487c05e58b233e9de62f8739a7bc2f
|
[] |
no_license
|
rsk146/CMS
|
4e49592fc64f6438051544c5de18598db36ed985
|
5f8dab8c59ae556598b9747b52b88205fffc4dbe
|
refs/heads/master
| 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,293 |
py
|
# Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/120000/C42A2FC9-76FC-7F4E-929A-F2957BCFBF0D.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest1082.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
|
[
"[email protected]"
] | |
5ecede91c8a95285d7e7ec86cac670978ed2ac09
|
d92aa5c7945348dcf522ededb924c4f60a49d39a
|
/COT/commands/add_disk.py
|
a90b766fe8382cccd71a950274107049ee0460bd
|
[
"MIT"
] |
permissive
|
jarrod180/cot
|
254ba1a3c2216bbbac1b68fcce29723df9e2a14a
|
01b1446d8211648fb514446c5cc15569e24171d5
|
refs/heads/master
| 2020-04-12T00:17:14.659414 | 2018-05-07T17:26:41 | 2018-05-07T17:26:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 23,697 |
py
|
#!/usr/bin/env python
#
# add_disk.py - Implements "cot add-disk" command
#
# August 2013, Glenn F. Matthews
# Copyright (c) 2013-2018 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Module for adding disks to VMs.
**Functions**
.. autosummary::
:nosignatures:
add_disk_worker
confirm_elements
guess_controller_type
search_for_elements
validate_elements
validate_controller_address
**Classes**
.. autosummary::
:nosignatures:
COTAddDisk
"""
import logging
import os.path
from COT.disks import DiskRepresentation
from COT.data_validation import (
InvalidInputError, ValueUnsupportedError,
check_for_conflict, device_address, match_or_die,
)
from .command import command_classes, ReadWriteCommand
logger = logging.getLogger(__name__)
def validate_controller_address(controller, address):
"""Check validity of the given address string for the given controller.
Helper method for the :attr:`controller`/:attr:`address` setters.
Args:
controller (str): "ide", "sata", or "scsi"
address (str): A string like '0:0' or '2:10'
Raises:
InvalidInputError: if the address/controller combo is invalid.
Examples:
::
>>> validate_controller_address("ide", "0:0")
>>> try:
... validate_controller_address("ide", "1:3")
... except InvalidInputError as e:
... print(e)
IDE disk address must be between 0:0 and 1:1
>>> validate_controller_address("scsi", "1:3")
>>> try:
... validate_controller_address("scsi", "4:0")
... except InvalidInputError as e:
... print(e)
SCSI disk address must be between 0:0 and 3:15
>>> validate_controller_address("sata", "0:0")
>>> validate_controller_address("sata", "1:3")
"""
logger.debug("validate_controller_address: %s, %s", controller, address)
if controller is not None and address is not None:
logger.verbose("Validating address %s for controller type %s",
address, controller)
ctrl_addr = address.split(":")[0]
disk_addr = address.split(":")[1]
if controller == "scsi" and (int(ctrl_addr) > 3 or
int(disk_addr) > 15):
raise InvalidInputError(
"SCSI disk address must be between 0:0 and 3:15")
elif controller == "ide" and (int(ctrl_addr) > 1 or
int(disk_addr) > 1):
raise InvalidInputError(
"IDE disk address must be between 0:0 and 1:1")
class COTAddDisk(ReadWriteCommand):
"""Add or replace a disk in a virtual machine.
Inherited attributes:
:attr:`~Command.ui`,
:attr:`~ReadWriteCommand.package`,
:attr:`~ReadWriteCommand.output`
Attributes:
:attr:`disk_image`,
:attr:`drive_type`,
:attr:`file_id`,
:attr:`controller`,
:attr:`subtype`,
:attr:`address`,
:attr:`diskname`,
:attr:`description`
"""
def __init__(self, ui):
"""Instantiate this command with the given UI.
Args:
ui (UI): User interface instance.
"""
super(COTAddDisk, self).__init__(ui)
self._disk_image = None
self.drive_type = None
"""Disk drive type ('harddisk' or 'cdrom')."""
self.subtype = None
"""Controller subtype, such as "virtio"."""
self.file_id = None
"""File identifier to map disk to file."""
self._controller = None
self._address = None
self.diskname = None
"""Name string for the disk."""
self.description = None
"""Description of the disk."""
@property
def disk_image(self):
"""Disk image file to add to the VM.
Raises:
InvalidInputError: if the file does not exist.
"""
return self._disk_image
@disk_image.setter
def disk_image(self, value):
self._disk_image = DiskRepresentation.from_file(value)
@property
def address(self):
"""Disk device address on controller (``1:0``, etc.).
Raises:
InvalidInputError: see :meth:`validate_controller_address`
"""
return self._address
@address.setter
def address(self, value):
logger.debug("Setting address to '%s'", value)
validate_controller_address(self.controller, value)
self._address = value
@property
def controller(self):
"""Disk controller type (``ide``, ``sata``, ``scsi``).
Raises:
InvalidInputError: see :meth:`validate_controller_address`
"""
return self._controller
@controller.setter
def controller(self, value):
logger.debug("Setting controller to '%s'", value)
validate_controller_address(value, self.address)
self._controller = value
def ready_to_run(self):
"""Check whether the module is ready to :meth:`run`.
Returns:
tuple: ``(True, ready_message)`` or ``(False, reason_why_not)``
"""
if self.disk_image is None:
return False, "DISK_IMAGE is a mandatory argument!"
elif self.address is not None and self.controller is None:
return False, ("When specifying an address you must also "
"specify the controller type")
return super(COTAddDisk, self).ready_to_run()
def run(self):
"""Do the actual work of this command.
Raises:
InvalidInputError: if :meth:`ready_to_run` reports ``False``
"""
super(COTAddDisk, self).run()
add_disk_worker(self.vm,
ui=self.ui,
disk_image=self.disk_image,
drive_type=self.drive_type,
subtype=self.subtype,
file_id=self.file_id,
controller=self.controller,
address=self.address,
diskname=self.diskname,
description=self.description)
def create_subparser(self):
"""Create 'add-disk' CLI subparser."""
parser = self.ui.add_subparser(
'add-disk',
aliases=['add-drive'],
add_help=False,
usage=self.ui.fill_usage("add-disk", [
"DISK_IMAGE PACKAGE [-o OUTPUT] [-f FILE_ID] \
[-t {harddisk,cdrom}] [-c {ide,sata,scsi}] [-s SUBTYPE] [-a ADDRESS] \
[-d DESCRIPTION] [-n DISKNAME]"
]),
help="""Add a disk image to an OVF package and map it as a disk
in the guest environment""",
description="""
Add or replace a disk image in the specified OVF or OVA.
If the specified disk image, controller/address, file-id, and/or instance
match an existing entry in the OVF, will replace the existing disk with
the provided file (prompting for confirmation if --force was not set);
otherwise, will create a new disk entry.""")
group = parser.add_argument_group("general options")
group.add_argument('-h', '--help', action='help',
help="""Show this help message and exit""")
group.add_argument('-o', '--output',
help="""Name/path of new OVF/OVA package to """
"""create instead of updating the existing OVF""")
group = parser.add_argument_group("disk-related options")
group.add_argument('-f', '--file-id',
help="""Disk image file ID string within the OVF """
"""package (default: use disk image filename)""")
group.add_argument('-t', '--type',
dest='drive_type',
choices=['harddisk', 'cdrom'],
help="""Disk drive type (default: files ending """
"""in .vmdk/.raw/.qcow2/.img will use harddisk """
"""and files ending in .iso will use cdrom)""")
group = parser.add_argument_group("controller-related options")
group.add_argument('-c', '--controller',
choices=['ide', 'sata', 'scsi'],
help="""Disk controller type (default: """
"""determined by disk drive type and platform)""")
group.add_argument('-a', '--address', type=device_address,
help="""Address of the disk, such as "1:0". """
"""Requires that --controller be explicitly set. """
"""(default: use first unused address on the """
"""controller)""")
group.add_argument('-s', '--subtype',
help="""Disk controller subtype such as """
""""virtio", "lsilogic", or "AHCI".""")
group = parser.add_argument_group("descriptive options")
group.add_argument('-d', '--description',
help="""Description of this disk (optional)""")
group.add_argument('-n', '--name', dest='diskname',
help="""Name of this disk (default: """
""""Hard disk #" or "CD-ROM #" as appropriate)""")
parser.add_argument('DISK_IMAGE',
help="""Disk image file to add to the package""")
parser.add_argument('PACKAGE',
help="""OVF descriptor or OVA file to edit""")
parser.set_defaults(instance=self)
def search_for_elements(vm, disk_file, file_id, controller, address):
"""Search for a unique set of objects based on the given criteria.
A disk is defined by up to four different sections in the OVF:
* File (references the actual disk image file)
* Disk (references the File, only used for HD not CD-ROM)
* Item (defines the SCSI/IDE controller)
* Item (defines the disk drive, links to controller and File or Disk)
For each of these four sections, we need to know whether to add
a new one or overwrite an existing one. Depending on the user
arguments, we can do this by as many as three different approaches:
1. Check whether the DISK_IMAGE file name matches an existing File
in the OVF (and from there, find the associated Disk and Items)
2. Check whether the file-id matches an existing File and/or Disk
in the OVF (and from there, find the associated Items)
3. Check whether controller type and/or device address match existing Items
in the OVF (and from there, find the associated Disk and/or File)
Where it gets extra fun is if the user has specified more than one
of the above arguments - in which case we need to make sure that
all relevant approaches agree on what sections we're talking about...
Args:
vm (VMDescription): Virtual machine object
disk_file (str): Disk file name or path
file_id (str): File identifier
controller (str): controller type -- "ide", "sata", or "scsi"
address (str): device address, such as "1:0"
Raises:
ValueMismatchError: if the criteria select a non-unique set.
Returns:
tuple: (file_object, disk_object, controller_item, disk_item)
"""
# 1) Check whether the DISK_IMAGE file name matches an existing File
# in the OVF (and from there, find the associated Disk and Items)
(file1, disk1, ctrlitem1, diskitem1) = vm.search_from_filename(disk_file)
# 2) Check whether the --file-id matches an existing File and/or Disk
# in the OVF (and from there, find the associated Items)
# In the case where no file_id is specified, we may default to the
# filename, so check that instead
if file_id is not None:
(file2, disk2,
ctrlitem2, diskitem2) = vm.search_from_file_id(file_id)
else:
(file2, disk2,
ctrlitem2, diskitem2) = vm.search_from_file_id(disk_file)
# 3) Check whether the --controller and --address match existing Items
# in the OVF (and from there, find the associated Disk and/or File)
(file3, disk3, ctrlitem3, diskitem3) = vm.search_from_controller(
controller, address)
file_obj = check_for_conflict("File to overwrite", [file1, file2, file3])
disk_obj = check_for_conflict("Disk to overwrite", [disk1, disk2, disk3])
ctrl_item = check_for_conflict("controller Item to use",
[ctrlitem1, ctrlitem2, ctrlitem3])
disk_item = check_for_conflict("disk Item to overwrite",
[diskitem1, diskitem2, diskitem3])
return file_obj, disk_obj, ctrl_item, disk_item
def guess_controller_type(platform, ctrl_item, drive_type):
"""If a controller type wasn't specified, try to guess from context.
Args:
platform (Platform): Platform instance to guess controller for
ctrl_item (object): Any known controller object, or None
drive_type (str): "cdrom" or "harddisk"
Returns:
str: 'ide', 'sata', or 'scsi'
Raises:
ValueUnsupportedError: if ``ctrl_item`` is not None but is also not
an IDE, SATA, or SCSI controller device.
Examples:
::
>>> from COT.platforms import Platform
>>> guess_controller_type(Platform(), None, 'harddisk')
'ide'
"""
if ctrl_item is None:
# If the user didn't tell us which controller type they wanted,
# and we didn't find a controller item based on existing file/disk,
# then we need to guess which type of controller we need,
# based on the platform and the disk drive type.
ctrl_type = platform.controller_type_for_device(drive_type)
logger.warning("Controller type not specified - guessing it should be"
" %s based on disk drive type %s and platform %s",
ctrl_type, drive_type, platform)
else:
ctrl_type = ctrl_item.hardware_type
if ctrl_type not in ['ide', 'sata', 'scsi']:
raise ValueUnsupportedError("controller ResourceType",
ctrl_type,
"'ide', 'sata', or 'scsi'")
logger.notice("Controller type not specified - using"
" '%s' based on existing Item", ctrl_type)
return ctrl_type
def validate_elements(vm, file_obj, disk_obj, disk_item, ctrl_item,
file_id, ctrl_type):
"""Validate any existing file, disk, controller item, and disk item.
Raises:
ValueMismatchError: if the search criteria select a non-unique set.
Args:
vm (VMDescription): Virtual machine object
file_obj (object): Known file object
disk_obj (object): Known disk object
disk_item (object): Known disk device object
ctrl_item (object): Known controller device object
file_id (str): File identifier string
ctrl_type (str): Controller type ("ide", "sata", or "scsi")
"""
# Ok, we now have confirmed that we have at most one of each of these
# four objects. Now it's time for some sanity checking...
if file_obj is not None:
if file_id is not None:
match_or_die("File id", vm.get_id_from_file(file_obj),
"--file-id", file_id)
# Should never fail this test if the above logic was sound...
if disk_obj is not None:
match_or_die("File id", vm.get_id_from_file(file_obj),
"Disk fileRef", vm.get_file_ref_from_disk(disk_obj))
if disk_obj is not None:
if file_id is not None:
match_or_die("Disk fileRef", vm.get_file_ref_from_disk(disk_obj),
"--file-id", file_id)
if file_obj is None:
# This will happen if we're replacing a placeholder entry
# (disk exists but has no associated file)
logger.verbose("Found Disk but not File - maybe placeholder?")
if disk_item is not None:
vm.check_sanity_of_disk_device(disk_obj, file_obj,
disk_item, ctrl_item)
if ctrl_item is not None:
match_or_die("controller type",
ctrl_item.hardware_type,
"--controller", ctrl_type)
# Whew! Everything looks sane!
logger.debug("Validation of existing data complete")
def confirm_elements(vm, ui, file_obj, disk_image, disk_obj, disk_item,
drive_type, controller, ctrl_item, subtype):
"""Get user confirmation of any risky or unusual operations.
Args:
vm (VMDescription): Virtual machine object
ui (UI): User interface object
file_obj (object): Known file object
disk_image (str): Filename or path for disk file
disk_obj (object): Known disk object
disk_item (object): Known disk device object
drive_type (str): "harddisk" or "cdrom"
controller (str): Controller type ("ide", "sata", or "scsi")
ctrl_item (object): Known controller device object
subtype (str): Controller subtype (such as "virtio")
"""
# TODO: more refactoring!
if file_obj is not None:
ui.confirm_or_die("Replace existing file {0} with {1}?"
.format(vm.get_path_from_file(file_obj),
disk_image))
logger.notice("Overwriting existing File in OVF")
if file_obj is None and (disk_obj is not None or disk_item is not None):
ui.confirm_or_die(
"Add disk file to existing (but empty) {0} drive?"
.format(drive_type))
if disk_obj is not None:
logger.notice("Overwriting existing Disk in OVF")
if disk_item is not None:
if disk_item.hardware_type != drive_type:
ui.confirm_or_die(
"Existing disk Item is a {0}. Change it to a {1}?"
.format(disk_item.hardware_type,
drive_type))
# We'll overwrite the existing disk Item instead of deleting
# and recreating it, in order to preserve things like Description
logger.notice("Overwriting existing disk Item in OVF")
if ctrl_item is not None:
if subtype is not None:
curr_subtype = ctrl_item.hardware_subtype
if curr_subtype is not None and curr_subtype != subtype:
ui.confirm_or_die("Change {0} controller subtype from "
"'{1}' to '{2}'?".format(controller,
curr_subtype,
subtype))
else:
# In most cases we are NOT adding a new controller, so be safe...
ui.confirm_or_die("Add new {0} controller to OVF descriptor?"
.format(controller.upper()))
def add_disk_worker(vm,
ui,
disk_image,
drive_type=None,
file_id=None,
controller=None,
subtype=None,
address=None,
diskname=None,
description=None):
"""Worker function for actually adding the disk.
All parameters except ``vm``, ``ui``, and ``disk_image`` are optional
and will be automatically determined by COT if unspecified.
Args:
vm (VMDescription): The virtual machine being edited.
ui (UI): User interface in effect.
disk_image (DiskRepresentation): Disk image to add to the VM.
drive_type (str): Disk drive type: ``'cdrom'`` or ``'harddisk'``.
If not specified, will be derived automatically from the
disk_image file name extension.
file_id (str): Identifier of the disk file in the VM. If not
specified, the VM will automatically derive an appropriate value.
controller (str): Disk controller type: "ide" or "sata" or "scsi".
If not specified, will be derived from the `type` and the
`platform` of the given `vm`.
subtype (str): Controller subtype ('virtio', 'lsilogic', etc.)
address (str): Disk device address on its controller
(such as ``'1:0'``). If this matches an existing disk device,
that device will be overwritten. If not specified, the first
available address not already occupied by an existing device
will be selected.
diskname (str): Name for disk device
description (str): Description of disk device
"""
if drive_type is None:
drive_type = disk_image.predicted_drive_type
logger.warning("New disk drive type not specified, guessing it should "
"be '%s' based on file type", drive_type)
# Convert the disk to a new format if needed...
disk_image = vm.convert_disk_if_needed(disk_image, drive_type)
disk_filename = os.path.basename(disk_image.path)
(file_obj, disk, ctrl_item, disk_item) = \
search_for_elements(vm, disk_filename, file_id, controller, address)
if controller is None:
controller = guess_controller_type(vm.platform, ctrl_item, drive_type)
if ctrl_item is None and address is None:
# We didn't find a specific controller from the user info,
# but also the user didn't request a specific controller.
# So try and just look for any controller of the right type
(ctrl_item, address) = vm.find_open_controller(controller)
validate_elements(vm, file_obj, disk, disk_item, ctrl_item,
file_id, controller)
confirm_elements(vm, ui, file_obj, disk_image.path, disk, disk_item,
drive_type, controller, ctrl_item, subtype)
# OK - let's add things!
if file_id is None and file_obj is not None:
file_id = vm.get_id_from_file(file_obj)
if file_id is None and disk is not None:
file_id = vm.get_file_ref_from_disk(disk)
if file_id is None:
file_id = disk_filename
# First, the File
file_obj = vm.add_file(disk_image.path, file_id, file_obj, disk)
# Next, the Disk
disk = vm.add_disk(disk_image, file_id, drive_type, disk)
# Next, the controller (if needed)
if address is not None:
ctrl_addr = address.split(":")[0]
disk_addr = address.split(":")[1]
else:
# let VM choose controller address if necessary
ctrl_addr = None
disk_addr = None
if ctrl_item is None and subtype is None:
# Look for any existing controller of this type;
# if found, re-use its subtype for consistency
logger.verbose("Looking for subtype of existing controllers")
subtype = vm.get_common_subtype(controller)
ctrl_item = vm.add_controller_device(controller, subtype,
ctrl_addr, ctrl_item)
# Finally, the disk Item
vm.add_disk_device(drive_type, disk_addr, diskname,
description, disk, file_obj, ctrl_item, disk_item)
command_classes.append(COTAddDisk)
if __name__ == "__main__": # pragma: no cover
import doctest
doctest.testmod()
|
[
"[email protected]"
] | |
8ea3119829e7f8014ee5ff896e439e31e5bef8d9
|
5a7a3447d434a458a7bb63f2aa11b64c284d5492
|
/Data_storage/ini/email_conf/email_RW.py
|
06351a2a740c5d3a12b234683e772c500589dd6e
|
[] |
no_license
|
woshimayi/mypython
|
35792e12036a7a05f12d3ef7006637b2b03f0e2e
|
7f1eb38e8585bf6d2f21d3ad0f64dace61425875
|
refs/heads/master
| 2023-09-01T08:59:12.301836 | 2023-08-30T05:30:54 | 2023-08-30T05:30:54 | 130,017,052 | 4 | 0 | null | 2018-12-02T16:18:14 | 2018-04-18T06:50:36 |
HTML
|
UTF-8
|
Python
| false | false | 5,166 |
py
|
#!/usr/bin/env python
# encoding: utf-8
'''
* @FilePath: email_RW.py
* @version: (C) Copyright 2010-2049, Node Supply Chain Manager Corporation Limited.
* @Author: dof
* @Date: 2022/2/20 17:18
* @LastEditors: sueRimn
* @LastEditTime: 2022/2/20 17:18
* @Descripttion:
'''
# !/usr/bin/env python
# encoding: utf-8
'''
@author: caopeng
@license: (C) Copyright 2013-2017, Node Supply Chain Manager Corporation Limited.
@contact: [email protected]
@software: garner
@file: test_dof.py
@time: 2021/2/22 15:37
@desc: ini file read write
'''
import configparser
'''
# write ini file
config = configparser.ConfigParser()
config['DEFAULT'] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Port'] = '50022' # mutates the parser
topsecret['ForwardX11'] = 'no' # same here
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
# read ini file
config = configparser.ConfigParser()
print('sections')
print('1', config.sections())
print('2', config.read('example.ini'))
print('3', config.sections())
print('4', ('bitbucket.org' in config))
print('5', ('bytebong.com' in config))
print('6', config['bitbucket.org']['User'])
print('7', config['DEFAULT']['Compression'])
topsecret = config['topsecret.server.com']
print('8', topsecret['ForwardX11'])
print('9', topsecret['Port'])
for key in config['bitbucket.org']:
print('10', key)
for key in config['topsecret.server.com']:
print('12', key, config['topsecret.server.com'][key])
print('11', config['bitbucket.org']['ForwardX11'])
# -sections gets all the sections and returns them as a list
print('sections:', ' ', config.sections())
# -options(section) gets all the options of the given section
print('options:', ' ', config.options('bitbucket.org'))
# -items(section) gets all the key-value pairs of the given section
print('items:', ' ', config.items('bitbucket.org'))
# -get(section, option) gets the value of option in section, returned as a string
print('get:', ' ', config.get('bitbucket.org', 'user'))
# First get all sections of the config file, then display them group by group
for sections in config.sections():
for items in config.items(sections):
print(items, items[0], items[1])
# add section
config = configparser.ConfigParser()
config.add_section('type')
config.set('type', 'stun', 'bool')
with open('example.ini', 'a') as configfile:
config.write(configfile)
# remove section option
config = configparser.ConfigParser()
print('2', config.read('example.ini'))
# config.remove_option('bitbucket.org', 'user')
# config.remove_section('bitbucket.org')
config.write(open('example.ini', 'w'))
'''
class Email_operate(object):
"""docstring for Email_operate"""
def __init__(self, file):
print("open conf file: ", file)
super(Email_operate, self).__init__()
self.file = file
self.config = configparser.ConfigParser()
err = self.config.read(self.file)
print("err = ", err)
if 0 == len(err):
print("err = ssss")
self.config['Global'] = {}
self.config['send'] = {'mail': '',
'user': '',
'password': ''}
self.config['recv'] = {'user': ''}
with open(self.file, 'w') as configfile:
self.config.write(configfile)
def read(self, section, key):
try:
if section in self.config:
return self.config[section][key]
except:
pass
def write(self, section, key, value):
try:
if section in self.config:
self.config[section][key] = value
else:
                self.config.add_section(section)
self.config.set(section, key, value)
with open(self.file, 'w') as configfile:
self.config.write(configfile)
except:
pass
def show(self):
for sections in self.config.sections():
print("[%s]" % sections)
for items in self.config.items(sections):
print("%s = %s" % (items[0], items[1]))
print()
    def read_mail(self):
        return self.read("send", "mail")
    def read_user(self):
        return self.read("send", "user")
    def read_pass(self):
        return self.read("send", "password")
def write_mail(self):
pass
def write_user(self):
pass
def write_pass(self):
pass
def __del__(self):
print("end ... ")
if __name__ == '__main__':
print('Hello world')
C = Email_operate("email.ini")
C.show()
print("user = zzz", C.read("send", "user"))
C.write("recv", "pass", "ssssss")
C.show()
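# Expected layout of email.ini after the first run, based on the defaults
# written in Email_operate.__init__ (illustrative):
#   [Global]
#   [send]
#   mail =
#   user =
#   password =
#   [recv]
#   user =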
|
[
"[email protected]"
] | |
506663c388e8487f7280510f6f0aae4d6612f44c
|
97a09265d7898765a3f561c1b4a12e5b46346db8
|
/Python/numpy/Arrays.py
|
7bf0675732e562772204d7634dbfbc15ab6a4a48
|
[] |
no_license
|
14E47/Hackerrank
|
35e7b5520fe00ae98377624b8429d42d237cbd46
|
c2af2fa7ee49c2a94304ee543900425f5a3b6551
|
refs/heads/master
| 2020-03-26T21:22:45.492630 | 2019-10-04T03:37:14 | 2019-10-04T03:37:14 | 145,384,365 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 87 |
py
|
import numpy
# s = map(int,input().split())
a = numpy.array([1,2,3,4,5])
print(a[0])
|
[
"[email protected]"
] | |
b6d966b94063b30ff0e899960b8584eecd45215b
|
c9500ad778b8521aaa85cb7fe3239989efaa4799
|
/plugins/paloalto_wildfire/komand_paloalto_wildfire/actions/get_pcap/__init__.py
|
903787ad81d8a8ca9703d0f9d4e7e5f902d0843c
|
[
"MIT"
] |
permissive
|
rapid7/insightconnect-plugins
|
5a6465e720f114d71b1a82fe14e42e94db104a0b
|
718d15ca36c57231bb89df0aebc53d0210db400c
|
refs/heads/master
| 2023-09-01T09:21:27.143980 | 2023-08-31T10:25:36 | 2023-08-31T10:25:36 | 190,435,635 | 61 | 60 |
MIT
| 2023-09-14T08:47:37 | 2019-06-05T17:05:12 |
Python
|
UTF-8
|
Python
| false | false | 68 |
py
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import GetPcap
|
[
"[email protected]"
] | |
139c2b790b42e35160ff579c230c6aaf06592b0f
|
304926837d94f37ef33c46b8f3c71ecfac4690e8
|
/2.8_number_eight.py
|
811908108a2f301d97a7a3a5fc3092a6c35bf496
|
[] |
no_license
|
ver0nika4ka/PythonCrashCourse
|
1015d207d9da1b0f9efaee3acc502d2757880f33
|
6bde3b716deb86d022da5cb478c0a95505fe5acc
|
refs/heads/master
| 2021-07-12T17:24:16.478133 | 2021-06-17T03:27:24 | 2021-06-17T03:27:24 | 246,993,773 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 249 |
py
|
addition = f"{5+3}"
substraction = f"{9-1}"
multipl = f"{2*4}"
division = f"{16/2}"
print(f"The result is:\n{addition}\n{substraction}\n{multipl}\n{division}")
# same as above, but you can write in one line
print(f"{5+3}\n{9-1}\n{2*4}\n{16/2}")
|
[
"[email protected]"
] | |
491d75489dc73b64d619f63effa5b9d9ade79f7f
|
846a7668ac964632bdb6db639ab381be11c13b77
|
/android/test/vts/testcases/host/camera/conventional/2_1/SampleCameraV2Test.py
|
a9b0087d3b858e0c6647feb0e5a3b5ddce105fa4
|
[] |
no_license
|
BPI-SINOVOIP/BPI-A64-Android8
|
f2900965e96fd6f2a28ced68af668a858b15ebe1
|
744c72c133b9bf5d2e9efe0ab33e01e6e51d5743
|
refs/heads/master
| 2023-05-21T08:02:23.364495 | 2020-07-15T11:27:51 | 2020-07-15T11:27:51 | 143,945,191 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,445 |
py
|
#!/usr/bin/env python
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import time
from vts.runners.host import asserts
from vts.runners.host import base_test
from vts.runners.host import test_runner
class SampleCameraV2Test(base_test.BaseTestClass):
"""A sample testcase for the non-HIDL, conventional Camera HAL."""
# Camera HAL version value (v2.1).
VERSION_2_1 = 0x201
VERSION_2_4 = 0x204
MAX_RETRIES = 5
def setUpClass(self):
self.dut = self.android_devices[0]
self.dut.hal.InitConventionalHal(
target_type="camera",
target_version=2.1,
target_basepaths=["/system/lib/hw"],
bits=32,
target_package="hal.conventional.camera")
def setUp(self):
self.call_count_camera_device_status_change = 0
self.call_count_torch_mode_status_change = 0
def testCameraNormal(self):
"""A simple testcase which just emulates a normal usage pattern."""
version = self.dut.hal.camera.common.GetAttributeValue(
"module_api_version")
logging.info("version: %s", hex(version))
if version != self.VERSION_2_1 and version != self.VERSION_2_4:
asserts.skip("HAL version %s is neither v2.1 nor v2.4" % version)
result = self.dut.hal.camera.get_number_of_cameras()
count = result.return_type.scalar_value.int32_t
logging.info("# of found cameras: %s", count)
asserts.assertTrue(count > 0, "no camera found")
for index in range(0, count):
arg = self.dut.hal.camera.camera_info_t(facing=0)
logging.info(self.dut.hal.camera.get_camera_info(index, arg))
# uncomment when undefined function is handled gracefully.
# self.dut.hal.camera.init()
def camera_device_status_change(callbacks, camera_id, new_status):
self.call_count_camera_device_status_change += 1
logging.info("camera_device_status_change")
logging.info("camera_device_status_change: camera_id = %s",
camera_id)
logging.info("camera_device_status_change: new_status = %s",
new_status)
logging.info("camera_device_status_change: callbacks = %s",
callbacks)
def torch_mode_status_change(callbacks, camera_id, new_status):
self.profiling.StopHostProfiling(
"callback_latency_torch_mode_status_change")
self.call_count_torch_mode_status_change += 1
logging.info("torch_mode_status_change")
logging.info("torch_mode_status_change: camera_id = %s", camera_id)
logging.info("torch_mode_status_change: new_status = %s",
new_status)
logging.info("torch_mode_status_change: callbacks = %s", callbacks)
my_callback = self.dut.hal.camera.camera_module_callbacks_t(
camera_device_status_change, torch_mode_status_change)
self.dut.hal.camera.set_callbacks(my_callback)
self.profiling.StartHostProfiling(
"callback_latency_torch_mode_status_change")
self.dut.hal.camera.common.methods.open() # note args are skipped
retries = 0
while (self.call_count_torch_mode_status_change < 1 and
retries < self.MAX_RETRIES):
logging.info("waiting %s %s",
self.call_count_camera_device_status_change,
self.call_count_torch_mode_status_change)
time.sleep(1)
retries += 1
if self.call_count_torch_mode_status_change < 1:
# The above callback was not always called (~50% of chance).
logging.error("Callback not called within %s seconds",
self.MAX_RETRIES)
if __name__ == "__main__":
test_runner.main()
|
[
"[email protected]"
] | |
06bd0eeeed12d227a0e832205e942acba3b8c52f
|
a15a7dcb2ba3880a75309dba66e718be7ca964b7
|
/st2tests/integration/orquesta/test_wiring_error_handling.py
|
8bd0218dd3110abe76d8f7235c15420dca021652
|
[
"Apache-2.0"
] |
permissive
|
alexiono/st2
|
dfb6a9b2c6d00023771ff626883d9631e586fc06
|
a2bf25e085dfc9d2d407e8160a2febd48e5a4920
|
refs/heads/master
| 2020-04-02T00:48:03.440760 | 2018-10-18T17:13:56 | 2018-10-18T17:13:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,356 |
py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from integration.orquesta import base
from st2common.constants import action as ac_const
class ErrorHandlingTest(base.TestWorkflowExecution):
def test_inspection_error(self):
expected_errors = [
{
'type': 'content',
'message': 'The action "std.noop" is not registered in the database.',
'schema_path': 'properties.tasks.patternProperties.^\w+$.properties.action',
'spec_path': 'tasks.task3.action'
},
{
'type': 'context',
'language': 'yaql',
'expression': '<% ctx().foobar %>',
'message': 'Variable "foobar" is referenced before assignment.',
'schema_path': 'properties.tasks.patternProperties.^\w+$.properties.input',
'spec_path': 'tasks.task1.input',
},
{
'type': 'expression',
'language': 'yaql',
'expression': '<% <% succeeded() %>',
'message': (
'Parse error: unexpected \'<\' at '
'position 0 of expression \'<% succeeded()\''
),
'schema_path': (
'properties.tasks.patternProperties.^\w+$.'
'properties.next.items.properties.when'
),
'spec_path': 'tasks.task2.next[0].when'
},
{
'type': 'syntax',
'message': '[{\'cmd\': \'echo <% ctx().macro %>\'}] is not of type \'object\'',
'schema_path': 'properties.tasks.patternProperties.^\w+$.properties.input.type',
'spec_path': 'tasks.task2.input'
}
]
ex = self._execute_workflow('examples.orquesta-fail-inspection')
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ex.result, {'errors': expected_errors, 'output': None})
def test_input_error(self):
expected_errors = [{'message': 'Unknown function "#property#value"'}]
ex = self._execute_workflow('examples.orquesta-fail-input-rendering')
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ex.result, {'errors': expected_errors, 'output': None})
def test_vars_error(self):
expected_errors = [{'message': 'Unknown function "#property#value"'}]
ex = self._execute_workflow('examples.orquesta-fail-vars-rendering')
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ex.result, {'errors': expected_errors, 'output': None})
def test_start_task_error(self):
expected_errors = [{'message': 'Unknown function "#property#value"', 'task_id': 'task1'}]
ex = self._execute_workflow('examples.orquesta-fail-start-task')
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ex.result, {'errors': expected_errors, 'output': None})
def test_task_transition_error(self):
expected_errors = [
{
'message': (
'Unable to resolve key \'value\' in expression \''
'<% succeeded() and result().value %>\' from context.'
),
'task_transition_id': 'task2__0',
'task_id': 'task1'
}
]
ex = self._execute_workflow('examples.orquesta-fail-task-transition')
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ex.result, {'errors': expected_errors, 'output': None})
def test_task_publish_error(self):
expected_errors = [
{
'message': (
'Unable to resolve key \'value\' in expression \''
'<% result().value %>\' from context.'
),
'task_transition_id': 'task2__0',
'task_id': 'task1'
}
]
ex = self._execute_workflow('examples.orquesta-fail-task-publish')
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ex.result, {'errors': expected_errors, 'output': None})
def test_output_error(self):
expected_errors = [{'message': 'Unknown function "#property#value"'}]
ex = self._execute_workflow('examples.orquesta-fail-output-rendering')
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ex.result, {'errors': expected_errors, 'output': None})
def test_task_content_errors(self):
expected_errors = [
{
'type': 'content',
'message': 'The action reference "echo" is not formatted correctly.',
'schema_path': 'properties.tasks.patternProperties.^\w+$.properties.action',
'spec_path': 'tasks.task1.action'
},
{
'type': 'content',
'message': 'The action "core.echoz" is not registered in the database.',
'schema_path': 'properties.tasks.patternProperties.^\w+$.properties.action',
'spec_path': 'tasks.task2.action'
},
{
'type': 'content',
'message': 'Action "core.echo" is missing required input "message".',
'schema_path': 'properties.tasks.patternProperties.^\w+$.properties.input',
'spec_path': 'tasks.task3.input'
},
{
'type': 'content',
'message': 'Action "core.echo" has unexpected input "messages".',
'schema_path': (
'properties.tasks.patternProperties.^\w+$.properties.input.'
'patternProperties.^\w+$'
),
'spec_path': 'tasks.task3.input.messages'
}
]
ex = self._execute_workflow('examples.orquesta-fail-inspection-task-contents')
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ex.result, {'errors': expected_errors, 'output': None})
|
[
"[email protected]"
] | |
c9e883fa698c1a3aefc67747af1fc68a37696834
|
891aba394df57d7894900e99e5881ad5817a84bd
|
/s23/23.4.1_readlines_v2.py
|
70870c5131582b1b2ae4b22f6171b7b1876f9546
|
[] |
no_license
|
feliperojas/mision_tic_G11
|
42d87e698eb8c9ace896805f5fc5436a0035ec3b
|
cfc41e873a4138f3f4f2ad63143042eb606c0f45
|
refs/heads/master
| 2023-05-28T09:29:40.247531 | 2021-06-09T16:43:45 | 2021-06-09T16:43:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 301 |
py
|
contador = 1
with open("C:/Users/Camilo/Desktop/reportes/reporte2.txt","r") as archivo:
data = archivo.readlines()
print("El archivo tiene",len(data), "lineas")
for linea in data:
print(f"En la linea {contador} esta es la informacion: {linea}", end="")
print()
contador+=1
|
[
"[email protected]"
] | |
d094c0aaa88325a814350474f23b955acbc5a666
|
581d96e72cf7608ada7564db5fad3fde78f3644c
|
/test/test_serialization.py
|
7875dc3037e4308c3283912b3529a08f6608ba01
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
heitorschueroff/pytorch
|
dee92c2d2991f2ee0821c438a245dc704eb52b10
|
6f95850127e4659f857116c8205f03f66ab5a711
|
refs/heads/master
| 2023-08-11T11:47:06.710438 | 2021-08-02T17:24:37 | 2021-08-02T17:26:54 | 350,848,572 | 1 | 0 |
NOASSERTION
| 2021-03-23T20:29:19 | 2021-03-23T20:29:18 | null |
UTF-8
|
Python
| false | false | 30,719 |
py
|
import torch
import unittest
import io
import tempfile
import os
import sys
import zipfile
import warnings
import gzip
import copy
import pickle
import shutil
import pathlib
from torch._utils_internal import get_file_path_2
from torch._utils import _rebuild_tensor
from torch.serialization import check_module_version_greater_or_equal
from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, \
TEST_DILL, run_tests, download_file, BytesIOContext, TemporaryFileName
from torch.testing._internal.common_device_type import instantiate_device_type_tests
# These tests were all copied from `test/test_torch.py` at some point, so to
# see the actual blame, see this revision
# https://github.com/pytorch/pytorch/blame/9a2691f2fc948b9792686085b493c61793c2de30/test/test_torch.py
if TEST_DILL:
import dill
HAS_DILL_AT_LEAST_0_3_1 = check_module_version_greater_or_equal(dill, (0, 3, 1))
else:
HAS_DILL_AT_LEAST_0_3_1 = False
can_retrieve_source = True
with warnings.catch_warnings(record=True) as warns:
with tempfile.NamedTemporaryFile() as checkpoint:
x = torch.save(torch.nn.Module(), checkpoint)
for warn in warns:
if "Couldn't retrieve source code" in warn.message.args[0]:
can_retrieve_source = False
break
class FilelikeMock(object):
def __init__(self, data, has_fileno=True, has_readinto=False):
if has_readinto:
self.readinto = self.readinto_opt
if has_fileno:
# Python 2's StringIO.StringIO has no fileno attribute.
# This is used to test that.
self.fileno = self.fileno_opt
self.calls = set()
self.bytesio = io.BytesIO(data)
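        # trace() wraps a BytesIO method so that every call records the method
        # name in self.calls before delegating to the real implementation.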
def trace(fn, name):
def result(*args, **kwargs):
self.calls.add(name)
return fn(*args, **kwargs)
return result
for attr in ['read', 'readline', 'seek', 'tell', 'write', 'flush']:
traced_fn = trace(getattr(self.bytesio, attr), attr)
setattr(self, attr, traced_fn)
def fileno_opt(self):
raise io.UnsupportedOperation('Not a real file')
def readinto_opt(self, view):
self.calls.add('readinto')
return self.bytesio.readinto(view)
def was_called(self, name):
return name in self.calls
class SerializationMixin(object):
def _test_serialization_data(self):
a = [torch.randn(5, 5).float() for i in range(2)]
b = [a[i % 2] for i in range(4)] # 0-3
b += [a[0].storage()] # 4
b += [a[0].reshape(-1)[1:4].storage()] # 5
b += [torch.arange(1, 11).int()] # 6
t1 = torch.FloatTensor().set_(a[0].reshape(-1)[1:4].clone().storage(), 0, (3,), (1,))
t2 = torch.FloatTensor().set_(a[0].reshape(-1)[1:4].clone().storage(), 0, (3,), (1,))
b += [(t1.storage(), t1.storage(), t2.storage())] # 7
b += [a[0].reshape(-1)[0:2].storage()] # 8
return b
def _test_serialization_assert(self, b, c):
self.assertEqual(b, c, atol=0, rtol=0)
self.assertTrue(isinstance(c[0], torch.FloatTensor))
self.assertTrue(isinstance(c[1], torch.FloatTensor))
self.assertTrue(isinstance(c[2], torch.FloatTensor))
self.assertTrue(isinstance(c[3], torch.FloatTensor))
self.assertTrue(isinstance(c[4], torch.FloatStorage))
c[0].fill_(10)
self.assertEqual(c[0], c[2], atol=0, rtol=0)
self.assertEqual(c[4], torch.FloatStorage(25).fill_(10), atol=0, rtol=0)
c[1].fill_(20)
self.assertEqual(c[1], c[3], atol=0, rtol=0)
# I have to do it in this roundabout fashion, because there's no
# way to slice storages
for i in range(4):
self.assertEqual(c[4][i + 1], c[5][i])
# check that serializing the same storage view object unpickles
# it as one object not two (and vice versa)
views = c[7]
self.assertEqual(views[0]._cdata, views[1]._cdata)
self.assertEqual(views[0], views[2])
self.assertNotEqual(views[0]._cdata, views[2]._cdata)
rootview = c[8]
self.assertEqual(rootview.data_ptr(), c[0].data_ptr())
def test_serialization_zipfile_utils(self):
data = {
'a': b'12039810948234589',
'b': b'1239081209484958',
'c/d': b'94589480984058'
}
def test(name_or_buffer):
with torch.serialization._open_zipfile_writer(name_or_buffer) as zip_file:
for key in data:
zip_file.write_record(key, data[key], len(data[key]))
if hasattr(name_or_buffer, 'seek'):
name_or_buffer.seek(0)
with torch.serialization._open_zipfile_reader(name_or_buffer) as zip_file:
for key in data:
actual = zip_file.get_record(key)
expected = data[key]
self.assertEqual(expected, actual)
with tempfile.NamedTemporaryFile() as f:
test(f)
with TemporaryFileName() as fname:
test(fname)
test(io.BytesIO())
def test_serialization(self):
# Test serialization with a real file
b = self._test_serialization_data()
with tempfile.NamedTemporaryFile() as f:
torch.save(b, f)
f.seek(0)
c = torch.load(f)
self._test_serialization_assert(b, c)
with TemporaryFileName() as fname:
torch.save(b, fname)
c = torch.load(fname)
self._test_serialization_assert(b, c)
# test non-ascii encoding of bytes arrays/strings
# The following bytes are produced by serializing
# [b'\xc5\xbc\xc4\x85\xc4\x85\xc3\xb3\xc5\xbc\xc4\x85\xc5\xbc', torch.zeros(1, dtype=torch.float), 2]
# in Python 2.7.12 and PyTorch 0.4.1, where the first element contains
# bytes of some utf-8 characters (i.e., `utf8_str.encode('utf-8')`).
serialized = (
b'\x80\x02\x8a\nl\xfc\x9cF\xf9 j\xa8P\x19.\x80\x02M\xe9\x03.'
b'\x80\x02}q\x01(U\x10protocol_versionq\x02M\xe9\x03U\n'
b'type_sizesq\x03}q\x04(U\x03intq\x05K\x04U\x05shortq\x06K\x02U'
b'\x04longq\x07K\x04uU\rlittle_endianq\x08\x88u.\x80\x02]q'
b'\x01(U\x0e\xc5\xbc\xc4\x85\xc4\x85\xc3\xb3\xc5\xbc\xc4\x85'
b'\xc5\xbcq\x02ctorch._utils\n_rebuild_tensor_v2\nq\x03((U'
b'\x07storageq\x04ctorch\nFloatStorage\nq\x05U\x0845640624q'
b'\x06U\x03cpuq\x07\x8a\x01\x01NtQK\x00K\x01\x85K\x01\x85'
b'\x89NtRq\x08K\x02e.\x80\x02]q\x01U\x0845640624q\x02a.\x01\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
)
buf = io.BytesIO(serialized)
utf8_bytes = b'\xc5\xbc\xc4\x85\xc4\x85\xc3\xb3\xc5\xbc\xc4\x85\xc5\xbc'
utf8_str = utf8_bytes.decode('utf-8')
loaded_utf8 = torch.load(buf, encoding='utf-8')
self.assertEqual(loaded_utf8, [utf8_str, torch.zeros(1, dtype=torch.float), 2])
buf.seek(0)
loaded_bytes = torch.load(buf, encoding='bytes')
self.assertEqual(loaded_bytes, [utf8_bytes, torch.zeros(1, dtype=torch.float), 2])
def test_serialization_filelike(self):
# Test serialization (load and save) with a filelike object
b = self._test_serialization_data()
with BytesIOContext() as f:
torch.save(b, f)
f.seek(0)
c = torch.load(f)
self._test_serialization_assert(b, c)
def test_serialization_fake_zip(self):
data = [
ord('P'),
ord('K'),
5,
6
]
for i in range(0, 100):
data.append(0)
t = torch.tensor(data, dtype=torch.uint8)
with tempfile.NamedTemporaryFile() as f:
torch.save(t, f)
# If this check is False for all Python versions (i.e. the fix
# has been backported), this test and torch.serialization._is_zipfile
# can be deleted
self.assertTrue(zipfile.is_zipfile(f))
self.assertFalse(torch.serialization._is_zipfile(f))
f.seek(0)
self.assertEqual(torch.load(f), t)
def test_serialization_gzip(self):
# Test serialization with gzip file
b = self._test_serialization_data()
f1 = tempfile.NamedTemporaryFile(delete=False)
f2 = tempfile.NamedTemporaryFile(delete=False)
torch.save(b, f1)
with open(f1.name, 'rb') as f_in, gzip.open(f2.name, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
with gzip.open(f2.name, 'rb') as f:
c = torch.load(f)
self._test_serialization_assert(b, c)
@unittest.skipIf(
not TEST_DILL or HAS_DILL_AT_LEAST_0_3_1,
'"dill" not found or is correct version'
)
def test_serialization_dill_version_not_supported(self):
x = torch.randn(5, 5)
with tempfile.NamedTemporaryFile() as f:
with self.assertRaisesRegex(ValueError, 'supports dill >='):
torch.save(x, f, pickle_module=dill)
f.seek(0)
with self.assertRaisesRegex(ValueError, 'supports dill >='):
x2 = torch.load(f, pickle_module=dill, encoding='utf-8')
@unittest.skipIf(
not TEST_DILL or not HAS_DILL_AT_LEAST_0_3_1,
'"dill" not found or not correct version'
)
def test_serialization_dill(self):
x = torch.randn(5, 5)
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f, pickle_module=dill)
f.seek(0)
x2 = torch.load(f, pickle_module=dill, encoding='utf-8')
self.assertIsInstance(x2, type(x))
self.assertEqual(x, x2)
f.seek(0)
x3 = torch.load(f, pickle_module=dill)
self.assertIsInstance(x3, type(x))
self.assertEqual(x, x3)
def test_serialization_offset_gzip(self):
a = torch.randn(5, 5)
i = 41
f1 = tempfile.NamedTemporaryFile(delete=False)
f2 = tempfile.NamedTemporaryFile(delete=False)
with open(f1.name, 'wb') as f:
pickle.dump(i, f)
torch.save(a, f)
with open(f1.name, 'rb') as f_in, gzip.open(f2.name, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
with gzip.open(f2.name, 'rb') as f:
j = pickle.load(f)
b = torch.load(f)
self.assertTrue(torch.equal(a, b))
self.assertEqual(i, j)
def test_serialization_sparse(self):
x = torch.zeros(3, 3)
x[1][1] = 1
x = x.to_sparse()
with tempfile.NamedTemporaryFile() as f:
torch.save({"tensor": x}, f)
f.seek(0)
y = torch.load(f)
self.assertEqual(x, y["tensor"])
def test_serialization_sparse_invalid(self):
x = torch.zeros(3, 3)
x[1][1] = 1
x = x.to_sparse()
class TensorSerializationSpoofer(object):
def __init__(self, tensor):
self.tensor = tensor
def __reduce_ex__(self, proto):
invalid_indices = self.tensor._indices().clone()
invalid_indices[0][0] = 3
return (
torch._utils._rebuild_sparse_tensor,
(
self.tensor.layout,
(
invalid_indices,
self.tensor._values(),
self.tensor.size())))
with tempfile.NamedTemporaryFile() as f:
torch.save({"spoofed": TensorSerializationSpoofer(x)}, f)
f.seek(0)
with self.assertRaisesRegex(
RuntimeError,
"size is inconsistent with indices"):
y = torch.load(f)
def test_serialize_device(self):
device_str = ['cpu', 'cpu:0', 'cuda', 'cuda:0']
device_obj = [torch.device(d) for d in device_str]
for device in device_obj:
device_copied = copy.deepcopy(device)
self.assertEqual(device, device_copied)
def test_serialization_backwards_compat(self):
a = [torch.arange(1 + i, 26 + i).view(5, 5).float() for i in range(2)]
b = [a[i % 2] for i in range(4)]
b += [a[0].storage()]
b += [a[0].reshape(-1)[1:4].clone().storage()]
path = download_file('https://download.pytorch.org/test_data/legacy_serialized.pt')
c = torch.load(path)
self.assertEqual(b, c, atol=0, rtol=0)
self.assertTrue(isinstance(c[0], torch.FloatTensor))
self.assertTrue(isinstance(c[1], torch.FloatTensor))
self.assertTrue(isinstance(c[2], torch.FloatTensor))
self.assertTrue(isinstance(c[3], torch.FloatTensor))
self.assertTrue(isinstance(c[4], torch.FloatStorage))
c[0].fill_(10)
self.assertEqual(c[0], c[2], atol=0, rtol=0)
self.assertEqual(c[4], torch.FloatStorage(25).fill_(10), atol=0, rtol=0)
c[1].fill_(20)
self.assertEqual(c[1], c[3], atol=0, rtol=0)
# test some old tensor serialization mechanism
class OldTensorBase(object):
def __init__(self, new_tensor):
self.new_tensor = new_tensor
def __getstate__(self):
return (self.new_tensor.storage(),
self.new_tensor.storage_offset(),
tuple(self.new_tensor.size()),
self.new_tensor.stride())
class OldTensorV1(OldTensorBase):
def __reduce__(self):
return (torch.Tensor, (), self.__getstate__())
class OldTensorV2(OldTensorBase):
def __reduce__(self):
return (_rebuild_tensor, self.__getstate__())
x = torch.randn(30).as_strided([2, 3], [9, 3], 2)
for old_cls in [OldTensorV1, OldTensorV2]:
with tempfile.NamedTemporaryFile() as f:
old_x = old_cls(x)
torch.save(old_x, f)
f.seek(0)
load_x = torch.load(f)
self.assertEqual(x.storage(), load_x.storage())
self.assertEqual(x.storage_offset(), load_x.storage_offset())
self.assertEqual(x.size(), load_x.size())
self.assertEqual(x.stride(), load_x.stride())
def test_serialization_save_warnings(self):
with warnings.catch_warnings(record=True) as warns:
with tempfile.NamedTemporaryFile() as checkpoint:
x = torch.save(torch.nn.Linear(2, 3), checkpoint)
self.assertEquals(len(warns), 0)
def test_serialization_map_location(self):
test_file_path = download_file('https://download.pytorch.org/test_data/gpu_tensors.pt')
def map_location(storage, loc):
return storage
def load_bytes():
with open(test_file_path, 'rb') as f:
return io.BytesIO(f.read())
fileobject_lambdas = [lambda: test_file_path, load_bytes]
cpu_map_locations = [
map_location,
{'cuda:0': 'cpu'},
'cpu',
torch.device('cpu'),
]
gpu_0_map_locations = [
{'cuda:0': 'cuda:0'},
'cuda',
'cuda:0',
torch.device('cuda'),
torch.device('cuda', 0)
]
gpu_last_map_locations = [
'cuda:{}'.format(torch.cuda.device_count() - 1),
]
def check_map_locations(map_locations, tensor_class, intended_device):
for fileobject_lambda in fileobject_lambdas:
for map_location in map_locations:
tensor = torch.load(fileobject_lambda(), map_location=map_location)
self.assertEqual(tensor.device, intended_device)
self.assertIsInstance(tensor, tensor_class)
self.assertEqual(tensor, tensor_class([[1.0, 2.0], [3.0, 4.0]]))
check_map_locations(cpu_map_locations, torch.FloatTensor, torch.device('cpu'))
if torch.cuda.is_available():
check_map_locations(gpu_0_map_locations, torch.cuda.FloatTensor, torch.device('cuda', 0))
check_map_locations(
gpu_last_map_locations,
torch.cuda.FloatTensor,
torch.device('cuda', torch.cuda.device_count() - 1)
)
@unittest.skipIf(torch.cuda.is_available(), "Testing torch.load on CPU-only machine")
def test_load_nonexistent_device(self):
# Setup: create a serialized file object with a 'cuda:0' restore location
# The following was generated by saving a torch.randn(2, device='cuda') tensor.
serialized = (b'\x80\x02\x8a\nl\xfc\x9cF\xf9 j\xa8P\x19.\x80\x02M\xe9'
b'\x03.\x80\x02}q\x00(X\x10\x00\x00\x00protocol_versionq'
b'\x01M\xe9\x03X\r\x00\x00\x00little_endianq\x02\x88X\n'
b'\x00\x00\x00type_sizesq\x03}q\x04(X\x05\x00\x00\x00shortq'
b'\x05K\x02X\x03\x00\x00\x00intq\x06K\x04X\x04\x00\x00\x00'
b'longq\x07K\x04uu.\x80\x02ctorch._utils\n_rebuild_tensor_v2'
b'\nq\x00((X\x07\x00\x00\x00storageq\x01ctorch\nFloatStorage'
b'\nq\x02X\x0e\x00\x00\x0094919395964320q\x03X\x06\x00\x00'
b'\x00cuda:0q\x04K\x02Ntq\x05QK\x00K\x02\x85q\x06K\x01\x85q'
b'\x07\x89Ntq\x08Rq\t.\x80\x02]q\x00X\x0e\x00\x00\x00'
b'94919395964320q\x01a.\x02\x00\x00\x00\x00\x00\x00\x00\xbb'
b'\x1f\x82\xbe\xea\x81\xd1>')
buf = io.BytesIO(serialized)
error_msg = r'Attempting to deserialize object on a CUDA device'
with self.assertRaisesRegex(RuntimeError, error_msg):
_ = torch.load(buf)
@unittest.skipIf((3, 8, 0) <= sys.version_info < (3, 8, 2), "See https://bugs.python.org/issue39681")
def test_serialization_filelike_api_requirements(self):
filemock = FilelikeMock(b'', has_readinto=False)
tensor = torch.randn(3, 5)
torch.save(tensor, filemock)
expected_superset = {'write', 'flush'}
self.assertTrue(expected_superset.issuperset(filemock.calls))
# Reset between save and load
filemock.seek(0)
filemock.calls.clear()
_ = torch.load(filemock)
expected_superset = {'read', 'readline', 'seek', 'tell'}
self.assertTrue(expected_superset.issuperset(filemock.calls))
def _test_serialization_filelike(self, tensor, mock, desc):
f = mock(b'')
torch.save(tensor, f)
f.seek(0)
data = mock(f.read())
msg = 'filelike serialization with {}'
b = torch.load(data)
self.assertTrue(torch.equal(tensor, b), msg.format(desc))
@unittest.skipIf((3, 8, 0) <= sys.version_info < (3, 8, 2), "See https://bugs.python.org/issue39681")
def test_serialization_filelike_missing_attrs(self):
# Test edge cases where filelike objects are missing attributes.
# The Python io docs suggests that these attributes should really exist
# and throw io.UnsupportedOperation, but that isn't always the case.
mocks = [
('no readinto', lambda x: FilelikeMock(x)),
('has readinto', lambda x: FilelikeMock(x, has_readinto=True)),
('no fileno', lambda x: FilelikeMock(x, has_fileno=False)),
]
to_serialize = torch.randn(3, 10)
for desc, mock in mocks:
self._test_serialization_filelike(to_serialize, mock, desc)
@unittest.skipIf((3, 8, 0) <= sys.version_info < (3, 8, 2), "See https://bugs.python.org/issue39681")
def test_serialization_filelike_stress(self):
a = torch.randn(11 * (2 ** 9) + 1, 5 * (2 ** 9))
# This one should call python read multiple times
self._test_serialization_filelike(a, lambda x: FilelikeMock(x, has_readinto=False),
'read() stress test')
self._test_serialization_filelike(a, lambda x: FilelikeMock(x, has_readinto=True),
'readinto() stress test')
def test_serialization_filelike_uses_readinto(self):
        # For maximum efficiency, when reading a file-like object,
# ensure the C API calls readinto instead of read.
a = torch.randn(5, 4)
f = io.BytesIO()
torch.save(a, f)
f.seek(0)
data = FilelikeMock(f.read(), has_readinto=True)
b = torch.load(data)
self.assertTrue(data.was_called('readinto'))
def test_serialization_storage_slice(self):
# Generated using:
#
# t = torch.zeros(2);
# s1 = t.storage()[:1]
# s2 = t.storage()[1:]
# torch.save((s1, s2), 'foo.ser')
#
# with PyTorch 0.3.1
serialized = (b'\x80\x02\x8a\nl\xfc\x9cF\xf9 j\xa8P\x19.\x80\x02M\xe9\x03'
b'.\x80\x02}q\x00(X\n\x00\x00\x00type_sizesq\x01}q\x02(X\x03'
b'\x00\x00\x00intq\x03K\x04X\x05\x00\x00\x00shortq\x04K\x02X'
b'\x04\x00\x00\x00longq\x05K\x04uX\x10\x00\x00\x00protocol_versionq'
b'\x06M\xe9\x03X\r\x00\x00\x00little_endianq\x07\x88u.\x80\x02'
b'(X\x07\x00\x00\x00storageq\x00ctorch\nFloatStorage\nq\x01X\x0e'
b'\x00\x00\x0094279043900432q\x02X\x03\x00\x00\x00cpuq\x03K\x02'
b'X\x0e\x00\x00\x0094279029750368q\x04K\x00K\x01\x87q\x05tq\x06'
b'Q(h\x00h\x01X\x0e\x00\x00\x0094279043900432q\x07h\x03K\x02X'
b'\x0e\x00\x00\x0094279029750432q\x08K\x01K\x01\x87q\ttq\nQ'
b'\x86q\x0b.\x80\x02]q\x00X\x0e\x00\x00\x0094279043900432q'
b'\x01a.\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00')
buf = io.BytesIO(serialized)
(s1, s2) = torch.load(buf)
self.assertEqual(s1[0], 0)
self.assertEqual(s2[0], 0)
self.assertEqual(s1.data_ptr() + 4, s2.data_ptr())
def test_load_unicode_error_msg(self):
# This Pickle contains a Python 2 module with Unicode data and the
# loading should fail if the user explicitly specifies ascii encoding!
path = download_file('https://download.pytorch.org/test_data/legacy_conv2d.pt')
self.assertRaises(UnicodeDecodeError, lambda: torch.load(path, encoding='ascii'))
def test_load_python2_unicode_module(self):
# This Pickle contains some Unicode data!
path = download_file('https://download.pytorch.org/test_data/legacy_conv2d.pt')
with warnings.catch_warnings(record=True) as w:
self.assertIsNotNone(torch.load(path))
def test_load_error_msg(self):
expected_err_msg = (".*You can only torch.load from a file that is seekable. " +
"Please pre-load the data into a buffer like io.BytesIO and " +
"try to load from it instead.")
resource = FilelikeMock(data=b"data")
delattr(resource, "tell")
delattr(resource, "seek")
with self.assertRaisesRegex(AttributeError, expected_err_msg):
torch.load(resource)
class serialization_method(object):
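    # Context manager that monkey-patches torch.save so that every call made
    # inside the block is forced to use (or to skip) the new zipfile format.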
def __init__(self, use_zip):
self.use_zip = use_zip
self.torch_save = torch.save
def __enter__(self, *args, **kwargs):
def wrapper(*args, **kwargs):
if '_use_new_zipfile_serialization' in kwargs:
raise RuntimeError("Cannot set method manually")
kwargs['_use_new_zipfile_serialization'] = self.use_zip
return self.torch_save(*args, **kwargs)
torch.save = wrapper
def __exit__(self, *args, **kwargs):
torch.save = self.torch_save
class TestBothSerialization(TestCase):
@unittest.skipIf(IS_WINDOWS, "NamedTemporaryFile on windows")
def test_serialization_new_format_old_format_compat(self, device):
x = [torch.ones(200, 200, device=device) for i in range(30)]
def test(f_new, f_old):
torch.save(x, f_new, _use_new_zipfile_serialization=True)
f_new.seek(0)
x_new_load = torch.load(f_new)
self.assertEqual(x, x_new_load)
torch.save(x, f_old, _use_new_zipfile_serialization=False)
f_old.seek(0)
x_old_load = torch.load(f_old)
self.assertEqual(x_old_load, x_new_load)
with tempfile.NamedTemporaryFile() as f_new, tempfile.NamedTemporaryFile() as f_old:
test(f_new, f_old)
class TestOldSerialization(TestCase, SerializationMixin):
# unique_key is necessary because on Python 2.7, if a warning passed to
# the warning module is the same, it is not raised again.
def _test_serialization_container(self, unique_key, filecontext_lambda):
tmpmodule_name = 'tmpmodule{}'.format(unique_key)
def import_module(name, filename):
import importlib.util
spec = importlib.util.spec_from_file_location(name, filename)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
sys.modules[module.__name__] = module
return module
with filecontext_lambda() as checkpoint:
fname = get_file_path_2(os.path.dirname(os.path.dirname(torch.__file__)), 'torch', 'testing',
'_internal', 'data', 'network1.py')
module = import_module(tmpmodule_name, fname)
torch.save(module.Net(), checkpoint)
# First check that the checkpoint can be loaded without warnings
checkpoint.seek(0)
with warnings.catch_warnings(record=True) as w:
loaded = torch.load(checkpoint)
self.assertTrue(isinstance(loaded, module.Net))
if can_retrieve_source:
self.assertEquals(len(w), 0)
# Replace the module with different source
fname = get_file_path_2(os.path.dirname(os.path.dirname(torch.__file__)), 'torch', 'testing',
'_internal', 'data', 'network2.py')
module = import_module(tmpmodule_name, fname)
checkpoint.seek(0)
with warnings.catch_warnings(record=True) as w:
loaded = torch.load(checkpoint)
self.assertTrue(isinstance(loaded, module.Net))
if can_retrieve_source:
self.assertEquals(len(w), 1)
self.assertTrue(w[0].category, 'SourceChangeWarning')
def test_serialization_container(self):
self._test_serialization_container('file', tempfile.NamedTemporaryFile)
def test_serialization_container_filelike(self):
self._test_serialization_container('filelike', BytesIOContext)
def test_serialization_offset(self):
a = torch.randn(5, 5)
b = torch.randn(1024, 1024, 512, dtype=torch.float32)
m = torch.nn.Conv2d(1, 1, (1, 3))
i, j = 41, 43
with tempfile.NamedTemporaryFile() as f:
pickle.dump(i, f)
torch.save(a, f)
pickle.dump(j, f)
torch.save(b, f)
torch.save(m, f)
self.assertTrue(f.tell() > 2 * 1024 * 1024 * 1024)
f.seek(0)
i_loaded = pickle.load(f)
a_loaded = torch.load(f)
j_loaded = pickle.load(f)
b_loaded = torch.load(f)
m_loaded = torch.load(f)
self.assertTrue(torch.equal(a, a_loaded))
self.assertTrue(torch.equal(b, b_loaded))
self.assertTrue(m.kernel_size == m_loaded.kernel_size)
self.assertEqual(i, i_loaded)
self.assertEqual(j, j_loaded)
def test_serialization_offset_filelike(self):
a = torch.randn(5, 5)
b = torch.randn(1024, 1024, 512, dtype=torch.float32)
i, j = 41, 43
with BytesIOContext() as f:
pickle.dump(i, f)
torch.save(a, f)
pickle.dump(j, f)
torch.save(b, f)
self.assertTrue(f.tell() > 2 * 1024 * 1024 * 1024)
f.seek(0)
i_loaded = pickle.load(f)
a_loaded = torch.load(f)
j_loaded = pickle.load(f)
b_loaded = torch.load(f)
self.assertTrue(torch.equal(a, a_loaded))
self.assertTrue(torch.equal(b, b_loaded))
self.assertEqual(i, i_loaded)
self.assertEqual(j, j_loaded)
def run(self, *args, **kwargs):
with serialization_method(use_zip=False):
return super(TestOldSerialization, self).run(*args, **kwargs)
class TestSerialization(TestCase, SerializationMixin):
def test_serialization_zipfile(self):
data = self._test_serialization_data()
def test(name_or_buffer):
torch.save(data, name_or_buffer)
if hasattr(name_or_buffer, 'seek'):
name_or_buffer.seek(0)
result = torch.load(name_or_buffer)
self.assertEqual(result, data)
with tempfile.NamedTemporaryFile() as f:
test(f)
with TemporaryFileName() as fname:
test(fname)
test(io.BytesIO())
def test_serialization_zipfile_actually_jit(self):
with tempfile.NamedTemporaryFile() as f:
torch.jit.save(torch.jit.script(torch.nn.Linear(3, 4)), f)
f.seek(0)
torch.load(f)
# Ensure large zip64 serialization works properly
def test_serialization_2gb_file(self):
big_model = torch.nn.Conv2d(20000, 3200, kernel_size=3)
with BytesIOContext() as f:
torch.save(big_model, f)
f.seek(0)
state = torch.load(f)
def test_pathlike_serialization(self):
model = torch.nn.Conv2d(20, 3200, kernel_size=3)
with TemporaryFileName() as fname:
path = pathlib.Path(fname)
torch.save(model, path)
torch.load(path)
def test_meta_serialization(self):
big_model = torch.nn.Conv2d(20000, 320000, kernel_size=3, device='meta')
with BytesIOContext() as f:
torch.save(big_model, f)
f.seek(0)
state = torch.load(f)
self.assertEqual(state.weight.size(), big_model.weight.size())
def run(self, *args, **kwargs):
with serialization_method(use_zip=True):
return super(TestSerialization, self).run(*args, **kwargs)
instantiate_device_type_tests(TestBothSerialization, globals())
if __name__ == '__main__':
run_tests()
|
[
"[email protected]"
] | |
f4dfc272af48dd327f4b10f236a506542361cb96
|
167b90bff7f1db51a066f7b8f6f543b77a077ebf
|
/exercise087.py
|
57696072a4ad8dc9594e5f6493b4d3508ae13084
|
[] |
no_license
|
DanielMafra/Python-LanguageStudies
|
9bcbe753c14e5aa2b23b11c5e103cf00c7dfcad3
|
29700f832ebbddad6e74d88add70c08eeba14054
|
refs/heads/main
| 2023-07-02T09:34:31.856246 | 2021-07-29T21:34:39 | 2021-07-29T21:34:39 | 389,669,574 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 723 |
py
|
headquarters = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
spar = mai = scol = 0
for l in range(0, 3):
for c in range(0, 3):
headquarters[l][c] = int(input(f'Enter a value for [{l}, {c}]: '))
print('-=' * 30)
for l in range(0, 3):
for c in range(0, 3):
print(f'[{headquarters[l][c]:^5}]', end='')
if headquarters[l][c] % 2 == 0:
spar += headquarters[l][c]
print()
print('-=' * 30)
print(f'Sum pairs: {spar}')
for l in range(0, 3):
scol += headquarters[l][2]
print(f'Sum third column: {scol}')
for c in range(0, 3):
if c == 0:
mai = headquarters[1][c]
elif headquarters[1][c] > mai:
mai = headquarters[1][c]
print(f'Highest value second row: {mai}')
|
[
"[email protected]"
] | |
4c29258f54f9be2f278ea84aec911870138c9616
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_073/ch60_2019_08_19_13_12_46_356317.py
|
f51bbcd61001ea0ca3b849643fd9c759d8083ad8
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 105 |
py
|
def asteriscos(n):
barra='*'*n
return barra
n=int(input('digite_um_numero'))
print(asteriscos(n))
|
[
"[email protected]"
] | |
ede2c4d557022d282d3225e376d14e79ed3466a0
|
cfad82fd82eeb832bce6f8d3c30aad05d000ae9b
|
/migrations/versions/13b676178b08_box_plot.py
|
d05afe3dbcf455bfc4681ad633e5ab85ccde9b6a
|
[
"Apache-2.0"
] |
permissive
|
dpdi-unifor/caipirinha
|
73508fcc6aa519749db69d1126a65e4f27099ffd
|
43e4512c282cfcfa988ea38e160939b6f3c2c604
|
refs/heads/master
| 2022-12-08T04:39:19.637631 | 2020-07-16T22:50:30 | 2020-07-16T22:50:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,498 |
py
|
"""box_plot
Revision ID: 13b676178b08
Revises: cfacecb61ac1
Create Date: 2019-04-16 21:55:10.723443
"""
from alembic import op
from sqlalchemy import String, Integer
from sqlalchemy.sql import table, column, text
# revision identifiers, used by Alembic.
revision = '13b676178b08'
down_revision = 'cfacecb61ac1'
branch_labels = None
depends_on = None
def insert_visualization_type():
tb = table(
'visualization_type',
column('id', Integer),
column('name', String),
column('help', String),
column('icon', String))
all_ops = [
(123, 'box-plot', 'Box plot', 'fa-chart'),
(124, 'histogram', 'Histogram', 'fa-chart'),
]
rows = [dict(zip([c.name for c in tb.columns], operation)) for operation in
all_ops]
op.bulk_insert(tb, rows)
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
try:
op.execute(text('START TRANSACTION'))
insert_visualization_type()
op.execute(text('COMMIT'))
except:
op.execute(text('ROLLBACK'))
raise
# noinspection PyBroadException
def downgrade():
try:
op.execute(text('START TRANSACTION'))
op.execute(text('SET FOREIGN_KEY_CHECKS=0;'))
op.execute(
text("DELETE FROM visualization_type WHERE id IN (123, 124)"))
op.execute(text('SET FOREIGN_KEY_CHECKS=1;'))
op.execute(text('COMMIT'))
except:
op.execute(text('ROLLBACK'))
raise
|
[
"[email protected]"
] | |
d022ac9505b8dfedb695be5bd5e43e6ab95c0ebd
|
22986b48baf0bb2e87055534cc47743292d123e7
|
/simcorpfinder/wsgi.py
|
264af00372a7a50265128d1d341b77f9b9dad156
|
[] |
no_license
|
GoatWang/DjangoTest
|
9d8e97351ce61f8815cc0f8b957c77f8b7122789
|
2c0a057c5947ff3c20b4456b2b17e874cac3d225
|
refs/heads/master
| 2021-01-22T07:32:02.740989 | 2017-09-04T09:29:04 | 2017-09-04T09:29:04 | 102,306,565 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 404 |
py
|
"""
WSGI config for simcorpfinder project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "simcorpfinder.settings")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
acbfef4791270a3e4d12702b4d80a8beb81ca83d
|
c868d681415d152ba331bd80e0ed542832f20f0e
|
/week13/onlineShop/onlineShop/main/models.py
|
0cf27335a5481957f4aa13325b13e507ce8ef636
|
[] |
no_license
|
Yeldarmt/BFDjango
|
a297a6b0c00ffb1a269f05c7e6665c5d34a51097
|
b8256ff1d5f2125495df66eabf267fc17e667aeb
|
refs/heads/master
| 2022-11-30T12:45:17.356453 | 2020-04-19T16:50:26 | 2020-04-19T16:50:26 | 233,515,749 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,584 |
py
|
from django.db import models
import os
from django.core.exceptions import ValidationError
ALLOWED_EXTENSIONS = ['.docx', '.pdf']
def validate_extension(value):
split_ext = os.path.splitext(value.name)
print('split_ext', split_ext)
if len(split_ext) > 1:
ext = split_ext[1]
if not ext.lower() in ALLOWED_EXTENSIONS:
raise ValidationError(f'not allowed file, valid extensions: {ALLOWED_EXTENSIONS}')
class Category(models.Model):
name = models.CharField(max_length=300)
category_desc = models.FileField(upload_to='desc_files',
validators=[validate_extension],
null=True, blank=True)
class Meta:
verbose_name = 'Category'
verbose_name_plural = 'Categories'
def __str__(self):
return 'Category id: {}, name: {}'.format(self.id, self.name)
def _try_create_profile_for_user(self, created):
print('not in _try_create_profile_for_user')
if created:
print('in _try_create_profile_for_user')
CategoryFullInfo.objects.get_or_create(category=self)
def save(self, *args, **kwargs):
print('before saving')
created = self.id is None
self.name = f'main_{self.name}'
super(Category, self).save(*args, **kwargs)
self._try_create_profile_for_user(created)
print('after saving')
class CategoryFullInfo(models.Model):
category = models.OneToOneField(Category, on_delete=models.CASCADE)
category_info = models.TextField(default='')
|
[
"[email protected]"
] | |
182f07a375be26f82adae3aaf222e5e132b84027
|
8015f1c62a2cb4efd21aa8938336913bf8117868
|
/bamap/ba0835.pngMap.py
|
80cc596be7d4227a6457d246d70ba629efdd12ef
|
[] |
no_license
|
GamerNoTitle/Beepers-and-OLED
|
675b5e3c179df0f0e27b42bf594c43860d03b9af
|
afe1340e5394ae96bda5f9022a8a66824368091e
|
refs/heads/master
| 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,468 |
py
|
ba0835.pngMap = [
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000101101000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000111111111000100000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000111111111111111111000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000011111111111111111111100000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000001111111111111111111111111111000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000101111111111111111111111111111111000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000001111111111111111111111111111111111110000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000111111111111111111111111111111111111100000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000111111111111111111111111111111111111110110100000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000111111111111111111111111111111111111000000100000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000111111111111111111111111111111110101000000010000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000111111111111111111111111111111100001000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000111111111111111111111111111111000000000001000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000111111111111111111111111111111000000000010000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000111111111111111111111111111000000000001010000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000111111111111111111111111110010010000010000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000111111111111111111110000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000111111111111111111100000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000001111111111111111000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000001001111111110000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000100111111000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
]
|
[
"[email protected]"
] | |
494736c4c41ac8fb3a48320e6706ab5f44726047
|
28b06ed3e562eb9c2b372934ea9a04e81320bb59
|
/setup.py
|
7fb9e8c05924b417dac0eb5d9dd8f89ddc9da35d
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
biomodels/MODEL1310110026
|
1f2cc0849110b22ce30be8e7444eba0c29e293db
|
503fd2992f9c20b25c633fecb97177fd0116404b
|
refs/heads/master
| 2016-09-05T19:27:29.698048 | 2014-10-16T05:30:04 | 2014-10-16T05:30:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 377 |
py
|
from setuptools import setup, find_packages
setup(name='MODEL1310110026',
version=20140916,
description='MODEL1310110026 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/MODEL1310110026',
maintainer='Stanley Gu',
maintainer_url='[email protected]',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
)
|
[
"[email protected]"
] | |
e64fde0b61a1ff49dd4a9e786d2b8546b5e85b1f
|
67747b6ee7d4e1d24eadc5d0390f38d609501ccd
|
/爬虫/图片爬虫/crawlpjt/crawlpjt/spiders/smartspider.py
|
8343a383f946ac46bbbe47bf1f7fbbc7434ef945
|
[] |
no_license
|
callmeliuchu/codeGitBook
|
577937013a355ba36a688792f5722d31be33fc0b
|
780cac294db47a46bb14129f166dd31c180e9473
|
refs/heads/master
| 2020-12-03T08:17:55.851568 | 2017-11-28T15:26:38 | 2017-11-28T15:26:38 | 95,679,807 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 748 |
py
|
# -*- coding: utf-8 -*-
import scrapy
from crawlpjt.items import CrawlpjtItem
from scrapy.http import Request
import re
class SmartspiderSpider(scrapy.Spider):
name = "smartspider"
allowed_domains = ["mmjpg.com"]
start_urls = ['http://www.douluo123.com/shaonvmanhua/114220.html']
def parse(self, response):
item = CrawlpjtItem()
paturl = "(http://dlpic.fungood.cn/uploads/.*?\.(jpg|png))"
item['picurl'] = re.compile(paturl).findall(str(response.body))
patid = "http://dlpic.fungood.cn/uploads/.*?/(.*?)\.(jpg|png)"
item['picid']=re.compile(patid).findall(str(response.body))
yield item
for i in range(201,220):
url = "http://www.douluo123.com/shaonvmanhua/114" + str(i) + ".html"
yield Request(url,callback=self.parse)
|
[
"[email protected]"
] | |
b0e4c3d769de1f3108c005c2de386ec073ad6d44
|
5147809b6382397185f2b1b6f43a272ea9e4f150
|
/reddening-colors.py
|
53c1d0fc71ecdf98e7c8f364825d71dbb0818e2b
|
[] |
no_license
|
AngelGSoto/python-programs-1
|
f8df3d498756ed6332504e8601924991803c3561
|
5e2b607ee9dd1459143a55218f9890a61539fd6a
|
refs/heads/master
| 2023-01-08T00:24:15.664946 | 2020-10-23T21:07:23 | 2020-10-23T21:07:23 | 297,742,537 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,460 |
py
|
# -*- coding: utf-8 -*-
'''
Make color-color diagrams for JPLUS 2017
'''
from __future__ import print_function
import numpy as np
import glob
import json
import matplotlib.pyplot as plt
import seaborn as sns
import sys
from scipy.stats import gaussian_kde
import pandas as pd
from astropy.table import Table
#import StringIO
from sympy import S, symbols
from scipy.optimize import fsolve
import os
#reading the files .json
pattern = "*-spectros/*-JPLUS17-magnitude.json"
file_list = glob.glob(pattern)
# def filter_mag(e, s, f1, f2, f3):
# '''
# Calculate the colors using any of set of filters
# '''
# col, col0 = [], []
# if data['id'].endswith(e):
# if data['id'].startswith(str(s)):
# filter1 = data[f1]
# filter2 = data[f2]
# filter3 = data[f3]
# diff = filter1 - filter2
# diff0 = filter1 - filter3
# col.append(diff)
# col0.append(diff0)
# return col, col0
def filter_mag(e, s, f1, f2, f3, f4):
'''
Calculate the colors using any of set of filters
'''
col, col0 = [], []
if data['id'].endswith(e):
if data['id'].startswith(str(s)):
filter1 = data[f1]
filter2 = data[f2]
filter3 = data[f3]
filter4 = data[f4]
diff = filter1 - filter2
diff0 = filter3 - filter4
col.append(diff)
col0.append(diff0)
return col, col0
def plot_mag(f1, f2, f3, f4):
x1, y1 = filter_mag("E00_100", "", f1, f2, f3, f4)
x2, y2 = filter_mag("E01_100", "", f1, f2, f3, f4)
x3, y3 = filter_mag("E02_100", "", f1, f2, f3, f4)
x4, y4 = filter_mag("E00_300", "", f1, f2, f3, f4)
x5, y5 = filter_mag("E01_300", "", f1, f2, f3, f4)
x6, y6 = filter_mag("E02_300", "", f1, f2, f3, f4)
x7, y7 = filter_mag("E00_600", "", f1, f2, f3, f4)
x8, y8 = filter_mag("E01_600", "", f1, f2, f3, f4)
x9, y9 = filter_mag("E02_600", "", f1, f2, f3, f4)
for a, b in zip(x1, y1):
A1[0].append(a)
B1[0].append(b)
for a, b in zip(x4, y4):
A1[0].append(a)
B1[0].append(b)
for a, b in zip(x7, y7):
A1[0].append(a)
B1[0].append(b)
for a, b in zip(x3, y3):
A1[1].append(a)
B1[1].append(b)
for a, b in zip(x6, y6):
A1[1].append(a)
B1[1].append(b)
for a, b in zip(x9, y9):
A1[1].append(a)
B1[1].append(b)
n = 3
A1, B1 = [[] for _ in range(n)], [[] for _ in range(n)]
d_644_jplus, d_768_jplus = [], []
d_644_jplus1, d_768_jplus1 = [], []
label=[]
for file_name in file_list:
with open(file_name) as f:
data = json.load(f)
# if data['id'].endswith("1-HPNe"):
# label.append("")
# elif data['id'].endswith("SLOAN-HPNe-"):
# label.append("H4-1")
# elif data['id'].endswith("1359559-HPNe"):
# label.append("PNG 135.9+55.9")
if data['id'].startswith("ngc"):
label.append("")
elif data['id'].startswith("mwc"):
label.append("")
#plot_mag("F625_r_sdss", "F660", "F766_i_sdss")
#plot_mag("F515", "F660", "F861")
#plot_mag("F911_z_sdss", "F660", "F480_g_sdss")
#plot_mag("F480_g_sdss", "F515", "F660", "F625_r_sdss")
plot_mag("F410", "F660", "F480_g_sdss", "F766_i_sdss")
print(np.mean(B1[0]), np.mean(A1[0]))
print(np.mean(B1[1]), np.mean(A1[1]))
|
[
"[email protected]"
] | |
1a174e26cf1fe96bba7762cd1b733be0dcec6705
|
6e9d6a682f20054e13d3764e95b8bd3b7b64fabf
|
/dailychallenge525.py
|
d10d8a429a88bee8b70f25c94e5e6f075a337848
|
[] |
no_license
|
SeanyDcode/codechallenges
|
30a271e04bc2b360bca923ae868be65a9533c8db
|
947cf3034911b381afaf777794d22d2af06aa5ba
|
refs/heads/master
| 2022-11-07T21:22:56.927863 | 2022-10-18T23:33:13 | 2022-10-18T23:33:13 | 154,498,776 | 1 | 0 | null | 2022-10-18T23:02:05 | 2018-10-24T12:38:45 |
Python
|
UTF-8
|
Python
| false | false | 412 |
py
|
# from dailycodingproblem.com
#
# Daily Challenge #525
# Given a N by M matrix of numbers, print out the matrix in a clockwise spiral.
#
# For example, given the following matrix:
#
# [[1, 2, 3, 4, 5],
# [6, 7, 8, 9, 10],
# [11, 12, 13, 14, 15],
# [16, 17, 18, 19, 20]]
# You should print out the following:
#
# 1
# 2
# 3
# 4
# 5
# 10
# 15
# 20
# 19
# 18
# 17
# 16
# 11
# 6
# 7
# 8
# 9
# 14
# 13
# 12
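# A minimal sketch of one possible approach (names and structure here are
# illustrative, not prescribed by the challenge): peel the matrix one layer at
# a time, printing the top row, right column, bottom row and left column in turn.
def print_spiral(matrix):
    # note: this consumes (mutates) the input matrix
    while matrix:
        # top row, left to right
        for value in matrix.pop(0):
            print(value)
        # right column, top to bottom
        for row in matrix:
            if row:
                print(row.pop())
        # bottom row, right to left
        if matrix:
            for value in reversed(matrix.pop()):
                print(value)
        # left column, bottom to top
        for row in reversed(matrix):
            if row:
                print(row.pop(0))
print_spiral([[1, 2, 3, 4, 5],
              [6, 7, 8, 9, 10],
              [11, 12, 13, 14, 15],
              [16, 17, 18, 19, 20]])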
|
[
"[email protected]"
] | |
853a5e39a2163b7ab19cb4af8f77ccfbb4328637
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2371/60782/270717.py
|
94105bafb18dd168ee67d41dd0341234d6bebbe9
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 917 |
py
|
"""
题目描述
贾维斯在计算字母数字字符回文率方面很弱。
当钢铁侠忙于与灭霸战斗时,他需要启动音速冲动,但是贾维斯不会计算回文。
给定一个包含字母数字字符的字符串S,找出字符串是否是回文,拯救钢铁侠。
"""
"""
输入描述
输入的第一行包含T,即测试用例的数量。随后是T个测试用例。测试用例的每一行都包含字符串“S”。
"""
"""
输出描述
如果字符串是回文,则输出的每一行都包含“ YES”,如果字符串不是回文,则输出“ NO”。
"""
times = int(input())
while times > 0:
times = times - 1
string = input()
    # keep only the alphanumeric characters, as the problem statement requires
    string = ''.join([x for x in string if x.isalnum()])
    string = string.lower()
    reverse_string = string[::-1]
    if reverse_string == string:
        print("YES")
    else:
        print("NO")
|
[
"[email protected]"
] | |
db0263c9651dc1ae01bf1e8ac4c68375a560f81e
|
e4ee9f2ca60b60ea9fa1b05c982594a2c1b10484
|
/day78 课上笔记以及代码/代码/luffy_permission/rbac/urls.py
|
1ae198f501bc00ee4561ad886e7e546e6b50df48
|
[] |
no_license
|
tianshang486/Pythonlaonanhai
|
100df2cc437aad1ee1baf45bdfc4500b1302092b
|
2a5b46986f5ca684b2ae350596e293db54e1e2f4
|
refs/heads/master
| 2022-09-19T02:16:56.972160 | 2020-06-04T09:24:30 | 2020-06-04T09:24:30 | 269,314,860 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,178 |
py
|
from django.conf.urls import url
from rbac import views
app_name = 'rbac'
urlpatterns = [
# /app01/role/list/ # rbac:role_list
url(r'^role/list/$', views.role_list, name='role_list'),
url(r'^role/add/$', views.role, name='role_add'),
url(r'^role/edit/(\d+)/$', views.role, name='role_edit'),
url(r'^role/del/(\d+)/$', views.del_role, name='role_del'),
url(r'^menu/list/$', views.menu_list, name='menu_list'),
url(r'^menu/add/$', views.menu, name='menu_add'),
url(r'^menu/edit/(\d+)/$', views.menu, name='menu_edit'),
url(r'^permission/add/$', views.permission, name='permission_add'),
url(r'^permission/edit/(\d+)/$', views.permission, name='permission_edit'),
url(r'^permission/del/(\d+)/$', views.del_permission, name='permission_del'),
url(r'^multi/permissions/$', views.multi_permissions, name='multi_permissions'),
url(r'^distribute/permissions/$', views.distribute_permissions, name='distribute_permissions'),
url(r'^distribute/permissions2/$', views.distribute_permissions2, name='distribute_permissions'),
url(r'^permissions_tree/$', views.permissions_tree, name='permissions_tree'),
]
|
[
"[email protected]"
] | |
ece832936045045a43c026cc845db74a25fc0911
|
1f4505ed66f4fd68c6d1edf18ecff58362742fad
|
/algorithm/Backtracking/131_Palindrome_Partitioning.py
|
94732d477f1b056aeaa91fc00bc9c257da15fab9
|
[
"MIT"
] |
permissive
|
nishitpatel01/Data-Science-Toolbox
|
0d9b63a365698cc4a423abd5881cde8f6bf672be
|
80dc1310d103c9481feff8792426c550ddcc0a36
|
refs/heads/master
| 2020-05-19T08:26:40.319321 | 2019-05-04T05:58:48 | 2019-05-04T05:58:48 | 184,921,541 | 1 | 1 |
MIT
| 2019-05-04T16:53:21 | 2019-05-04T16:53:20 | null |
UTF-8
|
Python
| false | false | 3,120 |
py
|
import collections
class Solution(object):
# brute force
def partition2(self, s):
res = []
def dfs(s, path, res):
if not s:
res.append(path)
return
# warning: index ends with len(s), not len(s) - 1
# because s[:len(s)] is the whole string
for i in range(1, len(s) + 1):
                if s[:i] == s[:i][::-1]:
dfs(s[i:], path + [s[:i]], res)
dfs(s, [], res)
return res
# memo, backward
# use a list of list, with index as implicit key
# index i stores a list of palindromes made from first i characters
def partitionDBRec(self, s):
        # we'll use string length as the key to retrieve, so the memo needs one extra slot
        self.memo = [None] * (len(s) + 1)
        # a zero-length string maps to an empty partition list, which is the base case
self.memo[0] = [[]]
def partition_core(s):
s_len = len(s)
if self.memo[s_len]:
return self.memo[s_len]
res = []
for i in range(len(s) - 1, - 1, - 1):
current = s[i:]
if current == current[::-1]:
# pre_res = partition_core(s[:i])
# res += [r + [current] for r in pre_res]
for rem in partition_core(s[:i]):
res.append(rem + [current]) # concatenate two list, and concatenate list to res
self.memo[s_len] = res
return res
return partition_core(s)
# same logic as above, same recurson
# def partitionDPRec2(self, s):
# def helper(s, h):
# if s in h:
# return h[s]
# h[s] = []
# for i in range(len(s)):
# if s[:i + 1] == s[:i + 1][::-1]:
# if i + 1 == len(s):
# h[s].append([s])
# else:
# for rest in self.helper(s[i + 1:], h):
# h[s].append([s[:i + 1]] + rest)
# return h[s]
#
# return helper(s, {})
def partitionDP(self, s):
"""
:type s: str
:rtype: List[List[str]]
"""
def make_results(index, pallindromes, result, results):
if index >= len(s):
                results.append(result[:])
else:
for pallindrome in pallindromes[index]:
make_results(index + len(pallindrome), pallindromes, result + [pallindrome], results)
n = len(s)
is_pallindrome = set()
pallindromes = collections.defaultdict(list)
for i in range(0, len(s)):
for j in range(i + 1):
if s[i] == s[j] and ((i - j) <= 1 or (j + 1, i - 1) in is_pallindrome):
is_pallindrome.add((j, i))
substring = s[j:i + 1]
                    pallindromes[j].append(substring)
results = []
make_results(0, pallindromes, [], results)
return results
solver = Solution()
print(solver.partitionDP("aab"))
|
[
"[email protected]"
] | |
80eac598597ba1c160fb0155aeab022602216b45
|
b3c47795e8b6d95ae5521dcbbb920ab71851a92f
|
/Nowcoder/第八届“图灵杯”NEUQ-ACM程序设计竞赛个人赛/E.py
|
1ddd5930f982aa12adfaaa374f53809ba517673b
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
Wizmann/ACM-ICPC
|
6afecd0fd09918c53a2a84c4d22c244de0065710
|
7c30454c49485a794dcc4d1c09daf2f755f9ecc1
|
refs/heads/master
| 2023-07-15T02:46:21.372860 | 2023-07-09T15:30:27 | 2023-07-09T15:30:27 | 3,009,276 | 51 | 23 | null | null | null | null |
UTF-8
|
Python
| false | false | 669 |
py
|
#coding=utf-8
from math import sqrt
def solve(n, x):
a, b = x - 1, n - x
a, b = min(a, b), max(a, b)
fi = (sqrt(5) + 1) / 2
k = b - a
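    # golden-ratio (Wythoff-style) losing-position test:
    # with k = b - a, the position is losing for the player to move iff a == floor(k * phi)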
# print fi, k
return a == int(k * fi)
T = int(raw_input())
for case_ in xrange(T):
(n, x) = map(int, raw_input().split())
if solve(n, x):
# second player wins
print 'ma la se mi no.1!'
else:
# first player wins
print 'yo xi no forever!'
'''
^^^TEST^^^
8
1 1
10 3
17 6
12 5
4 3
9 6
12 8
17 11
--------
ma la se mi no.1!
yo xi no forever!
yo xi no forever!
ma la se mi no.1!
ma la se mi no.1!
ma la se mi no.1!
ma la se mi no.1!
ma la se mi no.1!
$$$TEST$$$
'''
|
[
"[email protected]"
] | |
57a0ac9bc20d54d341a96192fb4cc8965022b122
|
a140fe192fd643ce556fa34bf2f84ddbdb97f091
|
/.history/module_20200711111837.py
|
18d6ee88829fbaa10e977527ddf46ba38b94a3bc
|
[] |
no_license
|
sangha0719/py-practice
|
826f13cb422ef43992a69f822b9f04c2cb6d4815
|
6d71ce64bf91cc3bccee81378577d84ba9d9c121
|
refs/heads/master
| 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 39 |
py
|
# 모듈은 확장자가 .py이다.
im
|
[
"[email protected]"
] | |
0ccfc081bdefc0a7fbcad29180a82dc2d1f77dc8
|
4f1218079f90a65befbf658679721886d71f4ee8
|
/python/atcoder/KH_seisen/1.2/b069.py
|
ce3076d036cd09429a2c1d39fc54d8e395345ed6
|
[] |
no_license
|
Escaity/Library
|
9f57767617422a7930caf48718d18f7ebef81547
|
b34d8600e0a65845f1b3a16eb4b98fc7087a3160
|
refs/heads/master
| 2022-07-29T16:18:33.073738 | 2022-07-17T10:25:22 | 2022-07-17T10:25:22 | 238,588,249 | 0 | 0 | null | 2021-08-17T03:02:34 | 2020-02-06T02:04:08 |
Python
|
UTF-8
|
Python
| false | false | 117 |
py
|
s = list(input())
l = s.pop(0)
r = s.pop()
print(l, len(s), r, sep="")
"""
a,*b,c=input()
print(a+str(len(b))+c)
"""
|
[
"[email protected]"
] | |
dade28675e6427d46d2f875da1203198c231c5ea
|
ed14784949d5fa2208aa99ae1e31be0b6d1f196d
|
/backend/fametok_19651/settings.py
|
6b0a917a15f6ad8150a19ad7f7b3fdf5478c88bb
|
[] |
no_license
|
crowdbotics-apps/fametok-19651
|
d530a768e791d04394133ec8e92731c9d4f1f02e
|
aeecff19e0a628fed01c0d85d81c90c5dd98a99c
|
refs/heads/master
| 2022-12-03T06:33:14.174775 | 2020-08-19T12:41:58 | 2020-08-19T12:41:58 | 288,728,568 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,841 |
py
|
"""
Django settings for fametok_19651 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"course",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
# start fcm_django push notifications
"fcm_django",
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "fametok_19651.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "fametok_19651.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
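# a DATABASE_URL environment variable (parsed by django-environ's env.db()) overrides the sqlite3 default above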
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"[email protected]"
] | |
abf86800d2a983e5d00cb8ab431150246bb1bdad
|
552ba370742e346dbb1cf7c7bf4b99648a17979b
|
/tbx/services/migrations/0006_new_servicepage_model.py
|
b9ca45203ff3651aafcc0a436498635c2cc4649e
|
[
"MIT"
] |
permissive
|
arush15june/wagtail-torchbox
|
73e5cdae81b524bd1ee9c563cdc8a7b5315a809e
|
c4d06e096c72bd8007975dc016133024f9d27fab
|
refs/heads/master
| 2022-12-25T05:39:32.309635 | 2020-08-13T14:50:42 | 2020-08-13T14:50:42 | 299,591,277 | 0 | 0 |
MIT
| 2020-09-29T11:08:49 | 2020-09-29T11:08:48 | null |
UTF-8
|
Python
| false | false | 6,030 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-01-21 16:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.core.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('blog', '0017_map_tags_to_related_services'),
('work', '0014_map_tags_to_related_services'),
('people', '0005_contact'),
('taxonomy', '0002_initial_services'),
('wagtailcore', '0040_page_draft_title'),
('torchbox', '0120_remove_contactpage'),
('services', '0005_remove_models'),
]
operations = [
migrations.CreateModel(
name='ServicePage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('strapline', models.CharField(max_length=255)),
('intro', wagtail.core.fields.RichTextField(blank=True)),
('heading_for_key_points', wagtail.core.fields.RichTextField()),
('contact', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='people.Contact')),
('service', models.OneToOneField(blank=True, help_text='Link to this service in taxonomy', null=True, on_delete=django.db.models.deletion.SET_NULL, to='taxonomy.Service')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='ServicePageClientLogo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='torchbox.TorchboxImage')),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='client_logos', to='services.ServicePage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='ServicePageFeaturedBlogPost',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('blog_post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.BlogPage')),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='featured_blog_posts', to='services.ServicePage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='ServicePageFeaturedCaseStudy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('case_study', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='work.WorkPage')),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='featured_case_studies', to='services.ServicePage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='ServicePageKeyPoint',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('text', models.CharField(max_length=255)),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='key_points', to='services.ServicePage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='ServicePageTestimonial',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('quote', models.TextField()),
('name', models.CharField(max_length=255)),
('role', models.CharField(max_length=255)),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='testimonials', to='services.ServicePage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='ServicePageUSAClientLogo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='torchbox.TorchboxImage')),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='usa_client_logos', to='services.ServicePage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
|
[
"[email protected]"
] | |
d20c84f40a7f1f81a0a29e9c5675376a3601eb24
|
6c2961116fb4af485ec0f5806b6dca34c84eb27c
|
/python/paddle/fluid/tests/unittests/ngraph/test_mul_ngraph_op.py
|
412127af701045c0efbd1c0e2478eaa46ce245f9
|
[
"Apache-2.0"
] |
permissive
|
NHZlX/Paddle
|
29e81479116e3766d50856cbd33c18f606580560
|
91a2627251ed8d989c45b15fea323572bc9c5544
|
refs/heads/develop
| 2020-12-30T16:33:15.983832 | 2020-03-18T05:52:11 | 2020-03-18T05:52:11 | 92,489,093 | 1 | 2 |
Apache-2.0
| 2019-04-15T09:20:27 | 2017-05-26T08:24:43 |
C++
|
UTF-8
|
Python
| false | false | 788 |
py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest, sys
sys.path.append("../")
from test_mul_op import TestMulOp, TestMulOp2
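# importing the CPU TestMulOp cases binds them in this module, so unittest.main() discovers and runs them here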
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
ee7eedfd43a41db4ba4f5048132a797a8ea062e8
|
2264807e07d88d0f0dea05d3973163430765794d
|
/wip/other/tap.py
|
daca3680b44ded7d42deb529c89fec50356ba486
|
[
"MIT"
] |
permissive
|
deadsy/pycs
|
e0dc9d2d3ec2ff0f7019d0a31d94e0a1237c24b1
|
7e262c710255ac9742703c7ccedb0ae90ae079ec
|
refs/heads/master
| 2023-03-05T10:33:03.485327 | 2023-02-27T19:34:27 | 2023-02-27T19:34:27 | 38,818,707 | 57 | 11 | null | 2017-12-21T22:40:29 | 2015-07-09T12:24:47 |
Python
|
UTF-8
|
Python
| false | false | 2,818 |
py
|
#-----------------------------------------------------------------------------
"""
Generate TMS sequences for JTAG TAP state machine transitions
Note:
State names are taken from the SVF file specification.
This keeps things simple when processing SVF files.
"""
#-----------------------------------------------------------------------------
state_machine = {
'RESET': ('IDLE','RESET'),
'IDLE': ('IDLE','DRSELECT'),
'DRSELECT': ('DRCAPTURE','IRSELECT'),
'DRCAPTURE': ('DRSHIFT','DREXIT1'),
'DRSHIFT': ('DRSHIFT','DREXIT1'),
'DREXIT1': ('DRPAUSE','DRUPDATE'),
'DRPAUSE': ('DRPAUSE','DREXIT2'),
'DREXIT2': ('DRSHIFT','DRUPDATE'),
'DRUPDATE': ('IDLE','DRSELECT'),
'IRSELECT': ('IRCAPTURE','RESET'),
'IRCAPTURE': ('IRSHIFT','IREXIT1'),
'IRSHIFT': ('IRSHIFT','IREXIT1'),
'IREXIT1': ('IRPAUSE','IRUPDATE'),
'IRPAUSE': ('IRPAUSE','IREXIT2'),
'IREXIT2': ('IRSHIFT','IRUPDATE'),
'IRUPDATE': ('IDLE','DRSELECT'),
}
#-----------------------------------------------------------------------------
# build a cache of all state transitions for fast lookup
tap_cache = {}
def search(path, current, dst):
"""return the shortest state path linking src and dst states"""
# are we done?
if current == dst:
return path
# get the two outgoing states
(state0, state1) = state_machine[current]
# search paths with state0
if state0 in path:
# looping - not the shortest path
path0 = None
else:
path0 = list(path)
path0.append(state0)
path0 = search(path0, state0, dst)
# search paths with state1
if state1 in path:
# looping - not the shortest path
path1 = None
else:
path1 = list(path)
path1.append(state1)
path1 = search(path1, state1, dst)
# return the shortest path
if path0 is None:
return path1
if path1 is None:
return path0
return path0 if len(path0) < len(path1) else path1
def tms(path, current):
"""return a tms bit tuple from the current state along the path"""
s = []
for state in path:
s.append(state_machine[current].index(state))
current = state
return tuple(s)
def init_cache():
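  # precompute the TMS bit sequence for every (src, dst) state pair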
states = state_machine.keys()
for src in states:
for dst in states:
path = search([], src, dst)
tap_cache['%s->%s' % (src, dst)] = tms(path, src)
# any state to RESET
tap_cache['*->RESET'] = (1,1,1,1,1)
def lookup(src, dst):
if len(tap_cache) == 0:
init_cache()
return tap_cache['%s->%s' % (src, dst)]
#-----------------------------------------------------------------------------
#class tap(object):
# """JTAG TAP State Machine"""
# def __init__(self):
# pass
#-----------------------------------------------------------------------------
|
[
"[email protected]"
] | |
a9cd02628949811c68f380fb680e2de40035eea0
|
c1f9f4926cf7ac20a854e3222d18b5a0e3eeb6b3
|
/minos/thesis/styles.py
|
cba205dff8b46462376467149ce793834ec7d805
|
[] |
no_license
|
ndevenish/python-minos
|
d794ec02ff2a7617b57e7d4ad983eef1ac5d071f
|
43876f473ac992e76037bfda219b56f068ab52fd
|
refs/heads/master
| 2021-03-19T06:14:19.390943 | 2015-04-21T20:51:59 | 2015-04-21T20:51:59 | 25,158,699 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,081 |
py
|
#coding: utf-8
# class Sizes(object):
# A4 = (8.3-0.4, 11.7-0.4) # With 5mm margin on all sides
# Standard = (5.9, 5.5)
# Wide = (4.9, 3.3)
# Half =
import matplotlib as mpl
#from minos.thesis import Sizes
class Sizes(object):
A4 = (8.3-0.4, 11.7-0.4) # With 5mm margin on all sides
Standard = (5.9, 4.4)
Wide = (5.9, 3.3)
Half = (5.9*0.49, 5.9*0.49*(3./4.))
FullPage = (5.9, 5.9*1.4) # A4 ratio, but thesis fit
Standard = {
"figure.figsize": Sizes.Standard,
"font.size": 12,
"legend.numpoints": 1,
"legend.fontsize": "medium"
}
Wide = {
"figure.figsize": Sizes.Wide,
"font.size": 12,
}
# Used for half-width 4:3 plots e.g. side-by-side
Half = {
"figure.figsize": Sizes.Half,
"font.size": 9,
"legend.fontsize": "small",
}
FullPage = {
"figure.figsize": Sizes.FullPage,
}
def figure_style(style):
"""Uses the matplotlib style context manager for a specific function"""
def _wrap(fn):
def _innerwrap(*args, **kwargs):
with mpl.style.context(style):
return fn(*args, **kwargs)
return _innerwrap
return _wrap
|
[
"[email protected]"
] | |
36d611a427b99241d39486c5737c8fb20e4e1194
|
2af94f8a7609d47fdcea28a2132c4f8bacb103e3
|
/lib/idigi_pc.py
|
62d44db918f767b61b1bc902aa601d06c006764b
|
[] |
no_license
|
bernhara/DigiGateway4Raph
|
685527723f0b306f387233c78d27fe9d78717c38
|
f36ba29ef883d70f94b8609ff734b5dcde786c66
|
refs/heads/master
| 2020-07-05T19:56:27.027547 | 2019-08-19T06:10:46 | 2019-08-19T06:10:46 | 202,756,662 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,223 |
py
|
############################################################################
# #
# Copyright (c)2011, Digi International (Digi). All Rights Reserved. #
# #
# Permission to use, copy, modify, and distribute this software and its #
# documentation, without fee and without a signed licensing agreement, is #
# hereby granted, provided that the software is used on Digi products only #
# and that the software contain this copyright notice, and the following #
# two paragraphs appear in all copies, modifications, and distributions as #
# well. Contact Product Management, Digi International, Inc., 11001 Bren    #
# Road East, Minnetonka, MN, +1 952-912-3444, for commercial licensing #
# opportunities for non-Digi products. #
# #
# DIGI SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED #
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A #
# PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, #
# PROVIDED HEREUNDER IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND. #
# DIGI HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, #
# ENHANCEMENTS, OR MODIFICATIONS. #
# #
# IN NO EVENT SHALL DIGI BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, #
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, #
# ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF #
# DIGI HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. #
# #
############################################################################
''' PC compatibility stub for Digi device cwm module. '''
def _get_ws_parms():
''' Dummy values for invalid configuration (from cwmmodule.cc). '''
return (None, None, None, 0, 0)
|
[
"[email protected]"
] | |
3e21ac19dada8214c4ab79d2e9fcbdcaed32fc2e
|
1cf74ce90fd2bbe6d450312ae14b0cd581740281
|
/tests/test_evaluator.py
|
cfc38c0bfda7651c10db719049e0b4245dc9d56a
|
[
"Apache-2.0"
] |
permissive
|
bearstech/whirlwind
|
aa05e4a0cdd81ef8b9260ccfb4ba2325dae49f46
|
2776de5c615bf5b6e1b2c30f917527321079817c
|
refs/heads/master
| 2021-06-06T17:32:41.669973 | 2020-10-13T09:46:24 | 2020-10-13T09:46:24 | 9,644,453 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,042 |
py
|
import unittest
from whirlwind import evaluator
from whirlwind.mock import MockStaticReader, MockFinder
from whirlwind.storage import Store
from whirlwind.attime import parseATTime
class EvaluatorTest(unittest.TestCase):
def setUp(self):
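        # store backed by a mock finder that serves constant series one=1, three=3 and five=5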
mock_reader = MockStaticReader()
finder = MockFinder({
'one': MockStaticReader(1),
'three': MockStaticReader(3),
'five': MockStaticReader(5)
})
self.store = Store([finder], hosts=None)
def _evaluator(self, tokens):
context = {
'startTime': parseATTime('-2days'),
'endTime': parseATTime('now'),
'localOnly': True
}
return evaluator.evaluateTarget(self.store, context, tokens)
def test_average(self):
values = self._evaluator('averageSeries(one, three)')
for v in values[0]:
assert v == 2.0
def test_sum(self):
values = self._evaluator('sumSeries(one, three)')
for v in values[0]:
assert v == 4.0
def test_diff(self):
values = self._evaluator('diffSeries(five, one)')
for v in values[0]:
assert v == 4.0
# Doesn't work in the graphite project too
#values = self._evaluator('diffSeries(a.b.5,3)')
#for v in values[0]:
#assert v == 2.0
# FIXME
#def test_min_max(self):
#store = MockStore({'a.b.c': [1, 2, 3, 4, 1, 5],
#'d.e.f': [2, 1, 3, 0, 6, 7]
#})
#context = {
#'startTime': '-2days',
#'endTime': 'now',
#}
#tokens = 'minSeries(a.b.c, d.e.f)'
#values = evaluator.evaluateTarget(store, context, tokens)
#vv = [v for v in values[0] if v is not None]
#assert vv == [1, 1, 3, 0, 1, 5]
#tokens = 'maxSeries(a.b.c, d.e.f)'
#values = evaluator.evaluateTarget(store, context, tokens)
#vv = [v for v in values[0] if v is not None]
#assert vv == [2, 2, 3, 4, 6, 7]
|
[
"[email protected]"
] | |
6ea90f5be725e40ce34a353f8bb7cb2604b6367c
|
afa2ebb439e6592caf42c507a789833b9fbf44b2
|
/supervised_learning/0x02-tensorflow/7-evaluate.py
|
dcaf66e8a897d9e8fa382a62237c8ef55fef1152
|
[] |
no_license
|
anaruzz/holbertonschool-machine_learning
|
64c66a0f1d489434dd0946193747ed296760e6c8
|
91300120d38acb6440a6dbb8c408b1193c07de88
|
refs/heads/master
| 2023-07-30T20:09:30.416167 | 2021-09-23T16:22:40 | 2021-09-23T16:22:40 | 279,293,274 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 728 |
py
|
#!/usr/bin/env python3
"""
Script that Evaluates the output of a neural network
"""
import tensorflow as tf
def evaluate(X, Y, save_path):
"""
Evaluate the output of a NN
"""
sess = tf.Session()
saver = tf.train.import_meta_graph(save_path + '.meta')
saver.restore(sess, save_path)
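    # pull the tensors that the training script saved into named graph collections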
y_pred = tf.get_collection('y_pred', scope=None)[0]
loss = tf.get_collection('loss', scope=None)[0]
accuracy = tf.get_collection('accuracy', scope=None)[0]
x = tf.get_collection('x', scope=None)[0]
y = tf.get_collection('y', scope=None)[0]
y_pred, accuracy, loss = sess.run((y_pred, accuracy, loss),
feed_dict={x: X, y: Y})
return y_pred, accuracy, loss
|
[
"[email protected]"
] | |
78776dd647b6904bb6a18538a5f55a8ee87e7683
|
60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24
|
/IronPythonStubs/release/stubs.min/System/Runtime/InteropServices/__init___parts/UCOMIConnectionPointContainer.py
|
d6d4c5c738b33d1b7dac9915eb618e3272d54a34
|
[
"MIT"
] |
permissive
|
shnlmn/Rhino-Grasshopper-Scripts
|
a9411098c5d1bbc55feb782def565d535b27b709
|
0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823
|
refs/heads/master
| 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,025 |
py
|
class UCOMIConnectionPointContainer:
""" Use System.Runtime.InteropServices.ComTypes.IConnectionPointContainer instead. """
def EnumConnectionPoints(self,ppEnum):
"""
EnumConnectionPoints(self: UCOMIConnectionPointContainer) -> UCOMIEnumConnectionPoints
Creates an enumerator of all the connection points supported in the connectable object,one
connection point per IID.
"""
pass
def FindConnectionPoint(self,riid,ppCP):
"""
FindConnectionPoint(self: UCOMIConnectionPointContainer,riid: Guid) -> (Guid,UCOMIConnectionPoint)
Asks the connectable object if it has a connection point for a particular IID,and if so,
returns the IConnectionPoint interface pointer to that connection point.
riid: A reference to the outgoing interface IID whose connection point is being requested.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
|
[
"[email protected]"
] | |
05b93131012fc6bad5c9fad35bf55749ba7a81cf
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_267/ch171_2020_06_22_16_19_21_764865.py
|
e62085add4dd9fd088cd16a9e2d58cfcd82a50b6
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 489 |
py
|
class Carrinho:
def __init__(self):
self.dicio = {}
def adiciona(self, nome_produto, preco):
if nome_produto not in self.dicio.keys():
self.dicio[nome_produto] = preco
else:
self.dicio[nome_produto] += preco
    def total_do_produto(self, nome_produto):
        preco_total = 0
        # soma apenas as entradas do produto pedido
        for nome, preco in self.dicio.items():
            if nome == nome_produto:
                preco_total += preco
        return preco_total
|
[
"[email protected]"
] | |
6d4cbc8ac1d3cde642db40343bb5cb5cafb3d8ed
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/52/usersdata/67/22506/submittedfiles/matriz1.py
|
b83917a56822d82448117c1894d206f2ee154a27
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,094 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
def menorcoluna (a):
for j in range (0,a.shape[1],1):
for i in range (0,a.shape[0],1):
if a[i,j]==1:
return j
def maiorcoluna (a):
for j in range (0,a.shape[1],1):
for i in range (0,a.shape[0],1):
if a[i,j]==1:
cd=j
return cd
def maiorlinha (a):
for i in range (0,a.shape[0],1):
for j in range (0,a.shape[1],1):
if a[i,j]==1:
lb=i
return lb
def menorlinha (a):
for i in range (0,a.shape[0],1):
for j in range (0,a.shape[1],1):
if a[i,j]==1:
lc=i
return i
linhas=input("Digite a quantidade de linhas:")
colunas=input("Digite a quantidade de colunas:")
a=np.zeros((linhas,colunas))
for i in range (0,a.shape[0],1):
for j in range (0,a.shape[1],1):
a[i,j]=input("Digite um elemento:")
print ( a[menorlinha(a):maiorlinha(a)+1,menorcoluna(a):maiorcoluna(a)+1] )
|
[
"[email protected]"
] | |
b1872d572854fe276edbbdcc4e6f3e079f669d63
|
02467e9975b50c14b4dc8cdc6dc03748f9aa8245
|
/openshift/test/test_v1_route.py
|
d07794aa885877504dd0b80125303c2ca98063c8
|
[
"Apache-2.0"
] |
permissive
|
ashcrow/python-openshift
|
3995e3c4b72bf52a62bc6b07dabf3d0f709444ae
|
74c9ade612def941938016385842631342e926de
|
refs/heads/master
| 2021-01-11T19:29:04.419005 | 2017-01-18T19:31:58 | 2017-01-18T19:31:58 | 79,377,387 | 0 | 0 | null | 2017-01-18T19:46:04 | 2017-01-18T19:46:04 | null |
UTF-8
|
Python
| false | false | 4,092 |
py
|
# coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'unversioned.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: v1.5.0-alpha1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import openshift.client
from openshift.client.rest import ApiException
from openshift.client.models.v1_route import V1Route
class TestV1Route(unittest.TestCase):
""" V1Route unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1Route(self):
"""
Test V1Route
"""
model = openshift.client.models.v1_route.V1Route()
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
b819f0927c0e24f8d7915d6afa203893a63b2360
|
2f7f918888f57a1a341035649e6c42b264c91313
|
/vendor/atmel/atmel.py
|
e769752270820deb68ade3716f33baed0ee2ad00
|
[] |
no_license
|
arunsigood/pycs
|
8331417d46084b0ccb6381a85ac3490d97d8b162
|
4f6035a24169e4c9130f1a47ba0e68cc1bf6390b
|
refs/heads/master
| 2020-03-22T21:08:31.009632 | 2018-07-05T22:49:14 | 2018-07-05T22:49:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,525 |
py
|
#-----------------------------------------------------------------------------
"""
SoC file for Atmel SAM Devices
Read in the SVD file for a named SoC.
Run fixup functions to correct any SVD inadequecies.
"""
#-----------------------------------------------------------------------------
import soc
import cmregs
import util
#-----------------------------------------------------------------------------
# build a database of SoC devices
class soc_info(object):
def __init__(self):
pass
soc_db = {}
#-----------------------------------------------------------------------------
# NVM User Row Mapping: Not in the SVD :-(
def _eeprom_fmt(x):
return '%s' % util.memsize((1 << (14-x),0)[x == 7])
def _bootprot_fmt(x):
return '%s' % util.memsize((1 << (15-x),0)[x == 7])
_nvmr0_fieldset = (
('WDT_Period', 31, 28, None, 'WDT Period at power-on'),
('WDT_Always', 27, 27, None, 'WDT Always-On at power-on'),
('WDT_Enable', 26, 26, None, 'WDT Enable at power-on'),
('BOD12_Action', 25, 24, None, 'BOD12 Action at power-on'),
('BOD12_Disable', 23, 23, None, 'BOD12 Disable at power-on'),
('BOD12_Level', 22, 17, None, 'BOD12 threshold level at power-on'),
('BOD33_Action', 16, 15, None, 'BOD33 Action at power-on'),
('BOD33_Disable', 14, 14, None, 'BOD33 Disable at power-on'),
('BOD33_Level', 13, 8, None, 'BOD33 threshold level at power-on'),
('EEPROM', 6, 4, _eeprom_fmt, 'Used to select one of eight different EEPROM sizes'),
('BOOTPROT', 2, 0, _bootprot_fmt, 'Used to select one of eight different bootloader sizes'),
)
_nvmr1_fieldset = (
('LOCK', 31, 16, None, 'NVM Region Lock Bits'),
('BOD12_Hysteresis', 10, 10, None, 'BOD12 Hysteresis configuration Hysteresis at power-on'),
('BOD33_Hysteresis', 9, 9, None, 'BOD33 Hysteresis configuration Hysteresis at power-on'),
('WDT_WEN', 8, 8, None, 'WDT Timer Window Mode Enable at power-on'),
('WDT_EWOFFSET', 7, 4, None, 'WDT Early Warning Interrupt Time Offset at power-on'),
('WDT_Window', 3, 0, None, 'WDT Window mode time-out at power-on'),
)
_nvm_user_row_regset = (
('NVMUR0', 32, 0x0, _nvmr0_fieldset, 'NVM User Row 0'),
('NVMUR1', 32, 0x4, _nvmr1_fieldset, 'NVM User Row 1'),
)
#-----------------------------------------------------------------------------
# ATSAML21J18B
def ATSAML21J18B_fixup(d):
d.soc_name = 'ATSAML21J18B'
d.cpu_info.deviceNumInterrupts = 32
# memory and misc periperhals
d.insert(soc.make_peripheral('flash', 0x00000000, 256 << 10, None, 'Flash'))
d.insert(soc.make_peripheral('rww', 0x00400000, 8 << 10, None, 'RWW Section'))
d.insert(soc.make_peripheral('sram', 0x20000000, 32 << 10, None, 'SRAM'))
d.insert(soc.make_peripheral('lp_sram', 0x30000000, 8 << 10, None, 'Low Power SRAM'))
d.insert(soc.make_peripheral('NVMUR', 0x00804000, 8, _nvm_user_row_regset, 'NVM User Row'))
s = soc_info()
s.name = 'ATSAML21J18B'
s.svd = 'ATSAML21J18B'
s.fixups = (ATSAML21J18B_fixup, cmregs.cm0plus_fixup)
soc_db[s.name] = s
#-----------------------------------------------------------------------------
def get_device(ui, name):
"""return the device structure for the named SoC"""
if not soc_db.has_key(name):
assert False, 'unknown SoC name %s' % name
return None
info = soc_db[name]
svd_file = './vendor/atmel/svd/%s.svd.gz' % info.svd
ui.put('%s: compiling %s\n' % (name, svd_file))
device = soc.build_device(svd_file)
for f in info.fixups:
f(device)
return device
#-----------------------------------------------------------------------------
|
[
"[email protected]"
] | |
34c7eb67850c4cf233982feffdbd2f7f4ff892db
|
c376179fd8572514826e574a67d0cb4002780497
|
/mnist_lenet5/mnist_backward_lenet5.py
|
70686effbde3d50738726ec93764022df4e9a308
|
[] |
no_license
|
youthliuxi/tf_learn
|
a4ec4e03dfebe2abf550b895607b2aa76f19f2fe
|
617c26a72b4d8f95280723eda97d99a0e2b8bfbf
|
refs/heads/master
| 2020-03-19T09:45:16.750767 | 2018-06-06T10:54:56 | 2018-06-06T10:54:56 | 136,315,134 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,445 |
py
|
#coding:utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward_lenet5
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.005
LEARNING_RATE_DECAY = 0.99
REGULARIZER = 0.0001
STEPS = 2000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "./model/"
MODEL_NAME = "mnist_model"
def backward(mnist):
x = tf.placeholder(tf.float32, [
BATCH_SIZE,
mnist_forward_lenet5.IMAGE_SIZE,
mnist_forward_lenet5.IMAGE_SIZE,
mnist_forward_lenet5.NUM_CHANNELS])
y_ = tf.placeholder(tf.float32, [None, mnist_forward_lenet5.OUTPUT_NODE])
y = mnist_forward_lenet5.forward(x, True, REGULARIZER)
global_step = tf.Variable(0, trainable = False)
ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = y, labels = tf.argmax(y_, 1))
cem = tf.reduce_mean(ce)
loss = cem + tf.add_n(tf.get_collection('losses'))
learning_rate = tf.train.exponential_decay(
LEARNING_RATE_BASE,
global_step,
mnist.train.num_examples / BATCH_SIZE,
LEARNING_RATE_DECAY,
staircase = True)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step = global_step)
ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
ema_op = ema.apply(tf.trainable_variables())
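	# group the gradient step and the moving-average update into a single train op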
with tf.control_dependencies([train_step, ema_op]):
train_op = tf.no_op(name = 'train')
saver = tf.train.Saver()
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
for i in range(STEPS):
xs, ys = mnist.train.next_batch(BATCH_SIZE)
reshaped_xs = np.reshape(xs, (
BATCH_SIZE,
mnist_forward_lenet5.IMAGE_SIZE,
mnist_forward_lenet5.IMAGE_SIZE,
mnist_forward_lenet5.NUM_CHANNELS))
_, loss_value, step = sess.run([train_op, loss, global_step], feed_dict = {x: reshaped_xs, y_: ys})
if i % 100 ==0:
print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step = global_step)
def main():
mnist = input_data.read_data_sets("./data/", one_hot = True)
backward(mnist)
# print(mnist.train.num_examples)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
e0e6ee7e997ccdcc861d7be06c8017a1bd4981b7
|
b403c7fe56209472855dff451f0b6283d5471008
|
/Supplemental_Material/PythonProjects/10. GUI_Tkinter/GUI_BindingButtons_nb.py
|
5a8a1050361e8e0331cdb9c6a8943e10f1012300
|
[] |
no_license
|
Sandbox4KidsTM/Python_Basics
|
842bde52796896e913fdb5cc349034c52092555f
|
68c95547ec1567958fc8069e6a4bb119e436211a
|
refs/heads/master
| 2020-03-23T01:06:29.363196 | 2018-08-10T04:32:58 | 2018-08-10T04:32:58 | 140,901,128 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 281 |
py
|
from tkinter import *
root = Tk()
def printName():
print("hello, my name is Bucky")
button_1 = Button(root, text="Print my name", command=printName, bg="red", fg="white")
# command binds the function printName() to the button_1
button_1.pack()
root.mainloop()
|
[
"[email protected]"
] | |
356b51db1cd46f05c6cfb7019f4972928980f198
|
45f7a9b44ea1c45448703707da793d51151c0527
|
/ui_tests/answers/answers_02.py
|
2cb15c8bf8c7a775a15f8f87a5b2feb6fcc81548
|
[] |
no_license
|
basdijkstra/python-for-testers
|
a40d30432c31712c6d0eadbca9de73056ff10535
|
50bfbabfb2b8426eed8d048b0448959c34f71b61
|
refs/heads/master
| 2023-05-24T18:48:58.557924 | 2023-05-23T05:44:11 | 2023-05-23T05:44:11 | 219,865,075 | 7 | 4 | null | 2023-05-23T05:44:13 | 2019-11-05T22:47:09 |
Python
|
UTF-8
|
Python
| false | false | 1,623 |
py
|
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import pytest
import time
@pytest.fixture
def browser():
driver = webdriver.Chrome()
driver.maximize_window()
yield driver
driver.quit()
# Exercise 2.1
# Extend this test with the following actions:
# 1. Select the menu item 'Request Loan' from the side menu bar
# 2. Specify '1000' as the requested loan amount
# 3. Specify '100' as the down payment
# 4. Select '12456' as the from account ID
# 5. Click the 'Apply Now' button
# 6. Check that the element containing the result of the loan application is displayed
# (you might need to add a time.sleep(x) statement here, which
# makes the test wait for x seconds before proceeding with the
# next statement)
# 7. Check that the result of the loan application equals 'Denied'
def test_successful_loan_request(browser):
browser.get("http://parabank.parasoft.com")
browser.find_element_by_name("username").send_keys("john")
browser.find_element_by_name("password").send_keys("demo")
browser.find_element_by_xpath("//input[@value='Log In']").click()
browser.find_element_by_link_text("Request Loan").click()
browser.find_element_by_id("amount").send_keys("1000")
browser.find_element_by_id("downPayment").send_keys("100")
Select(browser.find_element_by_id("fromAccountId")).select_by_visible_text("12456")
browser.find_element_by_xpath("//input[@value='Apply Now']").click()
time.sleep(3)
assert browser.find_element_by_id("loanStatus").is_displayed()
assert browser.find_element_by_id("loanStatus").text == "Denied"
|
[
"[email protected]"
] | |
866fb60bedda985216024081f5bbc4d86cc63df1
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/126/usersdata/179/32849/submittedfiles/ap2.py
|
ae550679631479cc360ae5392d5442f324129039
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 486 |
py
|
# -*- coding: utf-8 -*-
a=int(input('primeiro numero:'))
b=int(input('segundo numero:'))
c=int(input('terceiro numero:'))
d=int(input('quarto numero:'))
if a>b and a>c and a>d:
print (a)
elif b>a and b>c and b>d:
print (b)
elif c>a and c>b and c>d:
print (c)
elif d>a and d>b and d>c:
print (d)
if a<b and a<c and a<d:
print (a)
elif b<a and b<c and b<d:
print (b)
elif c<a and c<b and c<d:
print (c)
elif d<a and d<b and d<c:
print (d)
|
[
"[email protected]"
] | |
93c2abc8dc6b584d0d34d9e88a0f7bc7b9846862
|
234c7fb0bdabdd696c8e4c6a449ac2c8e3f14ad5
|
/build/PureCloudPlatformClientV2/models/wfm_user_schedule_adherence_updated_mu_topic_user_schedule_adherence_update.py
|
764e6f46b4764efe46b5470ce8444580ee530a9a
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
humano7/platform-client-sdk-python
|
2a942c43cc2d69e8cb0c4113d998e6e0664fdedb
|
dd5b693b1fc90c9dcb36885d7227f11221db5980
|
refs/heads/master
| 2023-04-12T05:05:53.932393 | 2021-04-22T03:41:22 | 2021-04-22T03:41:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 19,118 |
py
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'user': 'WfmUserScheduleAdherenceUpdatedMuTopicUserReference',
'management_unit_id': 'str',
'team': 'WfmUserScheduleAdherenceUpdatedMuTopicUriReference',
'scheduled_activity_category': 'str',
'system_presence': 'str',
'organization_secondary_presence_id': 'str',
'routing_status': 'str',
'actual_activity_category': 'str',
'is_out_of_office': 'bool',
'adherence_state': 'str',
'impact': 'str',
'adherence_change_time': 'datetime',
'presence_update_time': 'datetime',
'active_queues': 'list[WfmUserScheduleAdherenceUpdatedMuTopicQueueReference]',
'active_queues_modified_time': 'datetime',
'removed_from_management_unit': 'bool'
}
self.attribute_map = {
'user': 'user',
'management_unit_id': 'managementUnitId',
'team': 'team',
'scheduled_activity_category': 'scheduledActivityCategory',
'system_presence': 'systemPresence',
'organization_secondary_presence_id': 'organizationSecondaryPresenceId',
'routing_status': 'routingStatus',
'actual_activity_category': 'actualActivityCategory',
'is_out_of_office': 'isOutOfOffice',
'adherence_state': 'adherenceState',
'impact': 'impact',
'adherence_change_time': 'adherenceChangeTime',
'presence_update_time': 'presenceUpdateTime',
'active_queues': 'activeQueues',
'active_queues_modified_time': 'activeQueuesModifiedTime',
'removed_from_management_unit': 'removedFromManagementUnit'
}
self._user = None
self._management_unit_id = None
self._team = None
self._scheduled_activity_category = None
self._system_presence = None
self._organization_secondary_presence_id = None
self._routing_status = None
self._actual_activity_category = None
self._is_out_of_office = None
self._adherence_state = None
self._impact = None
self._adherence_change_time = None
self._presence_update_time = None
self._active_queues = None
self._active_queues_modified_time = None
self._removed_from_management_unit = None
@property
def user(self):
"""
Gets the user of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:return: The user of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:rtype: WfmUserScheduleAdherenceUpdatedMuTopicUserReference
"""
return self._user
@user.setter
def user(self, user):
"""
Sets the user of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:param user: The user of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:type: WfmUserScheduleAdherenceUpdatedMuTopicUserReference
"""
self._user = user
@property
def management_unit_id(self):
"""
Gets the management_unit_id of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:return: The management_unit_id of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:rtype: str
"""
return self._management_unit_id
@management_unit_id.setter
def management_unit_id(self, management_unit_id):
"""
Sets the management_unit_id of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:param management_unit_id: The management_unit_id of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:type: str
"""
self._management_unit_id = management_unit_id
@property
def team(self):
"""
Gets the team of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:return: The team of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:rtype: WfmUserScheduleAdherenceUpdatedMuTopicUriReference
"""
return self._team
@team.setter
def team(self, team):
"""
Sets the team of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:param team: The team of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:type: WfmUserScheduleAdherenceUpdatedMuTopicUriReference
"""
self._team = team
@property
def scheduled_activity_category(self):
"""
Gets the scheduled_activity_category of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:return: The scheduled_activity_category of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:rtype: str
"""
return self._scheduled_activity_category
@scheduled_activity_category.setter
def scheduled_activity_category(self, scheduled_activity_category):
"""
Sets the scheduled_activity_category of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:param scheduled_activity_category: The scheduled_activity_category of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:type: str
"""
self._scheduled_activity_category = scheduled_activity_category
@property
def system_presence(self):
"""
Gets the system_presence of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:return: The system_presence of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:rtype: str
"""
return self._system_presence
@system_presence.setter
def system_presence(self, system_presence):
"""
Sets the system_presence of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:param system_presence: The system_presence of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:type: str
"""
self._system_presence = system_presence
@property
def organization_secondary_presence_id(self):
"""
Gets the organization_secondary_presence_id of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:return: The organization_secondary_presence_id of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:rtype: str
"""
return self._organization_secondary_presence_id
@organization_secondary_presence_id.setter
def organization_secondary_presence_id(self, organization_secondary_presence_id):
"""
Sets the organization_secondary_presence_id of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:param organization_secondary_presence_id: The organization_secondary_presence_id of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:type: str
"""
self._organization_secondary_presence_id = organization_secondary_presence_id
@property
def routing_status(self):
"""
Gets the routing_status of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:return: The routing_status of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:rtype: str
"""
return self._routing_status
@routing_status.setter
def routing_status(self, routing_status):
"""
Sets the routing_status of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:param routing_status: The routing_status of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:type: str
"""
allowed_values = ["__EMPTY__", "OFF_QUEUE", "IDLE", "INTERACTING", "NOT_RESPONDING", "COMMUNICATING", "OFFLINE"]
if routing_status.lower() not in map(str.lower, allowed_values):
# print("Invalid value for routing_status -> " + routing_status)
self._routing_status = "outdated_sdk_version"
else:
self._routing_status = routing_status
@property
def actual_activity_category(self):
"""
Gets the actual_activity_category of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:return: The actual_activity_category of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:rtype: str
"""
return self._actual_activity_category
@actual_activity_category.setter
def actual_activity_category(self, actual_activity_category):
"""
Sets the actual_activity_category of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:param actual_activity_category: The actual_activity_category of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:type: str
"""
self._actual_activity_category = actual_activity_category
@property
def is_out_of_office(self):
"""
Gets the is_out_of_office of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:return: The is_out_of_office of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:rtype: bool
"""
return self._is_out_of_office
@is_out_of_office.setter
def is_out_of_office(self, is_out_of_office):
"""
Sets the is_out_of_office of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:param is_out_of_office: The is_out_of_office of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:type: bool
"""
self._is_out_of_office = is_out_of_office
@property
def adherence_state(self):
"""
Gets the adherence_state of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:return: The adherence_state of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:rtype: str
"""
return self._adherence_state
@adherence_state.setter
def adherence_state(self, adherence_state):
"""
Sets the adherence_state of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:param adherence_state: The adherence_state of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:type: str
"""
allowed_values = ["InAdherence", "OutOfAdherence", "Unscheduled", "Unknown", "Ignored"]
if adherence_state.lower() not in map(str.lower, allowed_values):
# print("Invalid value for adherence_state -> " + adherence_state)
self._adherence_state = "outdated_sdk_version"
else:
self._adherence_state = adherence_state
@property
def impact(self):
"""
Gets the impact of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:return: The impact of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:rtype: str
"""
return self._impact
@impact.setter
def impact(self, impact):
"""
Sets the impact of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:param impact: The impact of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:type: str
"""
self._impact = impact
@property
def adherence_change_time(self):
"""
Gets the adherence_change_time of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:return: The adherence_change_time of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:rtype: datetime
"""
return self._adherence_change_time
@adherence_change_time.setter
def adherence_change_time(self, adherence_change_time):
"""
Sets the adherence_change_time of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:param adherence_change_time: The adherence_change_time of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:type: datetime
"""
self._adherence_change_time = adherence_change_time
@property
def presence_update_time(self):
"""
Gets the presence_update_time of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:return: The presence_update_time of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:rtype: datetime
"""
return self._presence_update_time
@presence_update_time.setter
def presence_update_time(self, presence_update_time):
"""
Sets the presence_update_time of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:param presence_update_time: The presence_update_time of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:type: datetime
"""
self._presence_update_time = presence_update_time
@property
def active_queues(self):
"""
Gets the active_queues of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:return: The active_queues of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:rtype: list[WfmUserScheduleAdherenceUpdatedMuTopicQueueReference]
"""
return self._active_queues
@active_queues.setter
def active_queues(self, active_queues):
"""
Sets the active_queues of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:param active_queues: The active_queues of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:type: list[WfmUserScheduleAdherenceUpdatedMuTopicQueueReference]
"""
self._active_queues = active_queues
@property
def active_queues_modified_time(self):
"""
Gets the active_queues_modified_time of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:return: The active_queues_modified_time of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:rtype: datetime
"""
return self._active_queues_modified_time
@active_queues_modified_time.setter
def active_queues_modified_time(self, active_queues_modified_time):
"""
Sets the active_queues_modified_time of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:param active_queues_modified_time: The active_queues_modified_time of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:type: datetime
"""
self._active_queues_modified_time = active_queues_modified_time
@property
def removed_from_management_unit(self):
"""
Gets the removed_from_management_unit of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:return: The removed_from_management_unit of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:rtype: bool
"""
return self._removed_from_management_unit
@removed_from_management_unit.setter
def removed_from_management_unit(self, removed_from_management_unit):
"""
Sets the removed_from_management_unit of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:param removed_from_management_unit: The removed_from_management_unit of this WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate.
:type: bool
"""
self._removed_from_management_unit = removed_from_management_unit
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
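# Minimal usage sketch (illustrative values; assumes this module is importable as
# part of the generated SDK build it ships in):
#
#   update = WfmUserScheduleAdherenceUpdatedMuTopicUserScheduleAdherenceUpdate()
#   update.is_out_of_office = False
#   update.adherence_state = "OutOfAdherence"   # unknown values fall back to "outdated_sdk_version"
#   print(update.to_json())                     # to_dict() + sanitize_for_serialization under the hood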
|
[
"[email protected]"
] | |
2583a229d3a812adb1e72cf5cc77957bfb57a97e
|
7e27d2b844e962a567e0311a6eb5ccf3fcdc7b98
|
/lib/exabgp/application/cli.py
|
3abc6a4b8deb29611f0efd9ba3fe1b415a3a3d6d
|
[] |
no_license
|
slabakov/exabgp
|
1dbf6a98b06a2c2cdbeedf0954d0429f0dbf98fb
|
33f851d70715f4ba1792acc36436ef32b70c30c9
|
refs/heads/master
| 2020-12-30T19:46:09.570146 | 2015-06-05T15:36:19 | 2015-06-05T15:36:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,083 |
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
cli.py
Created by Thomas Mangin on 2014-12-22.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
import sys
from exabgp.dep.cmd2 import cmd
from exabgp.version import version
class Completed (cmd.Cmd):
# use_rawinput = False
# prompt = ''
# doc_header = 'doc_header'
# misc_header = 'misc_header'
# undoc_header = 'undoc_header'
ruler = '-'
completion = {}
def __init__ (self, intro=''):
self.prompt = '%s> ' % intro
cmd.Cmd.__init__(self)
def completedefault (self, text, line, begidx, endidx): # pylint: disable=W0613
commands = line.split()
local = self.completion
for command in commands:
if command in local:
local = local[command]
continue
break
return [_ for _ in local.keys() if _.startswith(text)]
def default (self, line):
print 'unrecognised syntax: ', line
	def do_EOF (self, _):
		# cmd.Cmd passes the typed line to every do_* handler, including EOF
		return True
class SubMenu (Completed):
def do_exit (self, _):
return True
do_x = do_exit
class Attribute (SubMenu):
chars = ''.join(chr(_) for _ in range(ord('a'),ord('z')+1) + range(ord('0'),ord('9')+1) + [ord ('-')])
attribute = None
completion = {
'origin': {
'igp': {
},
'egp': {
},
'incomplete': {
},
},
}
def __init__ (self, name):
self.name = name
SubMenu.__init__(self,'attribute %s' % name)
def do_origin (self, line):
if line in ('igp','egp','incomplete'):
self.attribute['origin'] = line
else:
print 'invalid origin'
def do_as_path (self, line):
pass
# next-hop
def do_med (self, line):
if not line.isdigit():
print 'invalid med, %s is not a number' % line
return
med = int(line)
		if not 0 <= med < 65536:
			print 'invalid med, %s is not a valid number' % line
			return
		self.attribute['med'] = med
# local-preference
# atomic-aggregate
# aggregator
# community
# originator-id
# cluster-list
# extended-community
# psmi
# aigp
def do_show (self, _):
print 'attribute %s ' % self.name + ' '.join('%s %s' % (key,value) for key,value in self.attribute.iteritems())
class Syntax (Completed):
completion = {
'announce': {
'route': {
},
'l2vpn': {
},
},
'neighbor': {
'include': {
},
'exclude': {
},
'reset': {
},
'list': {
},
},
'attribute': {
},
'show': {
'routes': {
'extensive': {
},
'minimal': {
},
},
},
'reload': {
},
'restart': {
},
}
def _update_prompt (self):
if self._neighbors:
self.prompt = '\n# neighbor ' + ', '.join(self._neighbors) + '\n> '
else:
self.prompt = '\n> '
#
# repeat last command
#
# last = 'help'
# def do_last (self, line):
# "Print the input, replacing '$out' with the output of the last shell command"
# # Obviously not robust
# if hasattr(self, 'last_output'):
# print line.replace('$out', self.last_output)
_neighbors = set()
def do_neighbor (self, line):
try:
action,ip = line.split()
except ValueError:
if line == 'reset':
print 'removed neighbors', ', '.join(self._neighbors)
self._neighbors = set()
self._update_prompt()
else:
print 'invalid syntax'
self.help_neighbor()
return
if action == 'include':
# check ip is an IP
# check ip is a known IP
self._neighbors.add(ip)
self._update_prompt()
elif action == 'exclude':
if ip in self._neighbors:
self._neighbors.remove(ip)
print 'neighbor excluded'
self._update_prompt()
else:
print 'invalid neighbor'
elif action == 'list':
			print 'neighbors:', ', '.join(self._neighbors)
else:
print 'invalid syntax'
self.help_neighbor()
def help_neighbor (self):
print "neighbor include <ip>: limit the action to the defined neighbors"
print "neighbor exclude <ip>: remove a particular neighbor"
print "neighbor reset : clear the neighbor previous set "
_attribute = {}
def do_attribute (self, name):
if not name:
self.help_attribute()
return
invalid = ''.join([_ for _ in name if _ not in Attribute.chars])
if invalid:
print 'invalid character(s) in attribute name: %s' % invalid
return
cli = Attribute(name)
		cli.attribute = self._attribute.setdefault(name,{})  # setdefault so edits survive leaving the sub-menu
cli.cmdloop()
def help_attribute (self):
print 'attribute <name>'
def do_quit (self, _):
return True
do_q = do_quit
class Command (object):
def do_show (self,line):
self.request('show routes')
self.report()
import select
class Connection (object):
def __init__ (self,name):
self.read = open(name,'r+')
self.write = open(name,'w+')
def request (self,command):
self.write.write(command + '\n')
def report (self):
		# select() returns a 3-tuple, which is always truthy; check the readable list
		# so the loop ends once no reply arrives within 5 seconds
		while select.select([self.read],[],[],5)[0]:
			print self.read.readline()
def close (self):
self.read.close()
self.write.close()
class ExaBGP (Connection,Command,Syntax):
def __init__ (self,name='exabgp.cmd'):
Connection.__init__(self,name)
Syntax.__init__(self,'')
def main():
if len(sys.argv) > 1:
ExaBGP().onecmd(' '.join(sys.argv[1:]))
else:
print "ExaBGP %s CLI" % version
ExaBGP('').cmdloop()
if __name__ == '__main__':
main()
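# Usage sketch (assumes a running exabgp daemon exposing an 'exabgp.cmd' pipe in
# the working directory; paths and commands below are illustrative):
#
#   ./cli.py show routes      # one-shot: send the command, print the replies
#   ./cli.py                  # interactive prompt with tab completion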
|
[
"[email protected]"
] | |
f019de6bfe87e4a0abc40de090dffd0869c0bc61
|
70976a4a0526f7585f810921925cf8d19e6aabfa
|
/project/apps/registration/tests/factories.py
|
75a1b1bcb049fe20c0d38982e66af5692df52680
|
[
"BSD-2-Clause"
] |
permissive
|
barberscore/barberscore-api
|
36be50b943ed59ac2fc738069661f5b589354a36
|
1ed4c01ae35cad21282b573a492733837f956285
|
refs/heads/master
| 2023-09-03T21:14:57.358069 | 2023-07-08T20:45:03 | 2023-07-08T20:45:03 | 11,014,681 | 14 | 5 |
BSD-2-Clause
| 2023-02-08T01:18:17 | 2013-06-28T03:28:17 |
Python
|
UTF-8
|
Python
| false | false | 2,676 |
py
|
# Standard Library
import datetime
import rest_framework_jwt
# Third-Party
from factory import Faker # post_generation,
from factory import Iterator
from factory import LazyAttribute
from factory import PostGenerationMethodCall
from factory import RelatedFactory
from factory import Sequence
from factory import SubFactory
from factory.django import DjangoModelFactory
from factory.django import mute_signals
from factory.fuzzy import FuzzyInteger
# Django
from django.db.models.signals import pre_delete
from django.db.models.signals import pre_save
from django.db.models.signals import m2m_changed
# First-Party
from apps.registration.models import Assignment
from apps.registration.models import Contest
from apps.registration.models import Entry
from apps.registration.models import Session
from rest_framework_jwt.models import User
class AssignmentFactory(DjangoModelFactory):
# status = Assignment.STATUS.active
kind = Assignment.KIND.official
# convention = SubFactory('factories.ConventionFactory')
session = SubFactory('apps.registration.tests.factories.SessionFactory')
class Meta:
model = Assignment
class ContestFactory(DjangoModelFactory):
# status = Contest.STATUS.included
session = SubFactory('apps.registration.tests.factories.SessionFactory')
# award = SubFactory('factories.AwardFactory')
class Meta:
model = Contest
class EntryFactory(DjangoModelFactory):
status = Entry.STATUS.new
is_evaluation = True
is_private = False
session = SubFactory('apps.registration.tests.factories.SessionFactory')
# group = SubFactory('factories.GroupFactory')
class Meta:
model = Entry
class SessionFactory(DjangoModelFactory):
status = Session.STATUS.new
kind = Session.KIND.quartet
name = "International Championship"
district = Session.DISTRICT.bhs
is_invitational = False
num_rounds = 2
# convention = SubFactory('factories.ConventionFactory')
class Meta:
model = Session
# @post_generation
# def create_rounds(self, create, extracted, **kwargs):
# if create:
# for i in range(self.num_rounds):
# num = i + 1
# kind = self.num_rounds - i
# RoundFactory(
# session=self,
# num=num,
# kind=kind,
# )
@mute_signals(pre_delete, pre_save, m2m_changed)
class UserFactory(DjangoModelFactory):
name = Faker('name_male')
email = Faker('email')
password = PostGenerationMethodCall('set_password', 'password')
is_staff = False
class Meta:
model = User
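# Minimal usage sketch (assumes a configured Django test database and the factory
# dependencies above; values are illustrative):
#
#   session = SessionFactory()                      # quartet session, 2 rounds
#   entry = EntryFactory(session=session)           # or EntryFactory() to build its own session
#   user = UserFactory(email="[email protected]")     # password hashed via set_password('password')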
|
[
"[email protected]"
] | |
5de7011859a215b253659cf1f2f4cd9ba586bbdc
|
6df0d7a677129e9b325d4fdb4bbf72d512dd08b2
|
/PycharmProjects/nsd_python_v02/day08/python_code_web/logs_engine/cals/cal_ip_converter.py
|
834453bfe8326dbd076717b52602cd8e2eef0ac2
|
[] |
no_license
|
yingxingtianxia/python
|
01265a37136f2ad73fdd142f72d70f7c962e0241
|
3e1a7617a4b6552bce4a7e15a182f30e1bae221e
|
refs/heads/master
| 2021-06-14T15:48:00.939472 | 2019-12-13T05:57:36 | 2019-12-13T05:57:36 | 152,200,507 | 0 | 0 | null | 2021-06-10T20:54:26 | 2018-10-09T06:40:10 |
Python
|
UTF-8
|
Python
| false | false | 639 |
py
|
#!/usr/bin/python
# coding=utf-8
"""
将IP地址翻译为具体地名
这里使用的是假的地名,真实情况,可以通过:
1)自己建立的地址库
2)网络上的开放地址库进行翻译
http://ip.taobao.com/service/getIpInfo.php?ip=120.25.63.167
http://ip.ws.126.net/ipquery?ip=120.25.63.167
"""
import data_save as cache
import sys
sys.path.append("..")
from defines import *
def ip2Location(ip=""):
return ips[ip][0], ips[ip][1]
def cal(log, parameters=[]):
city, province = ip2Location(log['ip'])
# print log['sdate'],city,province
cache.addOne("city=" + city)
cache.addOne("province=" + province)
|
[
"[email protected]"
] |