# Copyright 2017 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.auditors.rds.rds_snapshot
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: <NAME> <<EMAIL>>
"""<import_from_stmt>security_monkey.auditor Auditor Entity<import_from_stmt>security_monkey.watchers.rds.rds_snapshot RDSSnapshot<class_stmt>RDSSnapshotAuditor(Auditor)<block_start>index=RDSSnapshot.index<line_sep>i_am_singular=RDSSnapshot.i_am_singular<line_sep>i_am_plural=RDSSnapshot.i_am_plural<def_stmt>__init__ self accounts=<none> debug=<false><block_start>super(RDSSnapshotAuditor self).__init__(accounts=accounts debug=debug)<block_end><def_stmt>prep_for_audit self<block_start>super(RDSSnapshotAuditor self).prep_for_audit()<line_sep>self.FRIENDLY={account['identifier']:account['name']<for>account self.OBJECT_STORE['ACCOUNTS']['DESCRIPTIONS']<if>account['label']<eq>'friendly'}<line_sep>self.THIRDPARTY={account['identifier']:account['name']<for>account self.OBJECT_STORE['ACCOUNTS']['DESCRIPTIONS']<if>account['label']<eq>'thirdparty'}<block_end><def_stmt>check_internet_accessible self item<block_start><if_stmt>'all'<in>item.config.get('Attributes' {}).get('restore' [])<block_start>entity=Entity(category='account' value='all')<line_sep>self.record_internet_access(item entity actions=['restore'])<block_end><block_end><def_stmt>check_friendly_cross_account self item<block_start>accounts=item.config.get('Attributes' {}).get('restore' [])<for_stmt>account accounts<block_start><if_stmt>account<eq>'all'<block_start><continue><block_end><if_stmt>account<in>self.FRIENDLY<block_start>entity=Entity(category='account' value=account account_name=self.FRIENDLY[account] account_identifier=account)<line_sep>self.record_friendly_access(item entity actions=['restore'])<block_end><block_end><block_end><def_stmt>check_thirdparty_cross_account self item<block_start>accounts=item.config.get('Attributes' {}).get('restore' [])<for_stmt>account accounts<block_start><if_stmt>account<eq>'all'<block_start><continue><block_end><if_stmt>account<in>self.THIRDPARTY<block_start>entity=Entity(category='account' value=account account_name=self.THIRDPARTY[account] account_identifier=account)<line_sep>self.record_thirdparty_access(item entity actions=['restore'])<block_end><block_end><block_end><def_stmt>check_unknown_cross_account self item<block_start>accounts=item.config.get('Attributes' {}).get('restore' [])<for_stmt>account accounts<block_start><if_stmt>account<eq>'all'<block_start><continue><block_end><if_stmt>account<not><in>self.FRIENDLY<and>account<not><in>self.THIRDPARTY<block_start>entity=Entity(category='account' value=account)<line_sep>self.record_unknown_access(item entity actions=['restore'])<block_end><block_end><block_end><block_end>
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# This script retrieves all audit logs across an Oracle Cloud Infrastructure tenancy
# for a timespan defined by start_time and end_time.
# This sample script retrieves audit events for the last 5 days.
# This script will work at a tenancy level only.
import datetime

import oci


def get_subscription_regions(identity, tenancy_id):
    '''
    Retrieve the list of all available regions.
    '''
    list_of_regions = []
    list_regions_response = identity.list_region_subscriptions(tenancy_id)
    for r in list_regions_response.data:
        list_of_regions.append(r.region_name)
    return list_of_regions


def get_compartments(identity, tenancy_id):
    '''
    Retrieve the list of compartments under the tenancy.
    '''
    list_compartments_response = oci.pagination.list_call_get_all_results(
        identity.list_compartments,
        compartment_id=tenancy_id).data

    compartment_ocids = [c.id for c in filter(
        lambda c: c.lifecycle_state == 'ACTIVE', list_compartments_response)]

    return compartment_ocids


def get_audit_events(audit, compartment_ocids, start_time, end_time):
    '''
    Get events iteratively for each compartment defined in 'compartment_ocids'
    for the region defined in 'audit'.
    This method eagerly loads all audit records in the time range, which has
    performance implications when there are many audit records.
    Ideally, the generator method in oci.pagination should be used to lazily
    load results.
    '''
    list_of_audit_events = []

    for c in compartment_ocids:
        list_events_response = oci.pagination.list_call_get_all_results(
            audit.list_events,
            compartment_id=c,
            start_time=start_time,
            end_time=end_time).data

        # Results for a compartment 'c' for a region defined
        # in 'audit' object.
        list_of_audit_events.extend(list_events_response)

    return list_of_audit_events


# Setting configuration
# Default path for configuration file is "~/.oci/config"
config = oci.config.from_file()
tenancy_id = config["tenancy"]

# Initiate the client with the locally available config.
identity = oci.identity.IdentityClient(config)

# Timespan defined by variables start_time and end_time (today).
# ListEvents expects timestamps in RFC3339 format.
# For the purposes of this sample script, fetch the logs of the last 5 days.
end_time = datetime.datetime.utcnow()
start_time = end_time + datetime.timedelta(days=-5)

# This array will be used to store the list of available regions.
regions = get_subscription_regions(identity, tenancy_id)

# This array will be used to store the list of compartments in the tenancy.
compartments = get_compartments(identity, tenancy_id)

audit = oci.audit.audit_client.AuditClient(config)

# For each region get the logs for each compartment.
for r in regions:
    # Initialize with a region value.
    audit.base_client.set_region(r)
    # To separate results by region use print here.
    audit_events = get_audit_events(
        audit,
        compartments,
        start_time,
        end_time)

    # Results for a region 'r' for each compartment.
    if audit_events:
        print(audit_events)
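
# Hedged sketch of the lazy alternative the docstring above recommends. It
# assumes the oci.pagination.list_call_get_all_results_generator helper with
# the 'record' yield mode; verify the exact name and signature against the
# installed SDK version before relying on it.
def iter_audit_events(audit, compartment_ocids, start_time, end_time):
    for c in compartment_ocids:
        for event in oci.pagination.list_call_get_all_results_generator(
                audit.list_events, 'record',
                compartment_id=c, start_time=start_time, end_time=end_time):
            yield event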
|
import json
from textwrap import dedent


class TestSetBootactionArgs:
    def test_no_args(self, host):
        result = host.run('stack set bootaction args')
        assert result.rc == 255
        assert result.stderr == dedent('''\
            error - "action" argument is required
            {action} {args=string} [os=string] [type=string]
        ''')

    def test_multiple_args(self, host):
        result = host.run('stack set bootaction args test foo')
        assert result.rc == 255
        assert result.stderr == dedent('''\
            error - "action" argument must be unique
            {action} {args=string} [os=string] [type=string]
        ''')

    def test_invalid_action(self, host):
        result = host.run('stack set bootaction args test type=os args=test')
        assert result.rc == 255
        assert result.stderr == 'error - action "test" does not exist\n'

    def test_no_type(self, host):
        result = host.run('stack set bootaction args memtest')
        assert result.rc == 255
        assert result.stderr == dedent('''\
            error - "type" parameter is required
            {action} {args=string} [os=string] [type=string]
        ''')

    def test_invalid_type(self, host):
        result = host.run('stack set bootaction args memtest type=foo')
        assert result.rc == 255
        assert result.stderr == dedent('''\
            error - "type" parameter must be "os" or "install"
            {action} {args=string} [os=string] [type=string]
        ''')

    def test_no_args_parameter(self, host):
        result = host.run('stack set bootaction args memtest type=os')
        assert result.rc == 255
        assert result.stderr == dedent('''\
            error - "args" parameter is required
            {action} {args=string} [os=string] [type=string]
        ''')

    def test_with_os(self, host):
        # Add a test bootaction with an OS
        result = host.run('stack add bootaction test type=os os=ubuntu kernel=""')
        assert result.rc == 0

        # Make sure the action got added
        result = host.run('stack list bootaction test output-format=json')
        assert result.rc == 0
        assert json.loads(result.stdout) == [{
            'args': None,
            'bootaction': 'test',
            'kernel': None,
            'os': 'ubuntu',
            'ramdisk': None,
            'type': 'os'
        }]

        # Set the bootaction args with a specified os
        result = host.run('stack set bootaction args test type=os os=ubuntu args="test_args"')
        assert result.rc == 0

        # Make sure the args got set
        result = host.run('stack list bootaction test output-format=json')
        assert result.rc == 0
        assert json.loads(result.stdout) == [{
            'args': 'test_args',
            'bootaction': 'test',
            'kernel': None,
            'os': 'ubuntu',
            'ramdisk': None,
            'type': 'os'
        }]

    def test_os_is_null(self, host):
        # Set the bootaction args with a null os
        result = host.run('stack set bootaction args memtest type=os args="test_args"')
        assert result.rc == 0

        # Make sure the args got set
        result = host.run('stack list bootaction memtest output-format=json')
        assert result.rc == 0
        assert json.loads(result.stdout) == [{
            'args': 'test_args',
            'bootaction': 'memtest',
            'kernel': 'kernel memtest',
            'os': None,
            'ramdisk': None,
            'type': 'os'
        }]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import csv
import numpy as np
import os
import sys

from observations.util import maybe_download_and_extract


def softbacks(path):
    """Measurements on a Selection of Paperback Books

    This is a subset of the `allbacks` data frame which gives measurements
    on the volume and weight of 8 paperback books.

    This data frame contains the following columns:

    volume
        a numeric vector giving the book volumes in cubic centimeters

    weight
        a numeric vector giving the weights in grams

    The bookshelf of <NAME>.

    Args:
      path: str.
        Path to directory which either stores file or otherwise file will
        be downloaded and extracted there.
        Filename is `softbacks.csv`.

    Returns:
      Tuple of np.ndarray `x_train` with 8 rows and 2 columns and
      dictionary `metadata` of column headers (feature names).
    """
    import pandas as pd
    path = os.path.expanduser(path)
    filename = 'softbacks.csv'
    if not os.path.exists(os.path.join(path, filename)):
        url = 'http://dustintran.com/data/r/DAAG/softbacks.csv'
        maybe_download_and_extract(path, url,
                                   save_file_name='softbacks.csv',
                                   resume=False)

    data = pd.read_csv(os.path.join(path, filename), index_col=0,
                       parse_dates=True)
    x_train = data.values
    metadata = {'columns': data.columns}
    return x_train, metadata
|
import os
import sqlite3
from functools import update_wrapper

from dagster import check

from .sql import run_migrations_offline as run_migrations_offline_
from .sql import run_migrations_online as run_migrations_online_


def run_migrations_offline(*args, **kwargs):
    try:
        run_migrations_offline_(*args, **kwargs)
    except sqlite3.DatabaseError as exc:
        # This is to deal with concurrent execution -- if this table already exists thanks to a
        # race with another process, we are fine and can continue.
        if "table alembic_version already exists" not in str(exc):
            raise


def run_migrations_online(*args, **kwargs):
    try:
        run_migrations_online_(*args, **kwargs)
    except (sqlite3.DatabaseError, sqlite3.OperationalError) as exc:
        # This is to deal with concurrent execution -- if this table already exists thanks to a
        # race with another process, we are fine and can continue.
        if "table alembic_version already exists" not in str(exc):
            raise


update_wrapper(run_migrations_offline, run_migrations_offline_)
update_wrapper(run_migrations_online, run_migrations_online_)


def create_db_conn_string(base_dir, db_name):
    check.str_param(base_dir, "base_dir")
    check.str_param(db_name, "db_name")

    path_components = os.path.abspath(base_dir).split(os.sep)
    db_file = "{}.db".format(db_name)
    return "sqlite:///{}".format("/".join(path_components + [db_file]))
|
import numpy as np
import pandas as pd

df = pd.read_csv('tests/bugs/issue_19/issue_19_data_1.csv')

import datetime


def convert_date(x):
    y = np.nan
    try:
        y = datetime.datetime.strptime(str(x), "%Y")
    except Exception:
        # bad format
        pass
    return y


df['date'] = df['date'].apply(convert_date)
df_train = df[['date', 'number']].dropna().reset_index(drop=True)
print(df_train)

import pyaf.ForecastEngine as autof

lEngine = autof.cForecastEngine()
lEngine.train(iInputDS=df_train, iTime='date', iSignal='number', iHorizon=7)
print(lEngine.getModelInfo())
# lEngine.standardPlots('outputs/tour')

df_forecast = lEngine.forecast(iInputDS=df_train, iHorizon=7)
print(df_forecast.columns)
print(df_forecast[['date', 'number_Forecast',
                   'number_Forecast_Lower_Bound',
                   'number_Forecast_Upper_Bound']].tail(7))
|
"""Constants for the tankerkoenig integration."""<line_sep>DOMAIN="tankerkoenig"<line_sep>NAME="tankerkoenig"<line_sep>CONF_FUEL_TYPES="fuel_types"<line_sep>CONF_STATIONS="stations"<line_sep>FUEL_TYPES=["e5" "e10" "diesel"]<line_sep>
|
import FWCore.ParameterSet.Config as cms

from TrackingTools.MaterialEffects.MaterialPropagator_cfi import MaterialPropagator
MaterialPropagatorParabolicMF = MaterialPropagator.clone(
    SimpleMagneticField = 'ParabolicMf',
    ComponentName = 'PropagatorWithMaterialParabolicMf'
)

from TrackingTools.MaterialEffects.OppositeMaterialPropagator_cfi import OppositeMaterialPropagator
OppositeMaterialPropagatorParabolicMF = OppositeMaterialPropagator.clone(
    SimpleMagneticField = 'ParabolicMf',
    ComponentName = 'PropagatorWithMaterialParabolicMfOpposite'
)
|
# Generated by Django 3.1.12 on 2021-09-09 14:23
import pkgutil

from django.db import connection
from django.db import migrations
from django.db import models


def add_aws_views(apps, schema_editor):
    """Create the AWS Materialized views from files."""
    version = "_20210910"
    views = {
        f"sql/views/{version}/reporting_aws_compute_summary": ["", "_by_account", "_by_region", "_by_service"],
        f"sql/views/{version}/reporting_aws_cost_summary": ["", "_by_account", "_by_region", "_by_service"],
        f"sql/views/{version}/reporting_aws_storage_summary": ["", "_by_account", "_by_region", "_by_service"],
        f"sql/views/{version}/reporting_aws_database_summary": [""],
        f"sql/views/{version}/reporting_aws_network_summary": [""],
    }
    for base_path, view_tuple in views.items():
        for view in view_tuple:
            view_sql = pkgutil.get_data("reporting.provider.aws", f"{base_path}{view}{version}.sql")
            view_sql = view_sql.decode("utf-8")
            with connection.cursor() as cursor:
                cursor.execute(view_sql)


class Migration(migrations.Migration):

    dependencies = [("reporting", "0193_gcptopology")]

    operations = [
        migrations.AddField(
            model_name="awscostentrylineitemdailysummary",
            name="savingsplan_effective_cost",
            field=models.DecimalField(decimal_places=9, max_digits=24, null=True),
        ),
        migrations.RunPython(add_aws_views),
    ]
|
def main():
    while True:
        n, m = map(int, input().split())
        if n == 0 and m == 0:
            break
        D = [9] * n
        H = [0] * m
        for d in range(n):
            D[d] = int(input())
        for k in range(m):
            H[k] = int(input())
        D.sort()                          # sorting is an important
        H.sort()                          # pre-processing step
        gold = 0
        d = 0
        k = 0                             # both arrays are sorted
        while d < n and k < m:            # while not done yet
            while k < m and D[d] > H[k]:
                k += 1                    # find required knight k
            if k == m:
                break                     # loowater is doomed :S
            gold += H[k]                  # pay this amount of gold
            d += 1                        # next dragon
            k += 1                        # next knight
        if d == n:
            print("{}".format(gold))      # all dragons are chopped
        else:
            print("Loowater is doomed!")


main()
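
# Illustrative run, traced by hand through the greedy loop above: with dragons
# of sizes 5 and 4 and knights of heights 7, 8 and 4, the sorted pairing hires
# the height-4 knight for the size-4 dragon and the height-7 knight for the
# size-5 dragon, printing 11. With two dragons but a single knight, the outer
# loop runs out of knights and "Loowater is doomed!" is printed instead.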
|
import subprocess
import sys
import os
import logging

from .utils import which

lgr = logging.getLogger(__name__)


def queue_conversion(queue, iterarg, iterables, queue_args=None):
    """
    Write out conversion arguments to file and submit to a job scheduler.
    Parses `sys.argv` for heudiconv arguments.

    Parameters
    ----------
    queue: string
        Batch scheduler to use
    iterarg: str
        Multi-argument to index (`subjects` OR `files`)
    iterables: int
        Number of `iterarg` arguments
    queue_args: string (optional)
        Additional queue arguments for job submission
    """
    SUPPORTED_QUEUES = {'SLURM': 'sbatch'}
    if queue not in SUPPORTED_QUEUES:
        raise NotImplementedError("Queuing with %s is not supported" % queue)

    for i in range(iterables):
        args = clean_args(sys.argv[1:], iterarg, i)
        # make arguments executable
        heudiconv_exec = which("heudiconv") or "heudiconv"
        args.insert(0, heudiconv_exec)
        convertcmd = " ".join(args)

        # will overwrite across subjects
        queue_file = os.path.abspath('heudiconv-%s.sh' % queue)
        with open(queue_file, 'wt') as fp:
            fp.write("#!/bin/bash\n")
            if queue_args:
                for qarg in queue_args.split():
                    fp.write("#SBATCH %s\n" % qarg)
            fp.write(convertcmd + "\n")

        cmd = [SUPPORTED_QUEUES[queue], queue_file]
        proc = subprocess.call(cmd)
    lgr.info("Submitted %d jobs", iterables)


def clean_args(hargs, iterarg, iteridx):
    """
    Filters arguments for batch submission.

    Parameters
    ----------
    hargs: list
        Command-line arguments
    iterarg: str
        Multi-argument to index (`subjects` OR `files`)
    iteridx: int
        `iterarg` index to submit

    Returns
    -------
    cmdargs : list
        Filtered arguments for batch submission

    Example
    --------
    >>> from heudiconv.queue import clean_args
    >>> cmd = ['heudiconv', '-d', '/some/{subject}/path',
    ...        '-q', 'SLURM',
    ...        '-s', 'sub-1', 'sub-2', 'sub-3', 'sub-4']
    >>> clean_args(cmd, 'subjects', 0)
    ['heudiconv', '-d', '/some/{subject}/path', '-s', 'sub-1']
    """
    if iterarg == "subjects":
        iterarg = ['-s', '--subjects']
    elif iterarg == "files":
        iterarg = ['--files']
    else:
        raise ValueError("Cannot index %s" % iterarg)

    # remove these or cause an infinite loop
    queue_args = ['-q', '--queue', '--queue-args']

    # control variables for multi-argument parsing
    is_iterarg = False
    itercount = 0
    indicies = []
    cmdargs = hargs[:]

    for i, arg in enumerate(hargs):
        if arg.startswith('-') and is_iterarg:
            # moving on to another argument
            is_iterarg = False
        if is_iterarg:
            if iteridx != itercount:
                indicies.append(i)
            itercount += 1
        if arg in iterarg:
            is_iterarg = True
        if arg in queue_args:
            indicies.extend([i, i + 1])

    for j in sorted(indicies, reverse=True):
        del cmdargs[j]
    return cmdargs
|
import typing

import pytest

from bocadillo import App, configure, create_client, Templates, settings


@pytest.fixture(name="raw_app")
def fixture_raw_app(request) -> App:
    settings._clear()
    return App()


@pytest.fixture(name="app")
def fixture_app(raw_app: App) -> App:
    configure(raw_app)
    return raw_app


@pytest.fixture
def client(app):
    return create_client(app)


@pytest.fixture(name="templates")
def fixture_templates():
    return Templates()


class TemplateWrapper(typing.NamedTuple):
    name: str
    context: dict
    rendered: str
    root: str


def create_template(templates: Templates, tmpdir_factory, dirname: str) -> TemplateWrapper:
    templates_dir = tmpdir_factory.mktemp(dirname)
    template = templates_dir.join("hello.html")
    template.write("<h1>Hello, {{ name }}!</h1>")

    templates.directory = str(templates_dir)

    return TemplateWrapper(
        name="hello.html",
        context={"name": "Bocadillo"},
        rendered="<h1>Hello, Bocadillo!</h1>",
        root=str(templates_dir),
    )


@pytest.fixture
def template_file(templates: Templates, tmpdir_factory) -> TemplateWrapper:
    return create_template(templates, tmpdir_factory, dirname="templates")
|
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import sys
from argparse import ArgumentParser

import extractor


def parse_args(argv):
    """
    Parse and process arguments for frames-extractor tool
    """
    parser = ArgumentParser(description='Frames-extractor toolkit', allow_abbrev=False)
    parser.add_argument('-v', '--video', help='Full path to video file', required=True)
    parser.add_argument('-o', '--output_dir', help='Directory to save valuable frames from video.', required=True)
    parser.add_argument('-f', '--frame_step', type=int, help='Read frames from video with step',
                        default=1, required=False)
    parser.add_argument('-e', '--ext', type=str, help='Extension of images in resulting dataset',
                        choices=['jpg', 'png'], default='png', required=False)
    parser.add_argument('-s', '--dataset_size', type=int,
                        help='Number of frames to save from video as dataset. '
                             'Should be less than video frames number',
                        default=None, required=False)
    args = parser.parse_args(args=argv)
    return args.video, args.output_dir, args.dataset_size, args.frame_step


if __name__ == '__main__':
    extractor.extract_frames_and_make_dataset(*parse_args(sys.argv[1:]))
|
# tests/test_provider_ellisdon-oss_azuredevops.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:13:00 UTC)
def test_provider_import():
    import terrascript.provider.ellisdon_oss.azuredevops


def test_resource_import():
    from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_build_definition
    from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_extension
    from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_project
    from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_release_definition
    from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_release_environment
    from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_release_task
    from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_release_tasks
    from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_release_variables
    from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_service_endpoint
    from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_service_hook
    from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_task_group
    from terrascript.resource.ellisdon_oss.azuredevops import azuredevops_variable_group


def test_datasource_import():
    from terrascript.data.ellisdon_oss.azuredevops import azuredevops_agent_queue
    from terrascript.data.ellisdon_oss.azuredevops import azuredevops_build_definition
    from terrascript.data.ellisdon_oss.azuredevops import azuredevops_group
    from terrascript.data.ellisdon_oss.azuredevops import azuredevops_project
    from terrascript.data.ellisdon_oss.azuredevops import azuredevops_release_definition
    from terrascript.data.ellisdon_oss.azuredevops import azuredevops_release_definition_environments
    from terrascript.data.ellisdon_oss.azuredevops import azuredevops_release_definitions
    from terrascript.data.ellisdon_oss.azuredevops import azuredevops_release_environment
    from terrascript.data.ellisdon_oss.azuredevops import azuredevops_release_stage_variables
    from terrascript.data.ellisdon_oss.azuredevops import azuredevops_release_tasks
    from terrascript.data.ellisdon_oss.azuredevops import azuredevops_service_endpoint
    from terrascript.data.ellisdon_oss.azuredevops import azuredevops_source_repository
    from terrascript.data.ellisdon_oss.azuredevops import azuredevops_task_group
    from terrascript.data.ellisdon_oss.azuredevops import azuredevops_user
    from terrascript.data.ellisdon_oss.azuredevops import azuredevops_variable_group
    from terrascript.data.ellisdon_oss.azuredevops import azuredevops_variable_groups
    from terrascript.data.ellisdon_oss.azuredevops import azuredevops_workflow_task


# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.ellisdon_oss.azuredevops
#
# t = terrascript.provider.ellisdon_oss.azuredevops.azuredevops()
# s = str(t)
#
# assert 'https://github.com/ellisdon-oss/terraform-provider-azuredevops' in s
# assert '0.0.2' in s
|
# HTK Imports
from htk.lib.airtable.api import AirtableAPI
|
import typedef_class

a = typedef_class.RealA()
a.a = 3

b = typedef_class.B()
b.testA(a)
|
from securityheaders.models import Directive
from securityheaders.models.annotations import requireddirectives, requireddirectivevalues


@requireddirectivevalues(
    'form-action', 'frame-ancestors', 'report-uri', 'report-to', 'require-sri-for',
    'plugin-types', 'worker-src', 'style-src', 'object-src', 'manifest-src',
    'frame-src', 'default-src', 'connect-src', 'child-src')
class CSPDirective(Directive):
    # Fetch directives
    CHILD_SRC = 'child-src', 'childSrc'
    CONNECT_SRC = 'connect-src', 'connectSrc'
    DEFAULT_SRC = 'default-src', 'defaultSrc'
    FONT_SRC = 'font-src', 'fontSrc'
    FRAME_SRC = 'frame-src', 'frameSrc'
    IMG_SRC = 'img-src', 'imgSrc'
    MEDIA_SRC = 'media-src', 'mediaSrc'
    OBJECT_SRC = 'object-src', 'objectSrc'
    SCRIPT_SRC = 'script-src', 'scriptSrc'
    STYLE_SRC = 'style-src', 'styleSrc'
    MANIFEST_SRC = 'manifest-src', 'manifestSrc'
    WORKER_SRC = 'worker-src', 'workerSrc'

    # Document directives
    BASE_URI = 'base-uri', 'baseUri'
    PLUGIN_TYPES = 'plugin-types', 'pluginTypes'
    SANDBOX = 'sandbox', 'sandBox'
    DISOWN_OPENER = 'disown-opener', 'disownOpener'

    # Navigation directives
    FORM_ACTION = 'form-action', 'formAction'
    FRAME_ANCESTORS = 'frame-ancestors', 'frameAncestors'

    # Reporting directives
    REPORT_TO = 'report-to', 'reportTo'
    REPORT_URI = 'report-uri', 'reportUri'

    # Other directives
    BLOCK_ALL_MIXED_CONTENT = 'block-all-mixed-content', 'blockAllMixedContent'
    UPGRADE_INSECURE_REQUESTS = 'upgrade-insecure-requests', 'upgradeInsecureRequests'
    REFLECTED_XSS = 'reflected-xss', 'reflectedXss'
    REFERRER = 'referrer'
    REQUIRE_SRI_FOR = 'require-sri-for', 'requireSriFor'

    @classmethod
    def isDirective(cls, directive):
        """ Checks whether a given string is a directive

        Args:
            directive (str): the string to validate
        """
        if isinstance(directive, CSPDirective):
            return True
        return any(directive.lower() == item for item in list(cls.keys()))
|
# ############################################################
# Importing - Same For All Render Layer Tests
# ############################################################

import unittest
import os
import sys

from view_layer_common import *


# ############################################################
# Testing
# ############################################################

class UnitTesting(ViewLayerTesting):
    def test_scene_delete(self):
        """
        See if a scene can be properly deleted
        """
        import bpy
        scene = bpy.context.scene
        bpy.data.scenes.new('New')
        bpy.data.scenes.remove(scene)


# ############################################################
# Main - Same For All Render Layer Tests
# ############################################################

if __name__ == '__main__':
    UnitTesting._extra_arguments = setup_extra_arguments(__file__)
    unittest.main()
|
import json
import re

import scrapy
from scrapy.selector import Selector

from locations.hours import OpeningHours
from locations.items import GeojsonPointItem


class TropicalSmoothieCafe(scrapy.Spider):
    name = "tropical_smoothie_cafe"
    item_attributes = {"brand": "Tropical Smoothie Cafe", "brand_wikidata": "Q7845817"}
    allowed_domains = ["locations.tropicalsmoothiecafe.com"]
    start_urls = ("https://locations.tropicalsmoothiecafe.com/sitemap.xml",)

    def parse(self, response):
        xml = Selector(response)
        xml.remove_namespaces()

        urls = xml.xpath("//loc/text()").extract()
        urls = [url.strip() for url in urls]
        for url in urls:
            path = scrapy.utils.url.parse_url(url).path
            if re.match(r"^/.*/.*/.*$", path):
                yield scrapy.Request(url, callback=self.parse_location)

    def parse_location(self, response):
        hours_spec = response.css(".Cafe-hours").xpath(".//@data-days").get()
        hours = self.parse_hours(json.loads(hours_spec)) if hours_spec else None

        ref = (
            response.css(
                """
                a.Header-orderOnline[href^="https://ordernow.tropicalsmoothie.com"],
                a.Header-orderOnline[href^="https://order.tropicalsmoothie.com"],
                a.Header-orderOnline[href^="https://order.tropicalsmoothiecafe.com"]
                """
            )
            .attrib["href"]
            .split("/")[-1]
        )

        properties = {
            "name": response.xpath('//h1[@itemprop="name"]/text()').get(),
            "extras": {"branch": response.css("div.Hero-city").xpath("./text()").get()},
            "addr_full": response.xpath('//*[@itemprop="streetAddress"]/@content').get(),
            "city": response.xpath('//*[@itemprop="addressLocality"]/@content').get(),
            "state": response.xpath('//*[@itemprop="addressRegion"]/text()').get(),
            "postcode": response.xpath('//*[@itemprop="postalCode"]/text()').get(),
            "phone": response.xpath('//*[@itemprop="telephone"]/text()').get(),
            "website": response.url,
            "opening_hours": hours,
            "ref": ref,
            "lat": response.xpath('//*[@itemprop="latitude"]/@content').get(),
            "lon": response.xpath('//*[@itemprop="longitude"]/@content').get(),
        }
        yield GeojsonPointItem(**properties)

    def parse_hours(self, hours_json):
        opening_hours = OpeningHours()
        for date in hours_json:
            day = date["day"][:2].capitalize()
            for interval in date["intervals"]:
                start_hr, start_min = divmod(interval["start"], 100)
                end_hr, end_min = divmod(interval["end"], 100)
                opening_hours.add_range(day, f"{start_hr}:{start_min}", f"{end_hr}:{end_min}")
        return opening_hours.as_opening_hours()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-03 11:49
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [("hordak", "0001_initial")]

    operations = [
        migrations.RunSQL(
            """
            CREATE OR REPLACE FUNCTION check_leg()
                RETURNS trigger AS
            $$
            DECLARE
                transaction_sum DECIMAL(13, 2);
            BEGIN
                IF (TG_OP = 'DELETE') THEN
                    SELECT SUM(amount) INTO transaction_sum FROM hordak_leg WHERE transaction_id = OLD.transaction_id;
                ELSE
                    SELECT SUM(amount) INTO transaction_sum FROM hordak_leg WHERE transaction_id = NEW.transaction_id;
                END IF;

                IF transaction_sum != 0 THEN
                    RAISE EXCEPTION 'Sum of transaction amounts must be 0';
                END IF;

                RETURN NEW;
            END;
            $$
            LANGUAGE plpgsql
            """,
            "DROP FUNCTION check_leg()",
        ),
        migrations.RunSQL(
            """
            CREATE CONSTRAINT TRIGGER check_leg_trigger
                AFTER INSERT OR UPDATE OR DELETE ON hordak_leg
                DEFERRABLE INITIALLY DEFERRED
                FOR EACH ROW EXECUTE PROCEDURE check_leg();
            """,
            "DROP TRIGGER IF EXISTS check_leg_trigger ON hordak_leg",
        ),
    ]
|
# cfg file to write the VHDL templates
import FWCore.ParameterSet.Config as cms

# process
process = cms.Process("L1GtVhdlWriterTest")

# number of events and source
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource")

# configuration
process.load("L1TriggerConfig.L1GtConfigProducers.L1GtConfig_cff")
process.load("L1TriggerConfig.L1GtConfigProducers.Luminosity.lumi1x1032.L1Menu2007_cff")
#process.load("L1TriggerConfig.L1GtConfigProducers.Luminosity.lumi1030.L1Menu2008_2E30_cff")
#process.load("L1TriggerConfig.L1GtConfigProducers.Luminosity.lumi1031.L1Menu2008_2E31_cff")
#process.load("L1TriggerConfig.L1GtConfigProducers.Luminosity.lumi1x1032.L1MenuTestCondCorrelation_cff")

process.load("L1TriggerConfig.L1GtConfigProducers.l1GtVhdlWriter_cfi")

# path to be run
process.p = cms.Path(process.l1GtVhdlWriter)

# services

# Message Logger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.debugModules = ['l1GtVhdlWriterTest']
process.MessageLogger.cout = cms.untracked.PSet(
    INFO = cms.untracked.PSet(
        limit = cms.untracked.int32(-1)
    ),
    threshold = cms.untracked.string('DEBUG'),  ## DEBUG
    DEBUG = cms.untracked.PSet(  ## DEBUG, all messages
        limit = cms.untracked.int32(-1)
    )
)
|
from fastapi import FastAPI, Request

app = FastAPI()


@app.get("/app")
def read_main(request: Request):
    return {"message": "Hello World", "root_path": request.scope.get("root_path")}
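
# Hedged test sketch: when the app runs behind a proxy that strips a path
# prefix, that prefix is what shows up as root_path. The "/api/v1" prefix used
# here is illustrative.
from fastapi.testclient import TestClient

client = TestClient(app, root_path="/api/v1")
response = client.get("/app")
assert response.json() == {"message": "Hello World", "root_path": "/api/v1"}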
|
# -*- coding: utf-8 -*-
BADREQUEST = 400
UNAUTHORIZED = 401
FORBIDDEN = 403
GONE = 410
TOOMANYREQUESTS = 412


class DnsdbException(Exception):
    def __init__(self, message, errcode=500, detail=None, msg_ch=u''):
        self.message = message
        self.errcode = errcode
        self.detail = detail
        self.msg_ch = msg_ch
        super(DnsdbException, self).__init__()

    def __str__(self):
        return self.message

    def json(self):
        return dict(code=self.errcode, why=self.message)


class Unauthorized(DnsdbException):
    def __init__(self, message='Unauthorized', errcode=UNAUTHORIZED, detail=None, msg_ch=u''):
        super(Unauthorized, self).__init__(message, errcode, detail, msg_ch)


class Forbidden(DnsdbException):
    def __init__(self, message='Forbidden', errcode=FORBIDDEN, detail=None, msg_ch=u''):
        super(Forbidden, self).__init__(message, errcode, detail, msg_ch)


class OperationLogErr(DnsdbException):
    def __init__(self, message, errcode=500, detail=None, msg_ch=u''):
        super(OperationLogErr, self).__init__(message, errcode, detail, msg_ch)


class BadParam(DnsdbException):
    def __init__(self, message='Bad params', errcode=BADREQUEST, detail=None, msg_ch=u''):
        super(BadParam, self).__init__(message, errcode, detail, msg_ch)


class UpdaterErr(DnsdbException):
    pass


class ConfigErr(UpdaterErr):
    def __init__(self, message):
        super(ConfigErr, self).__init__(message=message, errcode=501)
|
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


class Config(object):
    def __init__(self):
        """Set and get configurations for tf models.

        Configurations:
          training (bool): Whether in training mode or not. Defaults to True.
          emb_max_partitions (int): The `max_partitions` for embedding variables
            partitioned by `min_max_variable_partitioner`. Specially,
            `EmbeddingVariable` uses `fixed_size_partitioner`.
            Defaults to None means no partition.
          emb_min_slice_size (int): The `min_slice_size` for embedding variables
            partitioned by `min_max_variable_partitioner`. Defaults to 128K.
          emb_live_steps (int): Global steps to live for inactive keys in embedding
            variables. Defaults to None.
        """
        self.training = True
        self.partitioner = 'min_max'
        self.emb_max_partitions = None
        self.emb_min_slice_size = 128 * 1024
        self.emb_live_steps = None


conf = Config()
|
"""Miscellaneous bindings to ffmpeg."""<import_stmt>os<import_from_stmt>moviepy.config FFMPEG_BINARY<import_from_stmt>moviepy.decorators convert_parameter_to_seconds convert_path_to_string<import_from_stmt>moviepy.tools subprocess_call<line_sep>@convert_path_to_string(("inputfile" "outputfile"))@convert_parameter_to_seconds(("start_time" "end_time"))<def_stmt>ffmpeg_extract_subclip inputfile start_time end_time outputfile=<none> logger="bar"<block_start>"""Makes a new video file playing video file between two times.
Parameters
----------
inputfile : str
Path to the file from which the subclip will be extracted.
start_time : float
Moment of the input clip that marks the start of the produced subclip.
end_time : float
Moment of the input clip that marks the end of the produced subclip.
outputfile : str, optional
Path to the output file. Defaults to
``<inputfile_name>SUB<start_time>_<end_time><ext>``.
"""<if_stmt><not>outputfile<block_start>name,ext=os.path.splitext(inputfile)<line_sep>t1,t2=[int(1000<times>t)<for>t [start_time end_time]]<line_sep>outputfile="%sSUB%d_%d%s"%(name t1 t2 ext)<block_end>cmd=[FFMPEG_BINARY "-y" "-ss" "%0.2f"%start_time "-i" inputfile "-t" "%0.2f"%(end_time-start_time) "-map" "0" "-vcodec" "copy" "-acodec" "copy" "-copyts" outputfile ]<line_sep>subprocess_call(cmd logger=logger)<block_end>@convert_path_to_string(("videofile" "audiofile" "outputfile"))<def_stmt>ffmpeg_merge_video_audio videofile audiofile outputfile video_codec="copy" audio_codec="copy" logger="bar" <block_start>"""Merges video file and audio file into one movie file.
Parameters
----------
videofile : str
Path to the video file used in the merge.
audiofile : str
Path to the audio file used in the merge.
outputfile : str
Path to the output file.
video_codec : str, optional
Video codec used by FFmpeg in the merge.
audio_codec : str, optional
Audio codec used by FFmpeg in the merge.
"""<line_sep>cmd=[FFMPEG_BINARY "-y" "-i" audiofile "-i" videofile "-vcodec" video_codec "-acodec" audio_codec outputfile ]<line_sep>subprocess_call(cmd logger=logger)<block_end>@convert_path_to_string(("inputfile" "outputfile"))<def_stmt>ffmpeg_extract_audio inputfile outputfile bitrate=3000 fps=44100 logger="bar"<block_start>"""Extract the sound from a video file and save it in ``outputfile``.
Parameters
----------
inputfile : str
The path to the file from which the audio will be extracted.
outputfile : str
The path to the file to which the audio will be stored.
bitrate : int, optional
Bitrate for the new audio file.
fps : int, optional
Frame rate for the new audio file.
"""<line_sep>cmd=[FFMPEG_BINARY "-y" "-i" inputfile "-ab" "%dk"%bitrate "-ar" "%d"%fps outputfile ]<line_sep>subprocess_call(cmd logger=logger)<block_end>@convert_path_to_string(("inputfile" "outputfile"))<def_stmt>ffmpeg_resize inputfile outputfile size logger="bar"<block_start>"""Resizes a file to new size and write the result in another.
Parameters
----------
inputfile : str
Path to the file to be resized.
outputfile : str
Path to the output file.
size : list or tuple
New size in format ``[width, height]`` for the output file.
"""<line_sep>cmd=[FFMPEG_BINARY "-i" inputfile "-vf" "scale=%d:%d"%(size[0] size[1]) outputfile ]<line_sep>subprocess_call(cmd logger=logger)<block_end>@convert_path_to_string(("inputfile" "outputfile" "output_dir"))<def_stmt>ffmpeg_stabilize_video inputfile outputfile=<none> output_dir="" overwrite_file=<true> logger="bar"<block_start>"""
Stabilizes ``filename`` and write the result to ``output``.
Parameters
----------
inputfile : str
The name of the shaky video.
outputfile : str, optional
The name of new stabilized video. Defaults to appending '_stabilized' to
the input file name.
output_dir : str, optional
The directory to place the output video in. Defaults to the current
working directory.
overwrite_file : bool, optional
If ``outputfile`` already exists in ``output_dir``, then overwrite
``outputfile`` Defaults to True.
"""<if_stmt><not>outputfile<block_start>without_dir=os.path.basename(inputfile)<line_sep>name,ext=os.path.splitext(without_dir)<line_sep>outputfile=f"{name}_stabilized{ext}"<block_end>outputfile=os.path.join(output_dir outputfile)<line_sep>cmd=[FFMPEG_BINARY "-i" inputfile "-vf" "deshake" outputfile]<if_stmt>overwrite_file<block_start>cmd.append("-y")<block_end>subprocess_call(cmd logger=logger)<block_end>
|
# -*- coding: utf-8 -*-
# Copyright 2017 Lockheed Martin Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from calendar import timegm
import re
import logging

from django.conf import settings
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import redirect
from django.utils.timezone import timedelta, now
from django.contrib.auth import login
from django.contrib.auth.models import User
from django.http.response import HttpResponseServerError

logger = logging.getLogger(__name__)


class RequiredInterstitial(object):
    """
    Some organizations may require an acceptable use policy or similar to be displayed upon logon.
    The setting REQUIRED_INTERSTITIAL_DISPLAY_INTERVAL specifies how often the AUP should be displayed,
    in hours, as a positive integer, or 0 to indicate it should be displayed once per application logon.
    Omitting this setting will bypass the interstitial.

    To use:
      - Add to settings.MIDDLEWARE_CLASSES: 'missions.middleware.RequiredInterstitial'
      - Ensure you specify a value in settings for the key REQUIRED_INTERSTITIAL_DISPLAY_INTERVAL
    """

    def process_request(self, request):
        try:
            display_interval = settings.REQUIRED_INTERSTITIAL_DISPLAY_INTERVAL
        except AttributeError:
            # Setting not defined, so assume we don't want the interstitial to display
            return None

        try:
            if display_interval == 0 and request.session['last_acknowledged_interstitial']:
                return None
            else:
                max_age = timedelta(hours=display_interval).total_seconds()
                if timegm(now().timetuple()) - request.session['last_acknowledged_interstitial'] < max_age:
                    return None
        except KeyError:
            pass

        path = request.get_full_path()
        if re.match(str(reverse_lazy('login-interstitial')), path) \
                or re.match(str(reverse_lazy('login')), path) \
                or re.match(str(reverse_lazy('logout')), path) \
                or re.match(settings.STATIC_URL + r'.+', path):
            return None

        return redirect('login-interstitial')
|
from tests.test_helper import *
from braintree.merchant_account.business_details import BusinessDetails


class TestBusinessDetails(unittest.TestCase):
    def test_repr_has_all_fields(self):
        details = BusinessDetails({
            "dba_name": "<NAME>",
            "legal_name": "<NAME>",
            "tax_id": "123001234",
            "address": {
                "street_address": "123 First St",
                "region": "Las Vegas",
                "locality": "NV",
            }
        })
        regex = r"<BusinessDetails {dba_name: '<NAME>', legal_name: '<NAME>', tax_id: '123001234', address_details: <AddressDetails {street_address: '123 First St', locality: 'NV', region: 'Las Vegas'} at \w+>} at \w+>"
        matches = re.match(regex, repr(details))
        self.assertTrue(matches)
|
from .model import HFSeq2SeqTGModel
|
from types import SimpleNamespace

import pytest

from reader._types import entry_data_from_obj
from reader._types import EntryData
from reader._types import FeedData
from reader._types import fix_datetime_tzinfo
from reader._types import tag_filter_argument


TAG_DATA = [
    ([], [None, [], (), [[]], ((),), [[], []]]),
    ([[True]], [True, [True], [[True]]]),
    ([[False]], [False, [False], [[False]]]),
    ([[True], [False]], [[True, False], [[True], [False]]]),
    ([[True, False]], [[[True, False]]]),
    ([[(False, 'one')]], [['one'], [['one']], ['one', []], [[], ['one'], []]]),
    ([[(False, 'one')], [(True, 'two')]], [['one', '-two'], [['one'], ['-two']]]),
    ([[(False, 'one'), (True, 'two')]], [[['one', '-two']]]),
    ([[True], [(False, 'one')]], [[True, 'one'], [True, ['one']], [[True], 'one']]),
    ([[(False, 'one'), False]], [[['one', False]]]),
]
TAG_DATA_FLAT = [(input, expected) for expected, inputs in TAG_DATA for input in inputs]


@pytest.mark.parametrize('input, expected', TAG_DATA_FLAT)
def test_tag_filter_argument(input, expected):
    assert tag_filter_argument(input) == expected


DEFINITELY_NOT_TAGS = [0, 1, 2, {}, set(), object()]

TAG_DATA_BAD = [
    ("argument must be", DEFINITELY_NOT_TAGS + ['', 'one', '-one']),
    ("must be non-empty", [[''], ['-'], [['']], [['-']]]),
    (
        "elements of argument must be",
        [[t] for t in DEFINITELY_NOT_TAGS] + [[[t]] for t in DEFINITELY_NOT_TAGS],
    ),
]
TAG_DATA_BAD_FLAT = [(input, error) for error, inputs in TAG_DATA_BAD for input in inputs]


@pytest.mark.parametrize('input, error', TAG_DATA_BAD_FLAT)
def test_tag_filter_argument_error(input, error):
    with pytest.raises(ValueError) as excinfo:
        tag_filter_argument(input, 'argument')
    assert error in str(excinfo.value)


@pytest.mark.parametrize('data_file', ['full', 'empty'])
def test_entry_data_from_obj(data_dir, data_file):
    expected = {'url_base': '', 'rel_base': ''}
    exec(data_dir.join(f'{data_file}.rss.py').read(), expected)

    for i, entry in enumerate(expected['entries']):
        entry_utc = fix_datetime_tzinfo(entry, 'updated', 'published')

        assert entry == entry_data_from_obj(entry_utc), i

        entry_dict = entry_utc._asdict()
        if 'content' in entry_dict:
            entry_dict['content'] = [c._asdict() for c in entry_dict['content']]
        if 'enclosures' in entry_dict:
            entry_dict['enclosures'] = [e._asdict() for e in entry_dict['enclosures']]

        assert entry == entry_data_from_obj(entry_dict), i


@pytest.mark.parametrize(
    'exc, entry',
    [
        (AttributeError, SimpleNamespace()),
        (AttributeError, SimpleNamespace(feed_url='feed')),
        (AttributeError, SimpleNamespace(id='id')),
        (TypeError, SimpleNamespace(feed_url='feed', id=1)),
        (TypeError, SimpleNamespace(feed_url='feed', id=None)),
        (TypeError, SimpleNamespace(feed_url='feed', id='id', updated=1)),
        (TypeError, SimpleNamespace(feed_url='feed', id='id', title=1)),
        (TypeError, SimpleNamespace(feed_url='feed', id='id', content=1)),
        (
            AttributeError,
            SimpleNamespace(feed_url='feed', id='id', content=[SimpleNamespace()]),
        ),
        (
            TypeError,
            SimpleNamespace(feed_url='feed', id='id', content=[SimpleNamespace(value=1)]),
        ),
        (
            TypeError,
            SimpleNamespace(
                feed_url='feed', id='id', content=[SimpleNamespace(value='value', type=1)]
            ),
        ),
        (
            AttributeError,
            SimpleNamespace(feed_url='feed', id='id', enclosures=[SimpleNamespace()]),
        ),
        (
            TypeError,
            SimpleNamespace(feed_url='feed', id='id', enclosures=[SimpleNamespace(href=1)]),
        ),
        (
            TypeError,
            SimpleNamespace(
                feed_url='feed', id='id', enclosures=[SimpleNamespace(href='href', type=1)]
            ),
        ),
        (
            TypeError,
            SimpleNamespace(
                feed_url='feed', id='id', enclosures=[SimpleNamespace(href='href', length='1')]
            ),
        ),
    ],
)
def test_entry_data_from_obj_errors(exc, entry):
    with pytest.raises(exc):
        entry_data_from_obj(entry)

    with pytest.raises(exc):
        entry_dict = dict(vars(entry))
        if 'content' in entry_dict:
            entry_dict['content'] = [dict(vars(c)) for c in entry_dict['content']]
        if 'enclosures' in entry_dict:
            entry_dict['enclosures'] = [dict(vars(e)) for e in entry_dict['enclosures']]
        entry_data_from_obj(entry_dict)
|
import numpy
from pyscf import gto, scf, mcscf

'''
Scan BeH2 molecule symmetric dissociation curve

Note the CI wave function might change symmetry in the scanning. Adjust
fcisolver parameters to maintain the right symmetry.
'''


def run(i, dm0, mo0, ci0):
    x = i
    y = (2.54 - 0.46 * x)
    x = x * 0.529177249
    y = y * 0.529177249
    mol = gto.M(
        verbose=0,
        atom=[
            ['Be', (0., 0., 0.)],
            ['H', (x, -y, 0.)],
            ['H', (x, y, 0.)],
        ],
        basis='6-311G',
        symmetry=True)
    mf = scf.RHF(mol)
    ehf = mf.scf(dm0)

    mc = mcscf.CASSCF(mf, 2, 2)
    # force the CI solver to stick on the (A1)^2(B1)^0 configuration
    mc.fcisolver.davidson_only = True
    if mo0 is not None:
        mo0 = mcscf.project_init_guess(mc, mo0)
    emc = mc.mc1step(mo0, ci0)[0]
    print('%2.1f bohr, HF energy: %12.8f, CASSCF energy: %12.8f' % (i, ehf, emc))
    return mf, mc


dm0 = mo0 = ci = None
for i in reversed(numpy.arange(1.0, 4.1, .1)):
    mf, mc = run(i, dm0, mo0, ci)
    dm0 = mf.make_rdm1()
    mo_coeff = mc.mo_coeff
    ci = mc.ci
|
import pytest

from users.signals import warn_insecure


@pytest.mark.django_db
def test_warn_insecure_emits_a_warning_when_no_user_exists(recwarn):
    # When
    warn_insecure(None)

    # Then
    assert len(recwarn) == 1
    w = recwarn.pop()
    assert issubclass(w.category, RuntimeWarning)


@pytest.mark.django_db
def test_warn_insecure_emits_no_warning_when_user_exists(admin_user, recwarn):
    # When
    warn_insecure(None)

    # Then
    assert len(recwarn) == 0
|
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2012 <NAME>, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import gitutils
import htmlutils
import diff

from operation import Operation, OperationResult


class FetchLines(Operation):
    def __init__(self):
        Operation.__init__(self, {"repository_id": int,
                                  "path": str,
                                  "sha1": str,
                                  "ranges": [{"offset": int,
                                              "count": int,
                                              "context": bool}],
                                  "tabify": bool},
                           accept_anonymous_user=True)

    def process(self, db, user, repository_id, path, sha1, ranges, tabify):
        repository = gitutils.Repository.fromId(db, repository_id)
        cursor = db.cursor()

        def getContext(offset):
            cursor.execute("""SELECT context
                                FROM codecontexts
                               WHERE sha1=%s
                                 AND %s BETWEEN first_line AND last_line
                            ORDER BY first_line DESC
                               LIMIT 1""",
                           (sha1, offset))

            row = cursor.fetchone()

            if row:
                return row[0]
            else:
                return None

        file = diff.File(repository=repository, path=path, new_sha1=sha1)
        file.loadNewLines(highlighted=True, request_highlight=True)

        if tabify:
            tabwidth = file.getTabWidth()
            indenttabsmode = file.getIndentTabsMode()

        def processRange(offset, count, context):
            if context:
                context = getContext(offset)
            else:
                context = None

            # Offset is a 1-based line number.
            start = offset - 1
            # If count is -1, fetch all lines.
            end = start + count if count > -1 else None

            lines = file.newLines(highlighted=True)[start:end]

            if tabify:
                lines = [htmlutils.tabify(line, tabwidth, indenttabsmode) for line in lines]

            return {"lines": lines, "context": context}

        return OperationResult(ranges=[processRange(**line_range) for line_range in ranges])
|
#
# This file is part of LiteX-Boards.
# FPGA Board Info : https://shop.trenz-electronic.de/en/TE0725-03-35-2C-FPGA-Module-with-Xilinx-Artix-7-XC7A35T-2CSG324C-2-x-50-Pin-with-2.54-mm-pitch
#
# Copyright (c) 2021 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform
from litex.build.openocd import OpenOCD

# IOs ----------------------------------------------------------------------------------------------

_io = [
    # Clk / Rst
    ("clk100",    0, Pins("P17"), IOStandard("LVCMOS33")),
    ("cpu_reset", 0, Pins("T8"),  IOStandard("LVCMOS33")),

    # Leds
    ("user_led", 0, Pins("M16"), IOStandard("LVCMOS33")),

    # Serial
    ("serial", 0,
        Subsignal("tx", Pins("L18")),
        Subsignal("rx", Pins("M18")),
        IOStandard("LVCMOS33")
    ),

    # SPIFlash
    ("spiflash", 0,
        Subsignal("cs_n", Pins("L13")),
        Subsignal("clk",  Pins("E9")),
        Subsignal("mosi", Pins("K17")),
        Subsignal("miso", Pins("K18")),
        Subsignal("wp",   Pins("L14")),
        Subsignal("hold", Pins("M14")),
        IOStandard("LVCMOS33"),
    ),
    ("spiflash4x", 0,
        Subsignal("cs_n", Pins("L13")),
        Subsignal("clk",  Pins("E9")),
        Subsignal("dq",   Pins("K17 K18 L14 M14")),
        IOStandard("LVCMOS33")
    ),

    # HyperRAM
    ("hyperram", 0,
        Subsignal("dq",    Pins("E17 B17 F18 F16 G17 D18 B18 A16"), IOStandard("SSTL18_II")),
        Subsignal("rwds",  Pins("E18"), IOStandard("SSTL18_II")),
        Subsignal("cs_n",  Pins("D17"), IOStandard("SSTL18_II")),
        Subsignal("rst_n", Pins("J17"), IOStandard("SSTL18_II")),
        Subsignal("clk_p", Pins("A13"), IOStandard("DIFF_SSTL18_II")),
        Subsignal("clk_n", Pins("A14"), IOStandard("DIFF_SSTL18_II")),
        Misc("SLEW=FAST"),
    ),
]

# Connectors ---------------------------------------------------------------------------------------

_connectors = [
    ("j1",
        "C6 C5 B7 B6 A6 A5 D8 C7",
        "E6 E5 E7 D7 C4 B4 A4 A3",
        "B1 A1 B3 B2 D5 D4 E3 D3",
        "F4 F3 E2 D2 H2 G2 C2 C1",
        "H1 G1 F1 E1 G6 F6 J3 J2",
        "K2 K1"),
    ("j2",
        "L1 M1 N2 N1 M3 M2 U1 V1",
        "U4 U3 U2 V2 V5 V4 R3 T3",
        "T5 T4 N5 P5 P4 P3 P2 R2",
        "M4 N4 R1 T1 M6 N6 R6 R5",
        "V7 V6 U9 V9 U7 U6 R7 T6",
        "R8"),
]

# Platform -----------------------------------------------------------------------------------------

class Platform(XilinxPlatform):
    default_clk_name   = "clk100"
    default_clk_period = 1e9/100e6

    def __init__(self):
        XilinxPlatform.__init__(self, "xc7a35tcsg324-2", _io, _connectors, toolchain="vivado")
        self.toolchain.bitstream_commands = \
            ["set_property BITSTREAM.CONFIG.SPI_BUSWIDTH 4 [current_design]"]
        # "set_property SEVERITY {{Warning}} [get_drc_checks UCIO-1]"]
        self.toolchain.additional_commands = \
            ["write_cfgmem -force -format bin -interface spix4 -size 16"
             " -loadbit \"up 0x0 {build_name}.bit\" -file {build_name}.bin"]
        self.add_platform_command("set_property CFGBVS VCCO [current_design]")
        self.add_platform_command("set_property CONFIG_VOLTAGE 3.3 [current_design]")

    def create_programmer(self):
        return OpenOCD("openocd_xc7_ft2232.cfg", "bscan_spi_xc7a35t.bit")

    def do_finalize(self, fragment):
        XilinxPlatform.do_finalize(self, fragment)
        self.add_period_constraint(self.lookup_request("clk100", loose=True), 1e9/100e6)
|
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>six.moves range<import_stmt>os<import_stmt>logging<line_sep>logging.basicConfig(level=logging.DEBUG)<import_stmt>sys<line_sep>#sys.stdout = sys.stderr
# Prevent reaching to maximum recursion depth in `theano.tensor.grad`
#sys.setrecursionlimit(2 ** 20)
<import_stmt>numpy<as>np<line_sep>np.random.seed(2<power>10)<import_from_stmt>tensorflow.keras.datasets cifar10<import_from_stmt>tensorflow.keras.models Model<import_from_stmt>tensorflow.keras.layers Conv2D AveragePooling2D BatchNormalization Dropout Input Activation Add Dense Flatten<import_from_stmt>tensorflow.keras.optimizers SGD<import_from_stmt>tensorflow.keras.regularizers l2<import_from_stmt>tensorflow.keras.callbacks LearningRateScheduler ModelCheckpoint<import_from_stmt>tensorflow.keras.preprocessing.image ImageDataGenerator<import_from_stmt>tensorflow.keras.utils to_categorical<import_from_stmt>tensorflow.keras backend<as>K<import_from_stmt>utils mk_dir<line_sep># ================================================
# DATA CONFIGURATION:
logging.debug("Loading data...")<line_sep>nb_classes=10<line_sep>image_size=32<line_sep>(X_train y_train),(X_test y_test)=cifar10.load_data()<line_sep>X_train=X_train.astype('float32')<line_sep>X_test=X_test.astype('float32')<line_sep># convert class vectors to binary class matrices
Y_train=to_categorical(y_train nb_classes)<line_sep>Y_test=to_categorical(y_test nb_classes)<line_sep># ================================================
# ================================================
# NETWORK/TRAINING CONFIGURATION:
logging.debug("Loading network/training configuration...")<line_sep>depth=28# table 5 on page 8 indicates best value (4.17) CIFAR-10
k=10# 'widen_factor'; table 5 on page 8 indicates best value (4.17) CIFAR-10
dropout_probability=0# table 6 on page 10 indicates best value (4.17) CIFAR-10
weight_decay=0.0005# page 10: "Used in all experiments"
batch_size=128# page 8: "Used in all experiments"
# Regarding nb_epochs, lr_schedule and sgd, see the bottom of page 10:
nb_epochs=200<line_sep>lr_schedule=[60 120 160]# epoch_step
<def_stmt>schedule epoch_idx<block_start><if_stmt>(epoch_idx+1)<l>lr_schedule[0]<block_start><return>0.1<block_end><elif_stmt>(epoch_idx+1)<l>lr_schedule[1]<block_start><return>0.02# lr_decay_ratio = 0.2
<block_end><elif_stmt>(epoch_idx+1)<l>lr_schedule[2]<block_start><return>0.004<block_end><return>0.0008<block_end>sgd=SGD(lr=0.1 momentum=0.9 nesterov=<true>)<line_sep># Other config from code; throughout all layers:
use_bias=<false># following functions 'FCinit(model)' and 'DisableBias(model)' in utils.lua
weight_init="he_normal"# follows the 'MSRinit(model)' function in utils.lua
# Keras specific
<if_stmt>K.image_data_format()<eq>"th"<block_start>logging.debug("image_dim_ordering = 'th'")<line_sep>channel_axis=1<line_sep>input_shape=(3 image_size image_size)<block_end><else_stmt><block_start>logging.debug("image_dim_ordering = 'tf'")<line_sep>channel_axis=-1<line_sep>input_shape=(image_size image_size 3)<block_end># ================================================
# ================================================
# OUTPUT CONFIGURATION:
print_model_summary=<true><line_sep>save_model=<true><line_sep>save_model_plot=<false><line_sep>MODEL_PATH=os.environ.get('MODEL_PATH' 'models/')<line_sep>CHECKPOINT_PATH=os.environ.get('CHECKPOINT_PATH' 'checkpoints/')<line_sep># ================================================
# Wide residual network http://arxiv.org/abs/1605.07146
<def_stmt>_wide_basic n_input_plane n_output_plane stride<block_start><def_stmt>f net# format of conv_params:
# [ [nb_col="kernel width", nb_row="kernel height",
# subsample="(stride_vertical,stride_horizontal)",
# border_mode="same" or "valid"] ]
# B(3,3): original <<basic>> block
<block_start>conv_params=[[3 3 stride "same"] [3 3 (1 1) "same"]]<line_sep>n_bottleneck_plane=n_output_plane<line_sep># Residual block
<for_stmt>i,v enumerate(conv_params)<block_start><if_stmt>i<eq>0<block_start><if_stmt>n_input_plane<ne>n_output_plane<block_start>net=BatchNormalization(axis=channel_axis)(net)<line_sep>net=Activation("relu")(net)<line_sep>convs=net<block_end><else_stmt><block_start>convs=BatchNormalization(axis=channel_axis)(net)<line_sep>convs=Activation("relu")(convs)<block_end>convs=Conv2D(n_bottleneck_plane (v[0] v[1]) strides=v[2] padding=v[3] kernel_initializer=weight_init kernel_regularizer=l2(weight_decay) use_bias=use_bias)(convs)<block_end><else_stmt><block_start>convs=BatchNormalization(axis=channel_axis)(convs)<line_sep>convs=Activation("relu")(convs)<if_stmt>dropout_probability<g>0<block_start>convs=Dropout(dropout_probability)(convs)<block_end>convs=Conv2D(n_bottleneck_plane (v[0] v[1]) strides=v[2] padding=v[3] kernel_initializer=weight_init kernel_regularizer=l2(weight_decay) use_bias=use_bias)(convs)<block_end><block_end># Shortcut Conntection: identity function or 1x1 convolutional
# (depends on difference between input & output shape - this
# corresponds to whether we are using the first block in each
# group; see _layer() ).
<if_stmt>n_input_plane<ne>n_output_plane<block_start>shortcut=Conv2D(n_output_plane (1 1) strides=stride padding="same" kernel_initializer=weight_init kernel_regularizer=l2(weight_decay) use_bias=use_bias)(net)<block_end><else_stmt><block_start>shortcut=net<block_end><return>Add()([convs shortcut])<block_end><return>f<block_end># "Stacking Residual Units on the same stage"
<def_stmt>_layer block n_input_plane n_output_plane count stride<block_start><def_stmt>f net<block_start>net=block(n_input_plane n_output_plane stride)(net)<for_stmt>i range(2 int(count+1))<block_start>net=block(n_output_plane n_output_plane stride=(1 1))(net)<block_end><return>net<block_end><return>f<block_end><def_stmt>create_model <block_start>logging.debug("Creating model...")<assert_stmt>((depth-4)%6<eq>0)<line_sep>n=(depth-4)/6<line_sep>inputs=Input(shape=input_shape)<line_sep>n_stages=[16 16<times>k 32<times>k 64<times>k]<line_sep>conv1=Conv2D(n_stages[0] (3 3) strides=1 padding="same" kernel_initializer=weight_init kernel_regularizer=l2(weight_decay) use_bias=use_bias)(inputs)<line_sep># "One conv at the beginning (spatial size: 32x32)"
# Add wide residual blocks
block_fn=_wide_basic<line_sep>conv2=_layer(block_fn n_input_plane=n_stages[0] n_output_plane=n_stages[1] count=n stride=(1 1))(conv1)# "Stage 1 (spatial size: 32x32)"
conv3=_layer(block_fn n_input_plane=n_stages[1] n_output_plane=n_stages[2] count=n stride=(2 2))(conv2)# "Stage 2 (spatial size: 16x16)"
conv4=_layer(block_fn n_input_plane=n_stages[2] n_output_plane=n_stages[3] count=n stride=(2 2))(conv3)# "Stage 3 (spatial size: 8x8)"
batch_norm=BatchNormalization(axis=channel_axis)(conv4)<line_sep>relu=Activation("relu")(batch_norm)<line_sep># Classifier block
pool=AveragePooling2D(pool_size=(8 8) strides=(1 1) padding="same")(relu)<line_sep>flatten=Flatten()(pool)<line_sep>predictions=Dense(units=nb_classes kernel_initializer=weight_init use_bias=use_bias kernel_regularizer=l2(weight_decay) activation="softmax")(flatten)<line_sep>model=Model(inputs=inputs outputs=predictions)<line_sep><return>model<block_end><if_stmt>__name__<eq>'__main__'<block_start>model=create_model()<line_sep>model.compile(optimizer=sgd loss="categorical_crossentropy" metrics=['accuracy'])<if_stmt>print_model_summary<block_start>logging.debug("Model summary...")<line_sep>model.count_params()<line_sep>model.summary()<block_end><if_stmt>save_model_plot<block_start>logging.debug("Saving model plot...")<line_sep>mk_dir(MODEL_PATH)<import_from_stmt>tensorflow.keras.utils plot_model<line_sep>plot_model(model to_file=os.path.join(MODEL_PATH 'WRN-{0}-{1}.png'.format(depth k)) show_shapes=<true>)<block_end># Data Augmentation based on page 6 (see README for full details)
logging.debug("Creating ImageDataGenerators...")<line_sep>train_datagen=ImageDataGenerator(featurewise_center=<true> featurewise_std_normalization=<true> zca_whitening=<true> horizontal_flip=<true>)<line_sep>train_datagen.fit(X_train augment=<true> rounds=2)<line_sep>test_datagen=ImageDataGenerator(featurewise_center=<true> featurewise_std_normalization=<true> zca_whitening=<true>)<line_sep>test_datagen.fit(X_train)<line_sep>mk_dir(CHECKPOINT_PATH)<line_sep>callbacks=[LearningRateScheduler(schedule=schedule) ModelCheckpoint(CHECKPOINT_PATH+'/weights.{epoch:02d}-{val_loss:.2f}.hdf5' monitor='val_loss' verbose=1 save_best_only=<true> mode='auto')]<line_sep>logging.debug("Running training...")<line_sep># fit the model on the batches generated by train_datagen.flow()
model.fit(train_datagen.flow(X_train Y_train batch_size=batch_size shuffle=<true>) steps_per_epoch=X_train.shape[0]/batch_size epochs=nb_epochs validation_data=test_datagen.flow(X_test Y_test batch_size=batch_size) callbacks=callbacks)<if_stmt>save_model<block_start>logging.debug("Saving model...")<line_sep>mk_dir(MODEL_PATH)<line_sep>model.save(os.path.join(MODEL_PATH 'WRN-{0}-{1}.h5'.format(depth k)) overwrite=<true>)<block_end><block_end>
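As a quick sanity check of the step schedule above (a sketch; epoch indices are 0-based, as Keras passes them to LearningRateScheduler):
# Sketch: learning rate at the schedule boundaries.
for epoch_idx in (0, 58, 59, 118, 119, 158, 159, 199):
    print(epoch_idx, schedule(epoch_idx))
# epochs 0-58 -> 0.1, 59-118 -> 0.02, 119-158 -> 0.004, 159-199 -> 0.0008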
|
default_app_config="tests.testapp.apps.WagtailmediaTestsAppConfig"<line_sep>
|
<import_stmt>types<import_stmt>time<import_stmt>random<import_stmt>clip<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torchvision.transforms<as>transforms<import_from_stmt>argparse ArgumentParser<import_stmt>pytorch_lightning<as>pl<import_from_stmt>encoding.models get_segmentation_model<import_from_stmt>encoding.nn SegmentationLosses<import_from_stmt>encoding.utils batch_pix_accuracy batch_intersection_union<line_sep># add mixed precision
<import_stmt>torch.cuda.amp<as>amp<import_stmt>numpy<as>np<import_from_stmt>encoding.utils.metrics SegmentationMetric<line_sep># get fewshot dataloader
<import_from_stmt>fewshot_data.model.hsnet HypercorrSqueezeNetwork<import_from_stmt>fewshot_data.common.logger Logger AverageMeter<import_from_stmt>fewshot_data.common.evaluation Evaluator<import_from_stmt>fewshot_data.common utils<import_from_stmt>fewshot_data.data.dataset FSSDataset<class_stmt>Fewshot_args<block_start>datapath='fewshot_data/Datasets_HSN'<line_sep>benchmark='pascal'<line_sep>logpath=''<line_sep>nworker=8<line_sep>bsz=20<line_sep>fold=0<block_end><class_stmt>LSegmentationModuleZS(pl.LightningModule)<block_start><def_stmt>__init__ self data_path dataset batch_size base_lr max_epochs **kwargs<block_start>super().__init__()<line_sep>self.batch_size=batch_size<line_sep>self.base_lr=base_lr/16<times>batch_size<line_sep>self.lr=self.base_lr<line_sep>self.epochs=max_epochs<line_sep>self.other_kwargs=kwargs<line_sep>self.enabled=<false>#True mixed precision will make things complicated and leading to NAN error
self.scaler=amp.GradScaler(enabled=self.enabled)<line_sep># whether or not to fix the encoder
self.fixed_encoder=<true><if>kwargs["use_pretrained"]<in>['clip_fixed']<else><false><line_sep># fewshot hyperparameters
self.cross_entropy_loss=nn.CrossEntropyLoss()<line_sep>self.args=self.get_fewshot_args()<if_stmt>data_path<block_start>self.args.datapath=data_path<block_end>self.args.logpath=self.other_kwargs["logpath"]<line_sep>self.args.benchmark=dataset<line_sep>self.args.bsz=self.batch_size<line_sep>self.args.fold=self.other_kwargs["fold"]<line_sep>self.args.nshot=self.other_kwargs["nshot"]<line_sep>self.args.finetune_mode=self.other_kwargs["finetune_mode"]<line_sep>Logger.initialize(self.args training=<true>)<line_sep>Evaluator.initialize()<if_stmt>kwargs["backbone"]<in>["clip_resnet101"]<block_start>FSSDataset.initialize(img_size=480 datapath=self.args.datapath use_original_imgsize=<false> imagenet_norm=<true>)<block_end><else_stmt><block_start>FSSDataset.initialize(img_size=480 datapath=self.args.datapath use_original_imgsize=<false>)<block_end>self.best_val_miou=float('-inf')<line_sep>self.num_classes=2<line_sep>self.labels=['others' '']<line_sep>self.fewshot_trn_loss=100<line_sep>self.fewshot_trn_miou=0<line_sep>self.fewshot_trn_fb_iou=0<block_end><def_stmt>get_fewshot_args self<block_start><return>Fewshot_args()<block_end><def_stmt>forward self x class_info<block_start><return>self.net(x class_info)<block_end><def_stmt>training_step self batch batch_nb<block_start><if_stmt>self.args.finetune_mode<block_start><if_stmt>self.args.nshot<eq>5<block_start>bshape=batch['support_imgs'].shape<line_sep>img=batch['support_imgs'].view(-1 bshape[2] bshape[3] bshape[4])<line_sep>target=batch['support_masks'].view(-1 bshape[3] bshape[4])<line_sep>class_info=batch['class_id']<for_stmt>i range(1 5)<block_start>class_info=torch.cat([class_info batch['class_id']])<block_end><with_stmt>amp.autocast(enabled=self.enabled)<block_start>out=self(img class_info)<line_sep>loss=self.criterion(out target)<line_sep>loss=self.scaler.scale(loss)<block_end>self.log("train_loss" loss)<line_sep># 3. Evaluate prediction
<if_stmt>self.args.benchmark<eq>'pascal'<and>batch['support_ignore_idxs']<is><not><none><block_start>query_ignore_idx=batch['support_ignore_idxs'].view(-1 bshape[3] bshape[4])<line_sep>area_inter,area_union=Evaluator.classify_prediction(out.argmax(dim=1) target query_ignore_idx)<block_end><else_stmt><block_start>area_inter,area_union=Evaluator.classify_prediction(out.argmax(dim=1) target)<block_end><block_end><else_stmt><block_start>img=batch['support_imgs'].squeeze(1)<line_sep>target=batch['support_masks'].squeeze(1)<line_sep>class_info=batch['class_id']<with_stmt>amp.autocast(enabled=self.enabled)<block_start>out=self(img class_info)<line_sep>loss=self.criterion(out target)<line_sep>loss=self.scaler.scale(loss)<block_end>self.log("train_loss" loss)<line_sep># 3. Evaluate prediction
<if_stmt>self.args.benchmark<eq>'pascal'<and>batch['support_ignore_idxs']<is><not><none><block_start>query_ignore_idx=batch['support_ignore_idxs'].squeeze(1)<line_sep>area_inter,area_union=Evaluator.classify_prediction(out.argmax(dim=1) target query_ignore_idx)<block_end><else_stmt><block_start>area_inter,area_union=Evaluator.classify_prediction(out.argmax(dim=1) target)<block_end><block_end><block_end><else_stmt><block_start>img=torch.cat([batch['support_imgs'].squeeze(1) batch['query_img']] dim=0)<line_sep>target=torch.cat([batch['support_masks'].squeeze(1) batch['query_mask']] dim=0)<line_sep>class_info=torch.cat([batch['class_id'] batch['class_id']] dim=0)<with_stmt>amp.autocast(enabled=self.enabled)<block_start>out=self(img class_info)<line_sep>loss=self.criterion(out target)<line_sep>loss=self.scaler.scale(loss)<block_end>self.log("train_loss" loss)<line_sep># 3. Evaluate prediction
<if_stmt>self.args.benchmark<eq>'pascal'<and>batch['query_ignore_idx']<is><not><none><block_start>query_ignore_idx=torch.cat([batch['support_ignore_idxs'].squeeze(1) batch['query_ignore_idx']] dim=0)<line_sep>area_inter,area_union=Evaluator.classify_prediction(out.argmax(dim=1) target query_ignore_idx)<block_end><else_stmt><block_start>area_inter,area_union=Evaluator.classify_prediction(out.argmax(dim=1) target)<block_end><block_end>self.train_average_meter.update(area_inter area_union class_info loss.detach().clone())<if_stmt>self.global_rank<eq>0<block_start>return_value=self.train_average_meter.write_process(batch_nb self.len_train_dataloader self.current_epoch write_batch_idx=50)<if_stmt>return_value<is><not><none><block_start>iou,fb_iou=return_value<line_sep>self.log("fewshot_train_iou" iou)<line_sep>self.log("fewshot_trainl_fb_iou" fb_iou)<block_end><block_end><return>loss<block_end><def_stmt>training_epoch_end self outs<block_start><if_stmt>self.global_rank<eq>0<block_start>self.train_average_meter.write_result('Training' self.current_epoch)<block_end>self.fewshot_trn_loss=utils.mean(self.train_average_meter.loss_buf)<line_sep>self.fewshot_trn_miou,self.fewshot_trn_fb_iou=self.train_average_meter.compute_iou()<line_sep>self.log("fewshot_trn_loss" self.fewshot_trn_loss)<line_sep>self.log("fewshot_trn_miou" self.fewshot_trn_miou)<line_sep>self.log("fewshot_trn_fb_iou" self.fewshot_trn_fb_iou)<block_end><def_stmt>validation_step self batch batch_nb<block_start><if_stmt>self.args.finetune_mode<and>self.args.nshot<eq>5<block_start>bshape=batch['query_img'].shape<line_sep>img=batch['query_img'].view(-1 bshape[2] bshape[3] bshape[4])<line_sep>target=batch['query_mask'].view(-1 bshape[3] bshape[4])<line_sep>class_info=batch['class_id']<for_stmt>i range(1 5)<block_start>class_info=torch.cat([class_info batch['class_id']])<block_end>out=self(img class_info)<line_sep>val_loss=self.criterion(out target)<line_sep># 3. Evaluate prediction
<if_stmt>self.args.benchmark<eq>'pascal'<and>batch['query_ignore_idx']<is><not><none><block_start>query_ignore_idx=batch['query_ignore_idx'].view(-1 bshape[3] bshape[4])<line_sep>area_inter,area_union=Evaluator.classify_prediction(out.argmax(dim=1) target query_ignore_idx)<block_end><else_stmt><block_start>area_inter,area_union=Evaluator.classify_prediction(out.argmax(dim=1) target)<block_end><block_end><else_stmt><block_start>img=batch['query_img'].squeeze(1)<line_sep>target=batch['query_mask'].squeeze(1)<line_sep>class_info=batch['class_id']<line_sep>out=self(img class_info)<line_sep>val_loss=self.criterion(out target)<line_sep># 3. Evaluate prediction
<if_stmt>self.args.benchmark<eq>'pascal'<and>batch['query_ignore_idx']<is><not><none><block_start>query_ignore_idx=batch['query_ignore_idx'].squeeze(1)<line_sep>area_inter,area_union=Evaluator.classify_prediction(out.argmax(dim=1) target query_ignore_idx)<block_end><else_stmt><block_start>area_inter,area_union=Evaluator.classify_prediction(out.argmax(dim=1) target)<block_end><block_end>self.val_average_meter.update(area_inter area_union class_info val_loss.detach().clone())<if_stmt>self.global_rank<eq>0<block_start>return_value=self.val_average_meter.write_process(batch_nb self.len_val_dataloader self.current_epoch write_batch_idx=50)<if_stmt>return_value<is><not><none><block_start>iou,fb_iou=return_value<line_sep>self.log("fewshot_val_iou" iou)<line_sep>self.log("fewshot_val_fb_iou" fb_iou)<block_end><block_end><block_end><def_stmt>validation_epoch_end self outs<block_start><if_stmt>self.global_rank<eq>0<block_start>self.val_average_meter.write_result('Validation' self.current_epoch)<block_end>val_loss=utils.mean(self.val_average_meter.loss_buf)<line_sep>val_miou,val_fb_iou=self.val_average_meter.compute_iou()<line_sep>self.log("fewshot_val_loss" val_loss)<line_sep>self.log("fewshot_val_miou" val_miou)<line_sep>self.log("fewshot_val_fb_iou" val_fb_iou)<if_stmt>self.global_rank<eq>0<block_start>Logger.tbd_writer.add_scalars('fewshot_data/data/loss' {'trn_loss':self.fewshot_trn_loss 'val_loss':val_loss} self.current_epoch)<line_sep>Logger.tbd_writer.add_scalars('fewshot_data/data/miou' {'trn_miou':self.fewshot_trn_miou 'val_miou':val_miou} self.current_epoch)<line_sep>Logger.tbd_writer.add_scalars('fewshot_data/data/fb_iou' {'trn_fb_iou':self.fewshot_trn_fb_iou 'val_fb_iou':val_fb_iou} self.current_epoch)<line_sep>Logger.tbd_writer.flush()<if_stmt>self.current_epoch+1<eq>self.epochs<block_start>Logger.tbd_writer.close()<line_sep>Logger.info('==================== Finished Training ====================')<block_end><block_end>threshold_epoch=3<if_stmt>self.args.benchmark<in>['pascal' 'coco']<and>self.current_epoch<ge>threshold_epoch<block_start>print('End this loop!')<line_sep>exit()<block_end><block_end><def_stmt>configure_optimizers self# if we want to fix the encoder
<block_start><if_stmt>self.fixed_encoder<block_start>params_list=[{"params":self.net.pretrained.model.parameters() "lr":0} ]<line_sep>params_list.append({"params":self.net.pretrained.act_postprocess1.parameters() "lr":self.base_lr})<line_sep>params_list.append({"params":self.net.pretrained.act_postprocess2.parameters() "lr":self.base_lr})<line_sep>params_list.append({"params":self.net.pretrained.act_postprocess3.parameters() "lr":self.base_lr})<line_sep>params_list.append({"params":self.net.pretrained.act_postprocess4.parameters() "lr":self.base_lr})<block_end><else_stmt><block_start>params_list=[{"params":self.net.pretrained.parameters() "lr":self.base_lr} ]<block_end><if_stmt>hasattr(self.net "scratch")<block_start>print("Found output scratch")<line_sep>params_list.append({"params":self.net.scratch.parameters() "lr":self.base_lr<times>10})<block_end><if_stmt>hasattr(self.net "auxlayer")<block_start>print("Found auxlayer")<line_sep>params_list.append({"params":self.net.auxlayer.parameters() "lr":self.base_lr<times>10})<block_end><if_stmt>hasattr(self.net "scale_inv_conv")<block_start>print(self.net.scale_inv_conv)<line_sep>print("Found scaleinv layers")<line_sep>params_list.append({"params":self.net.scale_inv_conv.parameters() "lr":self.base_lr<times>10 })<line_sep>params_list.append({"params":self.net.scale2_conv.parameters() "lr":self.base_lr<times>10})<line_sep>params_list.append({"params":self.net.scale3_conv.parameters() "lr":self.base_lr<times>10})<line_sep>params_list.append({"params":self.net.scale4_conv.parameters() "lr":self.base_lr<times>10})<block_end><if_stmt>self.other_kwargs["midasproto"]<block_start>print("Using midas optimization protocol")<line_sep>opt=torch.optim.Adam(params_list lr=self.base_lr betas=(0.9 0.999) weight_decay=self.other_kwargs["weight_decay"] )<line_sep>sch=torch.optim.lr_scheduler.LambdaLR(opt <lambda>x:pow(1.0-x/self.epochs 0.9))<block_end><else_stmt><block_start>opt=torch.optim.SGD(params_list lr=self.base_lr momentum=0.9 weight_decay=self.other_kwargs["weight_decay"] )<line_sep>sch=torch.optim.lr_scheduler.LambdaLR(opt <lambda>x:pow(1.0-x/self.epochs 0.9))<block_end><return>[opt] [sch]<block_end><def_stmt>train_dataloader self<block_start><if_stmt>self.args.finetune_mode<block_start>dataloader=FSSDataset.build_dataloader(self.args.benchmark self.args.bsz self.args.nworker self.args.fold 'test' self.args.nshot)<block_end><else_stmt><block_start>dataloader=FSSDataset.build_dataloader(self.args.benchmark self.args.bsz self.args.nworker self.args.fold 'trn')<block_end>self.len_train_dataloader=len(dataloader)<floordiv>torch.cuda.device_count()<line_sep>self.train_average_meter=AverageMeter(dataloader.dataset)<line_sep><return>dataloader<block_end><def_stmt>val_dataloader self<block_start>self.val_iou=SegmentationMetric(self.num_classes)<if_stmt>self.args.finetune_mode<block_start>dataloader=FSSDataset.build_dataloader(self.args.benchmark self.args.bsz self.args.nworker self.args.fold 'test' self.args.nshot)<block_end><else_stmt><block_start>dataloader=FSSDataset.build_dataloader(self.args.benchmark self.args.bsz self.args.nworker self.args.fold 'val')<block_end>self.len_val_dataloader=len(dataloader)<floordiv>torch.cuda.device_count()<line_sep>self.val_average_meter=AverageMeter(dataloader.dataset)<line_sep><return>dataloader<block_end><def_stmt>criterion self logit_mask gt_mask<block_start>bsz=logit_mask.size(0)<line_sep>logit_mask=logit_mask.view(bsz 2 -1)<line_sep>gt_mask=gt_mask.view(bsz -1).long()<line_sep><return>self.cross_entropy_loss(logit_mask 
gt_mask)<block_end>@staticmethod<def_stmt>add_model_specific_args parent_parser<block_start>parser=ArgumentParser(parents=[parent_parser] add_help=<false>)<line_sep>parser.add_argument("--data_path" type=str default='' help="path where dataset is stored")<line_sep>parser.add_argument("--dataset" type=str default='pascal' choices=['pascal' 'coco' 'fss'] )<line_sep>parser.add_argument("--batch_size" type=int default=20 help="size of the batches")<line_sep>parser.add_argument("--base_lr" type=float default=0.004 help="learning rate")<line_sep>parser.add_argument("--momentum" type=float default=0.9 help="SGD momentum")<line_sep>parser.add_argument("--weight_decay" type=float default=1e-4 help="weight_decay")<line_sep>parser.add_argument("--aux" action="store_true" default=<false> help="Auxilary Loss")<line_sep>parser.add_argument("--aux-weight" type=float default=0.2 help="Auxilary loss weight (default: 0.2)" )<line_sep>parser.add_argument("--se-loss" action="store_true" default=<false> help="Semantic Encoding Loss SE-loss" )<line_sep>parser.add_argument("--se-weight" type=float default=0.2 help="SE-loss weight (default: 0.2)")<line_sep>parser.add_argument("--midasproto" action="store_true" default=<false> help="midasprotocol")<line_sep>parser.add_argument("--ignore_index" type=int default=-1 help="numeric value of ignore label in gt" )<line_sep>parser.add_argument("--augment" action="store_true" default=<false> help="Use extended augmentations" )<line_sep>parser.add_argument("--use_relabeled" action="store_true" default=<false> help="Use extended augmentations" )<line_sep>parser.add_argument("--nworker" type=int default=8)<line_sep>parser.add_argument("--fold" type=int default=0 choices=[0 1 2 3])<line_sep>parser.add_argument("--logpath" type=str default='')<line_sep>parser.add_argument("--nshot" type=int default=0#1
)<line_sep>parser.add_argument("--finetune_mode" action="store_true" default=<false> help="whether finetune or not")<line_sep><return>parser<block_end><block_end>
|
# encoding: utf-8
<import_stmt>os getpass<import_stmt>os.path<as>osp<import_stmt>argparse<import_from_stmt>easydict EasyDict<as>edict<import_from_stmt>dataset.data_settings load_dataset<import_from_stmt>cvpack.utils.pyt_utils ensure_dir<class_stmt>Config# -------- Directory Config -------- #
<block_start>ROOT_DIR=os.environ['PROJECT_HOME']<line_sep>OUTPUT_DIR=osp.join(ROOT_DIR 'model_logs' osp.split(osp.split(osp.realpath(__file__))[0])[1])<line_sep>TEST_DIR=osp.join(OUTPUT_DIR 'log_dir')<line_sep>TENSORBOARD_DIR=osp.join(OUTPUT_DIR 'tb_dir')<line_sep># -------- Data Config -------- #
DATALOADER=edict()<line_sep>DATALOADER.NUM_WORKERS=0<line_sep>DATALOADER.ASPECT_RATIO_GROUPING=<false><line_sep>DATALOADER.SIZE_DIVISIBILITY=0<line_sep>DATASET=edict()<line_sep>DATASET.NAME='MIX'<line_sep>dataset=load_dataset(DATASET.NAME)<line_sep>DATASET.KEYPOINT=dataset.KEYPOINT<line_sep>DATASET.PAF=dataset.PAF<line_sep>DATASET.ROOT_IDX=dataset.ROOT_IDX# pelvis or neck
DATASET.MAX_PEOPLE=20<line_sep>INPUT=edict()<line_sep>INPUT.NORMALIZE=<true><line_sep>INPUT.MEANS=[0.406 0.456 0.485]# bgr
INPUT.STDS=[0.225 0.224 0.229]<line_sep>INPUT_SHAPE=dataset.INPUT_SHAPE<line_sep>OUTPUT_SHAPE=dataset.OUTPUT_SHAPE<line_sep># -------- Model Config -------- #
MODEL=edict()<line_sep>MODEL.STAGE_NUM=3<line_sep>MODEL.UPSAMPLE_CHANNEL_NUM=256<line_sep>MODEL.DEVICE='cuda'<line_sep>MODEL.WEIGHT=<none># osp.join(ROOT_DIR, 'lib/models/resnet-50_rename.pth')
# -------- Training Config -------- #
SOLVER=edict()<line_sep>SOLVER.IMG_PER_GPU=2<line_sep>SOLVER.BASE_LR=2e-4<line_sep>SOLVER.CHECKPOINT_PERIOD=4800<line_sep>SOLVER.MAX_ITER=96000<times>2<line_sep>SOLVER.WEIGHT_DECAY=8e-6<line_sep>SOLVER.WARMUP_FACTOR=0.1<line_sep>SOLVER.WARMUP_ITERS=2400<line_sep>LOSS=edict()<line_sep>LOSS.OHKM=<true><line_sep>LOSS.TOPK=8<line_sep>LOSS.COARSE_TO_FINE=<true><line_sep>WITH_MDS=<true><line_sep>RUN_EFFICIENT=<false><line_sep># -------- Test Config -------- #
TEST=edict()<line_sep>TEST.IMG_PER_GPU=16<line_sep>TEST.ROOT_PATH='/data/MultiPersonTestSet'# '/data/datasets/mupots-3d-eval/MultiPersonTestSet'
TEST.JSON_PATH=osp.join(TEST.ROOT_PATH 'M3E_gt.json')<block_end>config=Config()<line_sep>cfg=config<def_stmt>link_log_dir <block_start><if_stmt><not>osp.exists('./log')<block_start>ensure_dir(config.OUTPUT_DIR)<line_sep>cmd='ln -s '+config.OUTPUT_DIR+' log'<line_sep>os.system(cmd)<block_end><block_end><def_stmt>make_parser <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('-log' '--linklog' default=<false> action='store_true')<line_sep><return>parser<block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=make_parser()<line_sep>args=parser.parse_args()<if_stmt>args.linklog<block_start>link_log_dir()<block_end><block_end>
|
<import_from_stmt>nri.models.decoders.mlp_decoder MLPDecoder<import_from_stmt>nri.models.decoders.rnn_decoder RNNDecoder<line_sep>
|
<import_from_stmt>conway conway<import_stmt>unittest<import_stmt>numpy<as>np<class_stmt>TestConway(unittest.TestCase)<block_start><def_stmt>test_still self<block_start>"""2x2 block"""<line_sep>A=np.zeros((10 10))<line_sep>A[1:3 1:3]=1<line_sep>B=conway(A)<assert_stmt>(A<eq>B).all()<block_end><def_stmt>test_scillator self<block_start>"""blinker"""<line_sep>A=np.zeros((10 10))<line_sep>A[1:4 1]=1<line_sep>B=conway(A)<assert_stmt>(B[2 0:3]<eq>1).all()<line_sep>B=conway(B)<assert_stmt>(A<eq>B).all()<block_end><def_stmt>test_evolution self<block_start>"""test that something changes"""<line_sep>m,n=10 10<line_sep>A=np.random.random(m<times>n).reshape((m n)).round()<line_sep>B=conway(A)<assert_stmt>(B<ne>A).any()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
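The conway module imported above is not shown here; a minimal sketch of a step function consistent with these tests (still life, blinker, random evolution) could look like this:
import numpy as np

def conway(A):
    """One Game of Life step on a 0/1 array with dead (zero-padded) borders."""
    A = np.asarray(A)
    padded = np.pad(A, 1)
    # Sum the 8 shifted copies of the grid to count live neighbours per cell.
    neighbors = sum(
        padded[1 + dy:1 + dy + A.shape[0], 1 + dx:1 + dx + A.shape[1]]
        for dy in (-1, 0, 1) for dx in (-1, 0, 1) if (dy, dx) != (0, 0)
    )
    # Birth on exactly 3 neighbours; survival on 2 or 3.
    return ((neighbors == 3) | ((A == 1) & (neighbors == 2))).astype(A.dtype)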
|
<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_stmt>pytest<import_from_stmt>sklearn.model_selection train_test_split<import_from_stmt>mercari.datasets_mx prepare_vectorizer_1 prepare_vectorizer_2 prepare_vectorizer_3<import_from_stmt>mercari.datasets_tf prepare_vectorizer_1_tf prepare_vectorizer_2_tf prepare_vectorizer_3_tf<import_from_stmt>mercari.mercari_io load_train<import_from_stmt>mercari.mx_sparse MXRegression MXRegressionClf<import_from_stmt>mercari.tf_sparse RegressionHuber RegressionClf<import_from_stmt>mercari.utils rmsle<line_sep>@pytest.mark.parametrize('vectorizer' [prepare_vectorizer_1() prepare_vectorizer_2() prepare_vectorizer_3() prepare_vectorizer_1_tf() prepare_vectorizer_2_tf() prepare_vectorizer_3_tf() ])@pytest.mark.parametrize('model' [MXRegression(n_epoch=3 loss='huber') MXRegression(n_epoch=3 binary_X=<true> loss='huber') MXRegressionClf(n_epoch=3 n_hidden=(196 64)) MXRegressionClf(n_epoch=3 n_hidden=(196 64) binary_X=<true>) RegressionHuber(n_epoch=3) RegressionHuber(n_epoch=3 binary_X=<true>) RegressionClf(n_epoch=3 n_hidden=(196 64)) RegressionClf(n_epoch=3 n_hidden=(196 64) binary_X=<true>)])<def_stmt>test_end_to_end vectorizer model<block_start>_test(vectorizer model n_rows=<none>)<block_end>@pytest.mark.parametrize('model' [MXRegression(n_epoch=3 loss='huber') MXRegressionClf(n_epoch=3 n_hidden=(196 64)) RegressionHuber(n_epoch=3) RegressionClf(n_epoch=3 n_hidden=(196 64)) ])@pytest.mark.parametrize('vectorizer' [prepare_vectorizer_1() prepare_vectorizer_1_tf() ])@pytest.mark.parametrize('n_rows' [<none> 'random' 1 2 2<power>10 2<power>13-1 2<power>13 2<power>13+1 2<power>13+2<power>10 ])<def_stmt>test_random_number_of_rows vectorizer model n_rows<block_start>_test(vectorizer model n_rows)<block_end><def_stmt>_test vectorizer model n_rows<block_start>tr=load_train('tests/train_10k.tsv')<line_sep>tr,va=train_test_split(tr)<line_sep>te=pd.read_csv('tests/test_10k_corrupted.tsv' sep="\t")<if_stmt>n_rows<is><not><none><block_start><if_stmt>n_rows<eq>'random'<block_start>n_rows=np.random.randint(1 te.shape[0])<line_sep>te=te.sample(n=n_rows)<block_end><block_end>mat_tr=vectorizer.fit_transform(tr tr.price)<line_sep>mat_te=vectorizer.transform(te.copy())<line_sep>mat_va=vectorizer.transform(va)<line_sep>model.fit(mat_tr np.log1p(tr.price))<assert_stmt>rmsle(np.expm1(model.predict(mat_va)) va.price)<l>0.85<line_sep>te_preds=np.expm1(model.predict(mat_te))<assert_stmt>te_preds.shape[0]<eq>te.shape[0]<assert_stmt>np.all(np.isfinite(te_preds))<assert_stmt>te_preds.min()<ge>-1 "min price is {}".format(te_preds.min())<assert_stmt>te_preds.max()<le>3000 "max price is {}".format(te_preds.max())<block_end>
|
<import_stmt>os<import_stmt>random<import_stmt>torch<import_stmt>numpy<as>np<import_from_stmt>time time<import_from_stmt>tqdm tqdm<import_from_stmt>copy deepcopy<import_from_stmt>pathlib Path<import_from_stmt>prettytable PrettyTable<import_from_stmt>common.test test_v2<import_from_stmt>common.utils early_stopping print_dict<import_from_stmt>common.config parse_args<import_from_stmt>common.dataset CKGData<import_from_stmt>common.dataset.build build_loader<import_from_stmt>modules.sampler KGPolicy<import_from_stmt>modules.recommender MF<def_stmt>train_one_epoch recommender sampler train_loader recommender_optim sampler_optim adj_matrix edge_matrix train_data cur_epoch avg_reward <block_start>loss,base_loss,reg_loss=0 0 0<line_sep>epoch_reward=0<line_sep>"""Train one epoch"""<line_sep>tbar=tqdm(train_loader ascii=<true>)<line_sep>num_batch=len(train_loader)<for_stmt>batch_data tbar<block_start>tbar.set_description("Epoch {}".format(cur_epoch))<if_stmt>torch.cuda.is_available()<block_start>batch_data={k:v.cuda(non_blocking=<true>)<for>k,v batch_data.items()}<block_end>"""Train recommender using negtive item provided by sampler"""<line_sep>recommender_optim.zero_grad()<line_sep>neg=batch_data["neg_i_id"]<line_sep>pos=batch_data["pos_i_id"]<line_sep>users=batch_data["u_id"]<line_sep>selected_neg_items_list,_=sampler(batch_data adj_matrix edge_matrix)<line_sep>selected_neg_items=selected_neg_items_list[-1 :]<line_sep>train_set=train_data[users]<line_sep>in_train=torch.sum(selected_neg_items.unsqueeze(1)<eq>train_set.long() dim=1).byte()<line_sep>selected_neg_items[in_train]=neg[in_train]<line_sep>base_loss_batch,reg_loss_batch=recommender(users pos selected_neg_items)<line_sep>loss_batch=base_loss_batch+reg_loss_batch<line_sep>loss_batch.backward()<line_sep>recommender_optim.step()<line_sep>"""Train sampler network"""<line_sep>sampler_optim.zero_grad()<line_sep>selected_neg_items_list,selected_neg_prob_list=sampler(batch_data adj_matrix edge_matrix)<with_stmt>torch.no_grad()<block_start>reward_batch=recommender.get_reward(users pos selected_neg_items_list)<block_end>epoch_reward<augadd>torch.sum(reward_batch)<line_sep>reward_batch<augsub>avg_reward<line_sep>batch_size=reward_batch.size(1)<line_sep>n=reward_batch.size(0)-1<line_sep>R=torch.zeros(batch_size device=reward_batch.device)<line_sep>reward=torch.zeros(reward_batch.size() device=reward_batch.device)<line_sep>gamma=args_config.gamma<for_stmt>i,r enumerate(reward_batch.flip(0))<block_start>R=r+gamma<times>R<line_sep>reward[n-i]=R<block_end>reinforce_loss=-1<times>torch.sum(reward_batch<times>selected_neg_prob_list)<line_sep>reinforce_loss.backward()<line_sep>sampler_optim.step()<line_sep>"""record loss in an epoch"""<line_sep>loss<augadd>loss_batch<line_sep>reg_loss<augadd>reg_loss_batch<line_sep>base_loss<augadd>base_loss_batch<block_end>avg_reward=epoch_reward/num_batch<line_sep>train_res=PrettyTable()<line_sep>train_res.field_names=["Epoch" "Loss" "BPR-Loss" "Regulation" "AVG-Reward"]<line_sep>train_res.add_row([cur_epoch loss.item() base_loss.item() reg_loss.item() avg_reward.item()])<line_sep>print(train_res)<line_sep><return>loss base_loss reg_loss avg_reward<block_end><def_stmt>save_model file_name model config<block_start><if_stmt><not>os.path.isdir(config.out_dir)<block_start>os.mkdir(config.out_dir)<block_end>model_file=Path(config.out_dir+file_name)<line_sep>model_file.touch(exist_ok=<true>)<line_sep>print("Saving model...")<line_sep>torch.save(model.state_dict() model_file)<block_end><def_stmt>build_sampler_graph n_nodes 
edge_threshold graph<block_start>adj_matrix=torch.zeros(n_nodes edge_threshold<times>2)<line_sep>edge_matrix=torch.zeros(n_nodes edge_threshold)<line_sep>"""sample neighbors for each node"""<for_stmt>node tqdm(graph.nodes ascii=<true> desc="Build sampler matrix")<block_start>neighbors=list(graph.neighbors(node))<if_stmt>len(neighbors)<ge>edge_threshold<block_start>sampled_edge=random.sample(neighbors edge_threshold)<line_sep>edges=deepcopy(sampled_edge)<block_end><else_stmt><block_start>neg_id=random.sample(range(CKG.item_range[0] CKG.item_range[1]+1) edge_threshold-len(neighbors) )<line_sep>node_id=[node]<times>(edge_threshold-len(neighbors))<line_sep>sampled_edge=neighbors+neg_id<line_sep>edges=neighbors+node_id<block_end>"""concatenate sampled edge with random edge"""<line_sep>sampled_edge<augadd>random.sample(range(CKG.item_range[0] CKG.item_range[1]+1) edge_threshold)<line_sep>adj_matrix[node]=torch.tensor(sampled_edge dtype=torch.long)<line_sep>edge_matrix[node]=torch.tensor(edges dtype=torch.long)<block_end><if_stmt>torch.cuda.is_available()<block_start>adj_matrix=adj_matrix.cuda().long()<line_sep>edge_matrix=edge_matrix.cuda().long()<block_end><return>adj_matrix edge_matrix<block_end><def_stmt>build_train_data train_mat<block_start>num_user=max(train_mat.keys())+1<line_sep>num_true=max([len(i)<for>i train_mat.values()])<line_sep>train_data=torch.zeros(num_user num_true)<for_stmt>i train_mat.keys()<block_start>true_list=train_mat[i]<line_sep>true_list<augadd>[-1]<times>(num_true-len(true_list))<line_sep>train_data[i]=torch.tensor(true_list dtype=torch.long)<block_end><return>train_data<block_end><def_stmt>train train_loader test_loader graph data_config args_config<block_start>"""build padded training set"""<line_sep>train_mat=graph.train_user_dict<line_sep>train_data=build_train_data(train_mat)<if_stmt>args_config.pretrain_r<block_start>print("\nLoad model from {}".format(args_config.data_path+args_config.model_path))<line_sep>paras=torch.load(args_config.data_path+args_config.model_path)<line_sep>all_embed=torch.cat((paras["user_para"] paras["item_para"]))<line_sep>data_config["all_embed"]=all_embed<block_end>recommender=MF(data_config=data_config args_config=args_config)<line_sep>sampler=KGPolicy(recommender data_config args_config)<if_stmt>torch.cuda.is_available()<block_start>train_data=train_data.long().cuda()<line_sep>sampler=sampler.cuda()<line_sep>recommender=recommender.cuda()<line_sep>print("\nSet sampler as: {}".format(str(sampler)))<line_sep>print("Set recommender as: {}\n".format(str(recommender)))<block_end>recommender_optimer=torch.optim.Adam(recommender.parameters() lr=args_config.rlr)<line_sep>sampler_optimer=torch.optim.Adam(sampler.parameters() lr=args_config.slr)<line_sep>loss_loger,pre_loger,rec_loger,ndcg_loger,hit_loger=[] [] [] [] []<line_sep>stopping_step,cur_best_pre_0,avg_reward=0 0.0 0<line_sep>t0=time()<for_stmt>epoch range(args_config.epoch)<block_start><if_stmt>epoch%args_config.adj_epoch<eq>0<block_start>"""sample adjacency matrix"""<line_sep>adj_matrix,edge_matrix=build_sampler_graph(data_config["n_nodes"] args_config.edge_threshold graph.ckg_graph)<block_end>cur_epoch=epoch+1<line_sep>loss,base_loss,reg_loss,avg_reward=train_one_epoch(recommender sampler train_loader recommender_optimer sampler_optimer adj_matrix edge_matrix train_data cur_epoch avg_reward )<line_sep>"""Test"""<if_stmt>cur_epoch%args_config.show_step<eq>0<block_start><with_stmt>torch.no_grad()<block_start>ret=test_v2(recommender args_config.Ks 
graph)<block_end>loss_loger.append(loss)<line_sep>rec_loger.append(ret["recall"])<line_sep>pre_loger.append(ret["precision"])<line_sep>ndcg_loger.append(ret["ndcg"])<line_sep>hit_loger.append(ret["hit_ratio"])<line_sep>print_dict(ret)<line_sep>cur_best_pre_0,stopping_step,should_stop=early_stopping(ret["recall"][0] cur_best_pre_0 stopping_step expected_order="acc" flag_step=args_config.flag_step )<if_stmt>should_stop<block_start><break><block_end><block_end><block_end>recs=np.array(rec_loger)<line_sep>pres=np.array(pre_loger)<line_sep>ndcgs=np.array(ndcg_loger)<line_sep>hit=np.array(hit_loger)<line_sep>best_rec_0=max(recs[: 0])<line_sep>idx=list(recs[: 0]).index(best_rec_0)<line_sep>final_perf=("Best Iter=[%d]@[%.1f]\n recall=[%s] \n precision=[%s] \n hit=[%s] \n ndcg=[%s]"%(idx time()-t0 "\t".join(["%.5f"%r<for>r recs[idx]]) "\t".join(["%.5f"%r<for>r pres[idx]]) "\t".join(["%.5f"%r<for>r hit[idx]]) "\t".join(["%.5f"%r<for>r ndcgs[idx]]) ))<line_sep>print(final_perf)<block_end><if_stmt>__name__<eq>"__main__"<block_start>"""fix the random seed"""<line_sep>seed=2020<line_sep>random.seed(seed)<line_sep>np.random.seed(seed)<line_sep>torch.manual_seed(seed)<line_sep>"""initialize args and dataset"""<line_sep>args_config=parse_args()<line_sep>CKG=CKGData(args_config)<line_sep>"""set the gpu id"""<if_stmt>torch.cuda.is_available()<block_start>torch.cuda.set_device(args_config.gpu_id)<block_end>data_config={"n_users":CKG.n_users "n_items":CKG.n_items "n_relations":CKG.n_relations+2 "n_entities":CKG.n_entities "n_nodes":CKG.entity_range[1]+1 "item_range":CKG.item_range }<line_sep>print("\ncopying CKG graph for data_loader.. it might take a few minutes")<line_sep>graph=deepcopy(CKG)<line_sep>train_loader,test_loader=build_loader(args_config=args_config graph=graph)<line_sep>train(train_loader=train_loader test_loader=test_loader graph=CKG data_config=data_config args_config=args_config )<block_end>
|
# -*- coding: utf-8 -*-
<import_from_stmt>terraform_compliance.common.helper seek_key_in_dict # importing this purely because the unit tests require it to exist in global scope
Null <import_from_stmt>terraform_compliance.common.error_handling Error<def_stmt>it_must_contain_something _step_obj something inherited_values=Null child=<false><block_start>match=_step_obj.context.match<line_sep>seek_key_in_dict,seek_regex_key_in_dict_values=match.seek_key_in_dict match.seek_regex_key_in_dict_values<line_sep>prop_list=[]<line_sep>_step_obj.context.stash=inherited_values<if>inherited_values<is><not>Null<else>_step_obj.context.stash<if_stmt>_step_obj.context.type<in>('resource' 'data')<block_start><for_stmt>resource _step_obj.context.stash<block_start><if_stmt><not>isinstance(resource dict)<or>'values'<not><in>resource<or>'address'<not><in>resource<or>'type'<not><in>resource<block_start>resource={'values':resource 'address':resource 'type':_step_obj.context.name}<block_end># not going to use match.get here because the following line is an edge case
values=resource.get('values' resource.get('expressions' {}))<if_stmt><not>values<block_start>values=seek_key_in_dict(resource something)<block_end>found_values=[]<line_sep>found_key=Null# this could also become a list
resource_passed=<false><line_sep># set this to True if you get anything from the resource, don't set it to False if you get empty values as there could be other values as well
<if_stmt>isinstance(values dict)<block_start>found_key=match.get(values something Null)<if_stmt>found_key<is><not>Null<block_start>found_key=[{something:found_key}]<block_end><else_stmt><block_start>found_key=seek_key_in_dict(values something)<block_end><for_stmt>kv_pair found_key# kv_pair must be in {something: found_key} format.
<block_start><if_stmt><not>isinstance(kv_pair dict)<block_start><continue># should raise exception
<block_end># ignore the values that correspond to Null
# Following line could be problematic, how to determine if something is set to be empty or not set? Behavior is provider dependent.
# For now, allow '' and don't allow [] as per user cases.
<if_stmt>match.get(kv_pair something)<not><in>([] )<block_start>found_values.append(match.get(kv_pair something))<line_sep>resource_passed=<true><block_end><block_end><block_end><elif_stmt>isinstance(values list)<block_start><for_stmt>value values<block_start><if_stmt>isinstance(value dict)# First search in the keys
<block_start>found_key=seek_key_in_dict(value something)<line_sep># The following is an edge case that covers things like aws asg tags (https://www.terraform.io/docs/providers/aws/r/autoscaling_group.html)
# Then search in the values with 'key'
<if_stmt><not>found_key<block_start>found_key=seek_regex_key_in_dict_values(value 'key' something)<if_stmt>found_key<block_start>found_key=found_key[0]<line_sep># not going to use match.get here because the following line is an edge case
found_values.extend(value.get('value'))<line_sep>resource_passed=<true><line_sep><continue><block_end><block_end><block_end><elif_stmt>isinstance(value list)<block_start>_,temp_found_values=it_must_contain_something(_step_obj something value child=<true>)<line_sep>prop_list.extend(temp_found_values)<line_sep>resource_passed=<true><block_end><elif_stmt>isinstance(value (str bool int float))<block_start><if_stmt>match.equals(value something)<block_start>found_values.append(value)<line_sep>resource_passed=<true><block_end><block_end><if_stmt>found_key<is><not>Null<and>len(found_key)<block_start><for_stmt>found_key_instance found_key<block_start><if_stmt>isinstance(found_key_instance dict)<block_start><if_stmt>match.get(found_key_instance something Null)<not><in>(Null [] '' {})<block_start>found_values.append(match.get(found_key_instance something))<line_sep>resource_passed=<true><block_end><block_end><block_end><block_end><block_end><block_end><for_stmt>i,found_val enumerate(found_values)<block_start><if_stmt>isinstance(found_val dict)<and>'constant_value'<in>found_val<block_start>found_values[i]=found_val['constant_value']<block_end><block_end><for_stmt>found_val found_values<block_start>prop_list.append({'address':resource['address'] 'values':found_val 'type':_step_obj.context.name})<block_end># do not check prop list here because every resource should contain it.
<if_stmt><not>resource_passed<and><not>child# if nothing was found in this resource, don't error if you're a child
<block_start>Error(_step_obj '{} ({}) does not have {} property.'.format(resource['address'] resource.get('type' '') something))<block_end><block_end><if_stmt>prop_list<block_start>_step_obj.context.stash=prop_list<line_sep>_step_obj.context.property_name=something<line_sep><return>something prop_list<block_end><block_end><elif_stmt>_step_obj.context.type<eq>'provider'<block_start>prop_list=[]<for_stmt>provider_data _step_obj.context.stash<block_start>values=seek_key_in_dict(provider_data something)<if_stmt>values<block_start>prop_list.extend(values)<line_sep>_step_obj.context.property_name=something<line_sep>_step_obj.context.address='{}.{}'.format(provider_data.get('name' _step_obj.context.addresses) provider_data.get('alias' "\b"))<block_end><else_stmt><block_start>Error(_step_obj '{} {} does not have {} property.'.format(_step_obj.context.addresses _step_obj.context.type something))<block_end><block_end><if_stmt>prop_list<block_start>_step_obj.context.stash=prop_list<line_sep><return><true><block_end>Error(_step_obj '{} {} does not have {} property.'.format(_step_obj.context.addresses _step_obj.context.type something))<block_end><block_end><def_stmt>it_must_not_contain_something _step_obj something inherited_values=Null<block_start>match=_step_obj.context.match<line_sep>seek_key_in_dict,seek_regex_key_in_dict_values=match.seek_key_in_dict match.seek_regex_key_in_dict_values<line_sep>prop_list=[]<line_sep>_step_obj.context.stash=inherited_values<if>inherited_values<is><not>Null<else>_step_obj.context.stash<if_stmt>_step_obj.context.type<in>('resource' 'data')<block_start><for_stmt>resource _step_obj.context.stash<block_start><if_stmt><not>isinstance(resource dict)<or>'values'<not><in>resource<or>'address'<not><in>resource<or>'type'<not><in>resource<block_start>resource={'values':resource 'address':resource 'type':_step_obj.context.name}<block_end>values=resource.get('values' resource.get('expressions' {}))<if_stmt><not>values<block_start>values=seek_key_in_dict(resource something)<block_end>found_values=[]<line_sep>found_key=Null<line_sep>resource_passed=<false><line_sep># set this to True if you get anything from the resource, don't set it to False if you get empty values as there could be other values as well
<if_stmt>isinstance(values dict)<block_start>found_key=match.get(values something Null)<if_stmt>found_key<is><not>Null<block_start>found_key=[{something:found_key}]<block_end><else_stmt><block_start>found_key=seek_key_in_dict(values something)<block_end><for_stmt>kv_pair found_key# kv_pair must be in {something: found_key} format.
<block_start><if_stmt><not>isinstance(kv_pair dict)<block_start><continue># could raise an exception
<block_end># ignore the values that correspond to Null
# Following line could be problematic, how to determine if something is set to be empty or not set? Behavior is provider dependent.
# For now, allow '' and don't allow [] as per user cases.
<if_stmt>match.get(kv_pair something)<not><in>([] )<block_start>found_values.append(match.get(kv_pair something))<line_sep>resource_passed=<true><block_end><block_end><block_end><elif_stmt>isinstance(values list)<block_start><for_stmt>value values<block_start><if_stmt>isinstance(value dict)# First search in the keys
<block_start>found_key=seek_key_in_dict(value something)<line_sep># Then search in the values with 'key'
<if_stmt><not>found_key<block_start>found_key=seek_regex_key_in_dict_values(value 'key' something)<if_stmt>found_key<block_start>found_key=found_key[0]<line_sep>found_values.extend(value.get('value'))<line_sep>resource_passed=<true><line_sep><continue><block_end><block_end><block_end><elif_stmt>isinstance(value list)<block_start>_,temp_found_values=it_must_contain_something(_step_obj something value child=<true>)<line_sep>prop_list.extend(temp_found_values)<line_sep>resource_passed=<true><block_end><elif_stmt>isinstance(value (str bool int float))<block_start><if_stmt>match.equals(value something)<block_start>found_values.append(value)<line_sep>resource_passed=<true><block_end><block_end><if_stmt>found_key<is><not>Null<and>len(found_key)<block_start><for_stmt>found_key_instance found_key<block_start><if_stmt>isinstance(found_key_instance dict)<block_start><if_stmt>match.get(found_key_instance something Null)<not><in>(Null [] '' {})<block_start>found_values.append(match.get(found_key_instance something))<line_sep>resource_passed=<true><block_end><block_end><block_end><block_end><block_end><block_end><for_stmt>i,found_val enumerate(found_values)<block_start><if_stmt>isinstance(found_val dict)<and>'constant_value'<in>found_val<block_start>found_values[i]=found_val['constant_value']<block_end><block_end><if_stmt>resource_passed<block_start>Error(_step_obj '{} property exists in {} ({}).'.format(something resource['address'] resource.get('type' '')))<block_end><block_end><block_end><elif_stmt>_step_obj.context.type<eq>'provider'<block_start><for_stmt>provider_data _step_obj.context.stash<block_start>values=seek_key_in_dict(provider_data something)<if_stmt>values<block_start>Error(_step_obj '{} {} does not have {} property.'.format(_step_obj.context.addresses _step_obj.context.type something))<block_end><block_end><block_end><block_end>
|
# coding: utf-8
<import_from_future_stmt> absolute_import print_function unicode_literals <import_from_stmt>pydocx.openxml.packaging.open_xml_part OpenXmlPart<import_from_stmt>pydocx.openxml.wordprocessing Footnotes<class_stmt>FootnotesPart(OpenXmlPart)<block_start>'''
Represents a Footnotes part within a Word document container.
See also: http://msdn.microsoft.com/en-us/library/documentformat.openxml.packaging.footnotespart%28v=office.14%29.aspx # noqa
'''<line_sep>relationship_type='/'.join(['http://schemas.openxmlformats.org' 'officeDocument' '2006' 'relationships' 'footnotes' ])<def_stmt>__init__ self *args **kwargs<block_start>super(FootnotesPart self).__init__(*args **kwargs)<line_sep>self._footnotes=<none><block_end>@property<def_stmt>footnotes self<block_start><if_stmt><not>self._footnotes<block_start>self._footnotes=self.load_footnotes()<block_end><return>self._footnotes<block_end><def_stmt>load_footnotes self<block_start>self._footnotes=Footnotes.load(self.root_element container=self)<line_sep><return>self._footnotes<block_end><block_end>
|
# Copyright (c) 2012-2021 Arm Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2013 <NAME>
# Copyright (c) 2015 University of Kaiserslautern
# Copyright (c) 2015 The University of Bologna
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Interfaces for WideIO memory devices
These memory "interfaces" contain the timing,energy,etc parameters for each
memory type and are usually based on datasheets for the memory devices.
You can use these interfaces in the MemCtrl object as the `dram` timing
interface.
"""<import_from_stmt>m5.objects DRAMInterface<class_stmt>WideIO_200_1x128(DRAMInterface)<block_start>"""
A single WideIO x128 interface (one command and address bus), with
default timings based on an estimated WIO-200 8 Gbit part.
"""<line_sep># No DLL for WideIO
dll=<false><line_sep># size of device
device_size="1024MiB"<line_sep># 1x128 configuration, 1 device with a 128-bit interface
device_bus_width=128<line_sep># This is a BL4 device
burst_length=4<line_sep># Each device has a page (row buffer) size of 4KB
# (this depends on the memory density)
device_rowbuffer_size="4KiB"<line_sep># 1x128 configuration, so 1 device
devices_per_rank=1<line_sep># Use one rank for a one-high die stack
ranks_per_channel=1<line_sep># WideIO has 4 banks in all configurations
banks_per_rank=4<line_sep># 200 MHz
tCK="5ns"<line_sep># WIO-200
tRCD="18ns"<line_sep>tCL="18ns"<line_sep>tRP="18ns"<line_sep>tRAS="42ns"<line_sep>tWR="15ns"<line_sep># Read to precharge is same as the burst
tRTP="20ns"<line_sep># 4 beats across an x128 SDR interface translates to 4 clocks @ 200 MHz.
# Note this is a BL4 SDR device.
tBURST="20ns"<line_sep># WIO 8 Gb
tRFC="210ns"<line_sep># WIO 8 Gb, <=85C, half for >85C
tREFI="3.9us"<line_sep># Greater of 2 CK or 15 ns, 2 CK @ 200 MHz = 10 ns
tWTR="15ns"<line_sep># Default same rank rd-to-wr bus turnaround to 2 CK, @200 MHz = 10 ns
tRTW="10ns"<line_sep># Default different rank bus delay to 2 CK, @200 MHz = 10 ns
tCS="10ns"<line_sep># Activate to activate irrespective of density and speed grade
tRRD="10.0ns"<line_sep># Two instead of four activation window
tXAW="50ns"<line_sep>activation_limit=2<line_sep># The WideIO specification does not provide current information
<block_end>
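A minimal sketch of using this interface in a gem5 configuration script, as the module docstring suggests; the surrounding System and memory-bus setup (`system` below) is assumed to exist elsewhere.
from m5.objects import MemCtrl, WideIO_200_1x128

mem_ctrl = MemCtrl()
mem_ctrl.dram = WideIO_200_1x128()           # WideIO timings for this channel
mem_ctrl.dram.range = system.mem_ranges[0]   # `system` defined elsewhere
mem_ctrl.port = system.membus.mem_side_ports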
|
<import_stmt>unittest2<import_stmt>vscode<import_stmt>json<import_from_stmt>lldbsuite.test.decorators *<import_from_stmt>lldbsuite.test.lldbtest *<import_from_stmt>lldbsuite.test lldbutil<import_stmt>lldbvscode_testcase<class_stmt>TestVSCode_redirection_to_console(lldbvscode_testcase.VSCodeTestCaseBase)<block_start>mydir=TestBase.compute_mydir(__file__)<line_sep>@skipIfWindows@skipIfRemote<def_stmt>test self<block_start>"""
Without proper stderr and stdout redirection, the following code would throw an
exception, like the following:
Exception: unexpected malformed message from lldb-vscode
"""<line_sep>program=self.getBuildArtifact("a.out")<line_sep>self.build_and_launch(program lldbVSCodeEnv={"LLDB_VSCODE_TEST_STDOUT_STDERR_REDIRECTION":""})<line_sep>source='main.cpp'<line_sep>breakpoint1_line=line_number(source '// breakpoint 1')<line_sep>breakpoint_ids=self.set_source_breakpoints(source [breakpoint1_line])<line_sep>self.assertEqual(len(breakpoint_ids) 1 "expect correct number of breakpoints")<line_sep>self.continue_to_breakpoints(breakpoint_ids)<line_sep>self.assertIn('argc' json.dumps(self.vscode.get_local_variables(frameIndex=1)))<block_end><block_end>
|
<import_stmt>struct<import_from_stmt>mmap mmap<import_from_stmt>typing Union<import_from_stmt>DyldExtractor.file_context FileContext<import_from_stmt>DyldExtractor.macho.segment_context SegmentContext<import_from_stmt>DyldExtractor.macho.macho_structs LoadCommandMap LoadCommands load_command UnknownLoadCommand mach_header_64 segment_command_64 <class_stmt>MachOContext(FileContext)<block_start>loadCommands:list[load_command]<line_sep>segments:dict[bytes SegmentContext]<line_sep>segmentsI:list[SegmentContext]<def_stmt>__init__ self file:mmap offset:int<arrow><none><block_start>"""A wrapper around a MachO file.
Provides convenient methods and attributes for a given MachO file.
Args:
file: The macho file.
offset: The offset to the header in the file.
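Example (an illustrative sketch; the file path is an assumption):

    from mmap import mmap, ACCESS_READ

    with open('a.out', 'rb') as f:
        fileMap = mmap(f.fileno(), 0, access=ACCESS_READ)
        machoCtx = MachOContext(fileMap, offset=0)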
"""<line_sep>super().__init__(file offset=offset)<line_sep>self.header=mach_header_64(file offset)<line_sep># check to make sure the MachO file is 64 bit
magic=self.header.magic<if_stmt>magic<eq>0xfeedface<or>magic<eq>0xcefaedfe<block_start><raise>Exception("MachOContext doesn't support 32bit files!")<block_end>self._parseLoadCommands()<line_sep><pass><block_end><def_stmt>getLoadCommand self cmdFilter:tuple[LoadCommands] multiple:bool=<false><arrow>Union[load_command tuple[load_command]]<block_start>"""Retrieve a load command with its command ID
Args:
cmdFilter: The command type(s) to filter by.
multiple: Optional; To get multiple results instead of the first.
Returns:
If the command is not found, None is returned. If one was found it will
return the first match. If multiple is set to True, it will return a list
of matches.
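Example (an illustrative sketch; machoCtx is a MachOContext instance, and
the LC_SEGMENT_64 member name follows standard Mach-O naming and is assumed
to be present in LoadCommands):

    segCmds = machoCtx.getLoadCommand(
        (LoadCommands.LC_SEGMENT_64,), multiple=True)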
"""<line_sep>matches=[]<for_stmt>loadCommand self.loadCommands<block_start><if_stmt>loadCommand.cmd<in>cmdFilter<block_start><if_stmt><not>multiple<block_start><return>loadCommand<block_end><else_stmt><block_start>matches.append(loadCommand)<block_end><block_end><block_end><if_stmt>len(matches)<eq>0<block_start><return><none><block_end><return>matches<block_end><def_stmt>containsAddr self address:int<arrow>bool<block_start>"""Check if the address is contained in the MachO file.
Args:
address: the VM address to check.
Returns:
Whether or not the address is contained in the segments
of this MachO file.
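Example (illustrative; the address value is arbitrary):

    inImage = machoCtx.containsAddr(0x100000000)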
"""<for_stmt>segment self.segmentsI<block_start>seg=segment.seg<line_sep>lowBound=seg.vmaddr<line_sep>highBound=lowBound+seg.vmsize<if_stmt>address<ge>lowBound<and>address<l>highBound<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>_parseLoadCommands self<arrow><none><block_start>"""Parse the load commands
Parse the load commands and set the loadCommands attribute.
"""<line_sep>self.loadCommands=[]<line_sep>self.segments={}<line_sep>self.segmentsI=[]<line_sep>cmdOff=len(self.header)+self.fileOffset<for_stmt>_ range(self.header.ncmds)<block_start>self.file.seek(cmdOff)<line_sep>cmd=struct.unpack("<I" self.file.read(4))[0]<line_sep>command=LoadCommandMap.get(cmd UnknownLoadCommand)<if_stmt>command<eq>UnknownLoadCommand<block_start><raise>Exception(f"Unknown LoadCommand: {cmd}")<block_end>command=command(self.file cmdOff)<line_sep>cmdOff<augadd>command.cmdsize<line_sep>self.loadCommands.append(command)<line_sep># populate the segments at this point too
<if_stmt>isinstance(command segment_command_64)<block_start>segCtx=SegmentContext(self.file command)<line_sep>self.segments[command.segname]=segCtx<line_sep>self.segmentsI.append(segCtx)<block_end><block_end><pass><block_end><block_end>
|
"""
Copyright (c) 2017, <NAME>.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on May 20, 2017
@author: jrm
"""<import_from_stmt>atom.api Typed ForwardTyped Int Unicode Bool Event observe set_default <import_from_stmt>enaml.core.declarative d_<import_from_stmt>.view View ProxyView<class_stmt>ProxyImageView(ProxyView)<block_start>""" The abstract definition of a proxy relative layout object.
"""<line_sep>#: A reference to the Label declaration.
declaration=ForwardTyped(<lambda>:ImageView)<def_stmt>set_src self src<block_start><raise>NotImplementedError<block_end><def_stmt>set_max_height self height<block_start><raise>NotImplementedError<block_end><def_stmt>set_max_width self width<block_start><raise>NotImplementedError<block_end><block_end><class_stmt>ImageView(View)<block_start>""" Displays image resources
"""<line_sep>#: Set the offset of the widget's text baseline from the widget's
#: top boundary.
# baseline = d_(Int(-1))
#
# baseline_align_bottom = d_(Bool())
#
# crop_to_padding = d_(Bool())
#: Sets a drawable as the content of this ImageView.
src=d_(Unicode())<line_sep>#: An optional argument to supply a maximum height for this view.
max_height=d_(Int())<line_sep>#: An optional argument to supply a maximum width for this view.
max_width=d_(Int())<line_sep>#: A reference to the ProxyImageView object.
proxy=Typed(ProxyImageView)<line_sep># -------------------------------------------------------------------------
# Observers
# -------------------------------------------------------------------------
@observe('src' 'max_height' 'max_width')<def_stmt>_update_proxy self change<block_start>""" An observer which sends the state change to the proxy.
"""<line_sep># The superclass implementation is sufficient.
super(ImageView self)._update_proxy(change)<block_end><block_end>
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>time<import_stmt>sys os<line_sep>sys.path.append(os.path.dirname(os.path.dirname(sys.path[0])))<line_sep>sys.path.insert(0 'third_party')<line_sep>sys.path.insert(0 './')<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>torch.autograd Variable<import_from_stmt>ext_utils.badja_data BADJAData<import_from_stmt>ext_utils.joint_catalog SMALJointInfo<import_stmt>ext_utils.flowlib<as>flowlib<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>cv2<import_stmt>pdb<import_stmt>soft_renderer<as>sr<import_stmt>argparse<import_stmt>trimesh<import_from_stmt>nnutils.geom_utils obj_to_cam pinhole_cam orthographic_cam render_flow_soft_3<line_sep>parser=argparse.ArgumentParser(description='BADJA')<line_sep>parser.add_argument('--testdir' default='' help='path to test dir')<line_sep>parser.add_argument('--seqname' default='camel' help='sequence to test')<line_sep>parser.add_argument('--type' default='mesh' help='load mesh data or flow or zero')<line_sep>parser.add_argument('--cam_type' default='perspective' help='camera model, orthographic or perspective')<line_sep>parser.add_argument('--vis' dest='vis' action='store_true' help='whether to draw visualization')<line_sep>args=parser.parse_args()<line_sep>renderer_softflf=sr.SoftRenderer(image_size=256 dist_func='hard' aggr_func_alpha='hard' camera_mode='look_at' perspective=<false> aggr_func_rgb='hard' light_mode='vertex' light_intensity_ambient=1. light_intensity_directionals=0.)<def_stmt>process_flow model imgL_o imgR_o mean_L mean_R<block_start>testres=1<line_sep># for gray input images
<if_stmt>len(imgL_o.shape)<eq>2<block_start>imgL_o=np.tile(imgL_o[: : np.newaxis] (1 1 3))<line_sep>imgR_o=np.tile(imgR_o[: : np.newaxis] (1 1 3))<block_end># resize
maxh=imgL_o.shape[0]<times>testres<line_sep>maxw=imgL_o.shape[1]<times>testres<line_sep>max_h=int(maxh<floordiv>64<times>64)<line_sep>max_w=int(maxw<floordiv>64<times>64)<if_stmt>max_h<l>maxh<block_start>max_h<augadd>64<block_end><if_stmt>max_w<l>maxw<block_start>max_w<augadd>64<block_end>input_size=imgL_o.shape<line_sep>imgL=cv2.resize(imgL_o (max_w max_h))<line_sep>imgR=cv2.resize(imgR_o (max_w max_h))<line_sep>imgL_noaug=torch.Tensor(imgL/255.)[np.newaxis].float().cuda()<line_sep># flip channel, subtract mean
imgL=imgL[: : ::-1].copy()/255.-np.asarray(mean_L).mean(0)[np.newaxis np.newaxis :]<line_sep>imgR=imgR[: : ::-1].copy()/255.-np.asarray(mean_R).mean(0)[np.newaxis np.newaxis :]<line_sep>imgL=np.transpose(imgL [2 0 1])[np.newaxis]<line_sep>imgR=np.transpose(imgR [2 0 1])[np.newaxis]<line_sep># modify module according to inputs
<import_from_stmt>models.VCN_exp WarpModule flow_reg<for_stmt>i range(len(model.module.reg_modules))<block_start>model.module.reg_modules[i]=flow_reg([1 max_w<floordiv>(2<power>(6-i)) max_h<floordiv>(2<power>(6-i))] ent=getattr(model.module 'flow_reg%d'%2<power>(6-i)).ent maxdisp=getattr(model.module 'flow_reg%d'%2<power>(6-i)).md fac=getattr(model.module 'flow_reg%d'%2<power>(6-i)).fac).cuda()<block_end><for_stmt>i range(len(model.module.warp_modules))<block_start>model.module.warp_modules[i]=WarpModule([1 max_w<floordiv>(2<power>(6-i)) max_h<floordiv>(2<power>(6-i))]).cuda()<block_end># get intrinsics
intr_list=[torch.Tensor(inxx).cuda()<for>inxx [[1] [1] [1] [1] [1] [0] [0] [1] [0] [0]]]<line_sep>fl_next=1<line_sep>intr_list.append(torch.Tensor([fl_next]).cuda())<line_sep>disc_aux=[<none> <none> <none> intr_list imgL_noaug <none>]<line_sep># forward
imgL=Variable(torch.FloatTensor(imgL).cuda())<line_sep>imgR=Variable(torch.FloatTensor(imgR).cuda())<with_stmt>torch.no_grad()<block_start>imgLR=torch.cat([imgL imgR] 0)<line_sep>model.eval()<line_sep>torch.cuda.synchronize()<line_sep>start_time=time.time()<line_sep>rts=model(imgLR disc_aux)<line_sep>torch.cuda.synchronize()<line_sep>ttime=(time.time()-start_time)<line_sep>print('time = %.2f'%(ttime<times>1000))<line_sep>flow,logmid,occ,biseg,objseg=rts<block_end># upsampling
flow=torch.squeeze(flow).data.cpu().numpy()<line_sep>flow=np.concatenate([cv2.resize(flow[0] (input_size[1] input_size[0]))[: : np.newaxis] cv2.resize(flow[1] (input_size[1] input_size[0]))[: : np.newaxis]] -1)<line_sep>flow[: : 0]<augmul>imgL_o.shape[1]/max_w<line_sep>flow[: : 1]<augmul>imgL_o.shape[0]/max_h<line_sep>flow=np.concatenate((flow np.ones([flow.shape[0] flow.shape[1] 1])) -1)<line_sep>torch.cuda.empty_cache()<line_sep>flow=torch.Tensor(flow).cuda()[<none>]<line_sep><return>flow<block_end><def_stmt>preprocess_image img mask imgsize<block_start><if_stmt>len(img.shape)<eq>2<block_start>img=np.repeat(np.expand_dims(img 2) 3 axis=2)<block_end>mask=mask[: : :1]<line_sep># crop box
indices=np.where(mask<g>0)<line_sep>xid=indices[1]<line_sep>yid=indices[0]<line_sep>center=((xid.max()+xid.min())<floordiv>2 (yid.max()+yid.min())<floordiv>2)<line_sep>length=((xid.max()-xid.min())<floordiv>2 (yid.max()-yid.min())<floordiv>2)<line_sep>maxlength=int(1.2<times>max(length))<line_sep>length=(maxlength maxlength)<line_sep>alp=2<times>length[0]/float(imgsize)<line_sep>refpp=np.asarray(center)/(imgsize/2.)-1<line_sep><return>alp refpp center length[0]<block_end><def_stmt>draw_joints_on_image rgb_img joints visibility region_colors marker_types pred=<none> correct=<none><block_start>joints=joints[: ::-1]# OpenCV works in (x, y) rather than (i, j)
disp_img=rgb_img.copy()<line_sep>i=0<for_stmt>joint_coord,visible,color,marker_type zip(joints visibility region_colors marker_types)<block_start><if_stmt>visible<block_start>joint_coord=joint_coord.astype(int)<line_sep>cv2.circle(disp_img tuple(joint_coord) radius=3 color=[255 0 0] thickness=10)<if_stmt>pred<is><not><none><block_start><if_stmt>correct[i]<block_start>color=[0 255 0]<block_end><else_stmt><block_start>color=[0 0 255]<block_end>error=np.linalg.norm(joint_coord-pred[i ::-1] 2 -1)<line_sep>cv2.circle(disp_img tuple(joint_coord) radius=int(error) color=color thickness=3)<line_sep>cv2.line(disp_img tuple(joint_coord) tuple(pred[i ::-1]) color thickness=3)<block_end><block_end>i<augadd>1<block_end><return>disp_img<block_end><def_stmt>main <block_start>smal_joint_info=SMALJointInfo()<line_sep>badja_data=BADJAData(args.seqname)<line_sep>data_loader=badja_data.get_loader()<line_sep>print(args.testdir)<line_sep># store all the data
all_anno=[]<line_sep>all_mesh=[]<line_sep>all_cam=[]<line_sep>all_fr=[]<line_sep>all_fl=[]<line_sep>#import pdb; pdb.set_trace()
<for_stmt>anno data_loader<block_start>all_anno.append(anno)<line_sep>rgb_img,sil_img,joints,visible,name=anno<line_sep>seqname=name.split('/')[-2]<line_sep>fr=int(name.split('/')[-1].split('.')[-2])<line_sep>all_fr.append(fr)<line_sep>print('%s/%d'%(seqname fr))<line_sep># load mesh data or flow
<if_stmt>args.type<eq>'mesh'<block_start>mesh=trimesh.load('%s/pred%d.ply'%(args.testdir fr) process=<false>)<line_sep>all_mesh.append(mesh)<line_sep>cam=np.loadtxt('%s/cam%d.txt'%(args.testdir fr))<line_sep>all_cam.append(cam)<block_end><block_end><if_stmt>args.type<eq>'flow'<block_start><import_from_stmt>models.VCN_exp VCN<line_sep>model=VCN([1 256 256] md=[int(4<times>(256/256)) 4 4 4 4] fac=1)<line_sep>model=nn.DataParallel(model device_ids=[0])<line_sep>model.cuda()<line_sep>pretrained_dict=torch.load('/data/gengshay/vcn_weights/robexp.pth' map_location='cpu')<line_sep>mean_L=pretrained_dict['mean_L']<line_sep>mean_R=pretrained_dict['mean_R']<line_sep>model.load_state_dict(pretrained_dict['state_dict'] strict=<false>)<block_end># store all the results
pck_all=[]<for_stmt>i range(len(all_anno))<block_start><for_stmt>j range(len(all_anno))<block_start><if_stmt>i<ne>j# evaluate every two-frame
<block_start>refimg,refsil,refkp,refvis,refname=all_anno[i]<line_sep>tarimg,tarsil,tarkp,tarvis,tarname=all_anno[j]<line_sep>print('%s vs %s'%(refname tarname))<if_stmt>args.type<eq>'mesh'<block_start>refmesh,tarmesh=all_mesh[i] all_mesh[j]<line_sep>refcam,tarcam=all_cam[i] all_cam[j]<line_sep>img_size=max(refimg.shape)<line_sep>renderer_softflf.rasterizer.image_size=img_size<line_sep># render flow between mesh 1 and 2
refface=torch.Tensor(refmesh.faces[<none>]).cuda()<line_sep>verts=torch.Tensor(np.concatenate([refmesh.vertices[<none>] tarmesh.vertices[<none>]] 0)).cuda()<line_sep>Rmat=torch.Tensor(np.concatenate([refcam[<none> :3 :3] tarcam[<none> :3 :3]] 0)).cuda()<line_sep>Tmat=torch.Tensor(np.concatenate([refcam[<none> :3 3] tarcam[<none> :3 3]] 0)).cuda()<line_sep>ppoint=torch.Tensor(np.concatenate([refcam[<none> 3 2:] tarcam[<none> 3 2:]] 0)).cuda()<line_sep>scale=torch.Tensor(np.concatenate([refcam[<none> 3 :1] tarcam[<none> 3 :1]] 0)).cuda()<line_sep>scale=scale/img_size<times>2<line_sep>ppoint=ppoint/img_size<times>2-1<line_sep>verts_fl=obj_to_cam(verts Rmat Tmat[: <none>] nmesh=1 n_hypo=1 skin=<none>)<line_sep>verts_fl=torch.cat([verts_fl torch.ones_like(verts_fl[: : 0:1])] dim=-1)<line_sep>verts_pos=verts_fl.clone()<line_sep>verts_fl=pinhole_cam(verts_fl ppoint scale)<line_sep>flow_fw,bgmask_fw,fgmask_flowf=render_flow_soft_3(renderer_softflf verts_fl[:1] verts_fl[1:] refface)<line_sep>flow_fw[bgmask_fw]=0.<line_sep>flow_fw=torch.cat([flow_fw torch.zeros_like(flow_fw)[: : : :1]] -1)[: :refimg.shape[0] :refimg.shape[1]]<block_end><elif_stmt>args.type<eq>'flow'<block_start>flow_fw=process_flow(model refimg tarimg mean_L mean_R)<line_sep>flow_fw=(flow_fw)/(refimg.shape[0]/2.)<block_end><elif_stmt>args.type<eq>'zero'<block_start>flow_fw=torch.zeros(refimg.shape).cuda()[<none>]<block_end>refkpx=torch.Tensor(refkp.astype(float)).cuda()<line_sep>x0,y0=np.meshgrid(range(refimg.shape[1]) range(refimg.shape[0]))<line_sep>x0=torch.Tensor(x0).cuda()<line_sep>y0=torch.Tensor(y0).cuda()<line_sep>idx=((flow_fw[: : : :2].norm(2 -1)<l>1e-6).float().view(1 -1)<times>1e6+torch.pow(refkpx[: 0:1]-y0.view(1 -1) 2)+torch.pow(refkpx[: 1:2]-x0.view(1 -1) 2)).argmin(-1)<line_sep>samp_flow=flow_fw.view(-1 3)[idx][: :2]<line_sep>tarkp_pred=refkpx.clone()<line_sep>tarkp_pred[: 0]=tarkp_pred[: 0]+(samp_flow[: 1])<times>refimg.shape[0]/2<line_sep>tarkp_pred[: 1]=tarkp_pred[: 1]+(samp_flow[: 0])<times>refimg.shape[1]/2<line_sep>tarkp_pred=np.asarray(tarkp_pred.cpu())<line_sep>diff=np.linalg.norm(tarkp_pred-tarkp 2 -1)<line_sep>sqarea=np.sqrt((refsil[: : 0]<g>0).sum())<line_sep>correct=diff<l>sqarea<times>0.2<line_sep>correct=correct[np.logical_and(tarvis refvis)]<if_stmt>args.vis<block_start>rgb_vis=draw_joints_on_image(refimg refkp refvis smal_joint_info.joint_colors smal_joint_info.annotated_markers)<line_sep>tarimg=draw_joints_on_image(tarimg tarkp tarvis smal_joint_info.joint_colors smal_joint_info.annotated_markers pred=tarkp_pred correct=diff<l>sqarea<times>0.2)<line_sep>cv2.addWeighted(rgb_vis 0.5 flowlib.flow_to_image(np.asarray(flow_fw[0].clamp(-1 1).detach().cpu())) 0.5 0.0 rgb_vis)<line_sep>cv2.imwrite('%s/%05d-%05d-flo.png'%(args.testdir all_fr[i] all_fr[j]) rgb_vis[: : ::-1])<line_sep>cv2.imwrite('%s/%05d-%05d.png'%(args.testdir all_fr[i] all_fr[j]) tarimg[: : ::-1])<block_end>pck_all.append(correct)<block_end><block_end><block_end>print('PCK %.02f'%(100<times>np.concatenate(pck_all).astype(float).mean()))<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
|
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# The code is based on HigherHRNet-Human-Pose-Estimation.
# (https://github.com/HRNet/HigherHRNet-Human-Pose-Estimation)
# Modified by <NAME> (<EMAIL>).
# ------------------------------------------------------------------------------
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>logging<import_stmt>torch<import_stmt>torch.nn<as>nn<line_sep>logger=logging.getLogger(__name__)<def_stmt>make_input t requires_grad=<false> need_cuda=<true><block_start>inp=torch.autograd.Variable(t requires_grad=requires_grad)<line_sep>inp=inp.sum()<if_stmt>need_cuda<block_start>inp=inp.cuda()<block_end><return>inp<block_end><class_stmt>HeatmapLoss(nn.Module)<block_start><def_stmt>__init__ self<block_start>super().__init__()<block_end><def_stmt>forward self pred gt mask<block_start><assert_stmt>pred.size()<eq>gt.size()<line_sep>loss=((pred-gt)<power>2)<times>mask<line_sep>loss=loss.mean(dim=3).mean(dim=2).mean(dim=1)<line_sep><return>loss<block_end><block_end><class_stmt>OffsetsLoss(nn.Module)<block_start><def_stmt>__init__ self<block_start>super().__init__()<block_end><def_stmt>smooth_l1_loss self pred gt beta=1./9<block_start>l1_loss=torch.abs(pred-gt)<line_sep>cond=l1_loss<l>beta<line_sep>loss=torch.where(cond 0.5<times>l1_loss<power>2/beta l1_loss-0.5<times>beta)<line_sep><return>loss<block_end><def_stmt>forward self pred gt weights<block_start><assert_stmt>pred.size()<eq>gt.size()<line_sep>num_pos=torch.nonzero(weights<g>0).size()[0]<line_sep>loss=self.smooth_l1_loss(pred gt)<times>weights<if_stmt>num_pos<eq>0<block_start>num_pos=1.<block_end>loss=loss.sum()/num_pos<line_sep><return>loss<block_end><block_end><class_stmt>MultiLossFactory(nn.Module)<block_start><def_stmt>__init__ self cfg<block_start>super().__init__()<line_sep>self._init_check(cfg)<line_sep>self.num_joints=cfg.MODEL.NUM_JOINTS<line_sep>self.num_stages=cfg.LOSS.NUM_STAGES<line_sep>self.heatmaps_loss=nn.ModuleList([HeatmapLoss()<if>with_heatmaps_loss<else><none><for>with_heatmaps_loss cfg.LOSS.WITH_HEATMAPS_LOSS])<line_sep>self.heatmaps_loss_factor=cfg.LOSS.HEATMAPS_LOSS_FACTOR<line_sep>self.offsets_loss=nn.ModuleList([OffsetsLoss()<if>with_offsets_loss<else><none><for>with_offsets_loss cfg.LOSS.WITH_OFFSETS_LOSS])<line_sep>self.offsets_loss_factor=cfg.LOSS.OFFSETS_LOSS_FACTOR<block_end><def_stmt>forward self outputs poffsets heatmaps masks offsets offset_w<block_start>heatmaps_losses=[]<line_sep>offsets_losses=[]<for_stmt>idx range(len(outputs))<block_start>with_heatmaps_loss=self.heatmaps_loss[idx]<line_sep>with_offsets_loss=self.offsets_loss[idx]<if_stmt>with_heatmaps_loss<and>len(outputs[idx])<g>0<block_start>num_outputs=len(outputs[idx])<if_stmt>num_outputs<g>1<block_start>heatmaps_pred=torch.cat(outputs[idx] dim=1)<line_sep>c=outputs[idx][0].shape[1]<if_stmt>len(heatmaps[idx])<g>1<block_start>heatmaps_gt=[heatmaps[idx][i][: :c]<for>i range(num_outputs)]<line_sep>heatmaps_gt=torch.cat(heatmaps_gt dim=1)<line_sep>mask=[masks[idx][i].expand_as(outputs[idx][0])<for>i range(num_outputs)]<line_sep>mask=torch.cat(mask dim=1)<block_end><else_stmt><block_start>heatmaps_gt=torch.cat([heatmaps[idx][0][: :c]<for>i range(num_outputs)] dim=1)<line_sep>mask=[masks[idx][0].expand_as(outputs[idx][0])<for>i range(num_outputs)]<line_sep>mask=torch.cat(mask dim=1)<block_end><block_end><else_stmt><block_start>heatmaps_pred=outputs[idx][0]<line_sep>c=heatmaps_pred.shape[1]<line_sep>heatmaps_gt=heatmaps[idx][0][: :c]<line_sep>mask=masks[idx][0].expand_as(heatmaps_pred)<block_end>heatmaps_loss=with_heatmaps_loss(heatmaps_pred heatmaps_gt 
mask)<line_sep>heatmaps_loss=heatmaps_loss<times>self.heatmaps_loss_factor[0]<line_sep>heatmaps_losses.append(heatmaps_loss)<block_end><else_stmt><block_start>heatmaps_losses.append(<none>)<block_end><if_stmt>with_offsets_loss<and>len(poffsets[idx])<g>0<block_start>num_poffsets=len(poffsets[idx])<if_stmt>num_poffsets<g>1<block_start>offset_pred=torch.cat(poffsets[idx] dim=1)<line_sep>offset_gt=torch.cat([offsets[idx][0]<for>i range(num_poffsets)] dim=1)<line_sep>offset_w=torch.cat([offset_w[idx][0]<for>i range(num_poffsets)] dim=1)<block_end><else_stmt><block_start>offset_pred=poffsets[idx][0]<line_sep>offset_gt=offsets[idx][0]<line_sep>offset_w=offset_w[idx][0]<block_end>offsets_loss=with_offsets_loss(offset_pred offset_gt offset_w)<line_sep>offsets_loss=offsets_loss<times>self.offsets_loss_factor[0]<line_sep>offsets_losses.append(offsets_loss)<block_end><else_stmt><block_start>offsets_losses.append(<none>)<block_end><block_end><return>heatmaps_losses offsets_losses<block_end><def_stmt>_init_check self cfg<block_start><assert_stmt>isinstance(cfg.LOSS.WITH_HEATMAPS_LOSS (list tuple)) 'LOSS.WITH_HEATMAPS_LOSS should be a list or tuple'<assert_stmt>isinstance(cfg.LOSS.HEATMAPS_LOSS_FACTOR (list tuple)) 'LOSS.HEATMAPS_LOSS_FACTOR should be a list or tuple'<assert_stmt>len(cfg.LOSS.WITH_HEATMAPS_LOSS)<eq>cfg.LOSS.NUM_STAGES 'LOSS.WITH_HEATMAPS_LOSS and LOSS.NUM_STAGE should have same length, got {} vs {}.'.format(len(cfg.LOSS.WITH_HEATMAPS_LOSS) cfg.LOSS.NUM_STAGES)<assert_stmt>len(cfg.LOSS.WITH_HEATMAPS_LOSS)<eq>len(cfg.LOSS.HEATMAPS_LOSS_FACTOR) 'LOSS.WITH_HEATMAPS_LOSS and LOSS.HEATMAPS_LOSS_FACTOR should have same length, got {} vs {}.'.format(len(cfg.LOSS.WITH_HEATMAPS_LOSS) len(cfg.LOSS.HEATMAPS_LOSS_FACTOR))<block_end><block_end>
|
<import_from_stmt>manimlib.imports *<import_stmt>matplotlib.pyplot<as>plt<import_stmt>csv<import_stmt>codecs<line_sep># import pandas as pd
# import ctypes
# # from https://www.cnpython.com/qa/81434
# def GetTextLength(text, points=10, font='思源黑体 Bold'):
# class SIZE(ctypes.Structure):
# _fields_ = [("cx", ctypes.c_long), ("cy", ctypes.c_long)]
#
# hdc = ctypes.windll.user32.GetDC(0)
# hfont = ctypes.windll.gdi32.CreateFontA(points, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, font)
# hfont_old = ctypes.windll.gdi32.SelectObject(hdc, hfont)
#
# size = SIZE(0, 0)
# ctypes.windll.gdi32.GetTextExtentPoint32A(hdc, text, len(text), ctypes.byref(size))
#
# ctypes.windll.gdi32.SelectObject(hdc, hfont_old)
# ctypes.windll.gdi32.DeleteObject(hfont)
#
# # return (size.cx, size.cy)
# return size.cx
<def_stmt>get_text_length text# quick-and-dirty helper that roughly estimates the display length of a string
<block_start>l=0<for_stmt>ch text<block_start><if_stmt>ch<in>'abcdefghijklmnopqrstuvwxyz1234567890_'<block_start>l<augadd>0.5<block_end><else_stmt><block_start>l<augadd>1<block_end><block_end><return>l<block_end><class_stmt>Test_max_length_of_Text(Scene)<block_start><def_stmt>construct self<block_start>t=Text('哈'<times>1000 font='思源黑体 Bold' color=WHITE size=0.05).set_width(100).move_to(ORIGIN)<line_sep>print(len(t.text))<line_sep>print(len(t))<line_sep>self.add(t)<line_sep>self.wait()<block_end><block_end><class_stmt>Test(Scene)<block_start><def_stmt>construct self<block_start>im=plt.imread(r'E:\GitHub\manim\my_manim_projects\my_projects\resource\png_files\m_set_01.bmp')<line_sep>Z=im[: : 0]<line_sep>nx,ny=len(Z[0])-1 len(Z)-1<def_stmt>set_color4text Text<block_start><for_stmt>t Text<block_start>loc=t.get_center()<line_sep>j,i=int((loc[0]/FRAME_WIDTH+1/2)<times>nx) int((-loc[1]/FRAME_HEIGHT+1/2)<times>ny)<line_sep>t.set_color(rgb_to_hex(im[i j]/255))<block_end><block_end>text_str=''<line_sep>num=0<line_sep>text_all=VGroup()<line_sep>str_01='可爱的cigar666的粉丝'<for_stmt>i range(6000)<block_start>text_str_i='@'+str_01[0:np.random.randint(2 14)]<line_sep>num<augadd>len(text_str_i)<line_sep>text_str<augadd>text_str_i<if_stmt>num<g>400<block_start>t=Text(text_str font='思源黑体 Bold' size=0.09)<line_sep># set_color4text(t)
text_all.add(t)<line_sep>print(len(t))<line_sep>text_str=''<line_sep>num=0<block_end><block_end>text_all.arrange(DOWN buff=0.005 aligned_edge=LEFT)<if_stmt>text_all.get_height()/text_all.get_width()<g>8/14<block_start>text_all.set_height(7.9)<block_end><else_stmt><block_start>text_all.set_width(13.8)<block_end><for_stmt>text text_all<block_start>set_color4text(text)<block_end>self.add(text_all)<line_sep>self.wait(1)<block_end><block_end># data = []
#
# f = open(r"E:\GitHub\manim\my_manim_projects\my_projects\resource\data\FollowerData.csv", "r", encoding="utf8")
# reader = csv.reader(f)
# print(type(reader))
# for row in reader:
# data.append(row)
# print(row)
# fans_name = np.array(data)[:, 1]
# f.close()
# print('##################')
# print(sorted(fans_name, reverse=False, key=lambda name: len(name)))
<class_stmt>Show_followers(Scene)<block_start>CONFIG={'image_path':r'E:\GitHub\manim\my_manim_projects\my_projects\resource\png_files\m_set_01.bmp' # path to the source image
'data_file_path':r"E:\GitHub\manim\my_manim_projects\my_projects\resource\data\FollowerData.csv" # follower data (csv format)
'line_length':600 # approximate text length per line; since the follower count varies, this affects the aspect ratio of the rendered text,
# and because follower ids differ in length it is hard to give an exact value; tune it at a low resolution first and only then render at high resolution,
# or shrink the data set first to estimate this parameter
}<def_stmt>construct self<block_start>data=[]<line_sep>f=open(self.data_file_path "r" encoding="utf8")<line_sep>reader=csv.reader(f)<line_sep>print(type(reader))<for_stmt>row reader<block_start>data.append(row)<line_sep># print(row)
<block_end>f.close()<line_sep>fans_name=np.array(data)[: 1]<line_sep>names=fans_name# if you comment out this line and use the next one instead, the ordering changes
# names = sorted(fans_name, reverse=False, key=lambda name: get_text_length(name)) # enable this to sort by name length; otherwise the names keep their follow-time order
im=plt.imread(self.image_path)<line_sep>Z=im[: : 0]<line_sep>nx,ny=len(Z[0])-1 len(Z)-1<line_sep>final_str=''<def_stmt>set_color4text Text<block_start><for_stmt>t Text<block_start>loc=t.get_center()<line_sep>j,i=int((loc[0]/FRAME_WIDTH+1/2)<times>nx) int((-loc[1]/FRAME_HEIGHT+1/2)<times>ny)<line_sep>t.set_color(rgb_to_hex(im[i j]/255))<block_end><block_end>text_str=''<line_sep>l_max=0<line_sep>line_num=0<line_sep>text_all=VGroup()<for_stmt>i range(1 len(names))<block_start>text_str_i='@'+names[i]<line_sep># length_i = GetTextLength(text_str_i)
length_i=get_text_length(text_str_i)<line_sep>l_max<augadd>length_i<line_sep>text_str<augadd>text_str_i<if_stmt>l_max<g>self.line_length-length_i/2<block_start>line_num<augadd>1<line_sep>text_str=str(line_num)+' '+text_str<line_sep>t=Text(text_str font='思源黑体 Bold' size=0.08)<line_sep># set_color4text(t)
text_all.add(t)<line_sep>print(l_max)<line_sep>final_str<augadd>text_str+'\n'<line_sep>text_str=''<line_sep>l_max=0<block_end><block_end>line_num<augadd>1<line_sep>text_str=str(line_num)+' '+text_str# + '@cigar666'
t=Text(text_str font='思源黑体 Bold' size=0.08)<line_sep>text_all.add(t)<line_sep>print(l_max)<line_sep>final_str<augadd>text_str# + '@cigar666' # only after everything was rendered did I realize the "+ '@cigar666'" on this line should have gone further up; oh well, my attempt to sneak into the group photo failed o(╯□╰)o
f=codecs.open('get_loction_of_fans.txt' 'w' encoding='utf-8')<line_sep>print(final_str)<line_sep>f.write(final_str)<line_sep>f.close()<line_sep>text_all.arrange(DOWN buff=0.005 aligned_edge=LEFT)<if_stmt>text_all.get_height()/text_all.get_width()<g>FRAME_HEIGHT/FRAME_WIDTH<block_start>text_all.set_height(FRAME_HEIGHT-0.1)<block_end><else_stmt><block_start>text_all.set_width(FRAME_WIDTH-0.1)<block_end><for_stmt>text text_all<block_start>set_color4text(text)<block_end>self.add(text_all)<line_sep>self.wait(1)<block_end><block_end><class_stmt>Show_followers_PythagoreanTree(Show_followers)<block_start>CONFIG={'image_path':r'E:\GitHub\manim\my_manim_projects\my_projects\resource\png_files\Test_PythagoreanTree.bmp' # path to the source image
'data_file_path':r"E:\GitHub\manim\my_manim_projects\my_projects\resource\data\FollowerData.csv" # 粉丝数据(csv格式)
'line_length':540 # 每行文字的大致长度,具体粉丝数量不同这个会影响文字排出来的长宽比,
# 因为粉丝id长短不一所以难以给出具体值,建议先低分辨率试好了再调高分辨率
# 也可先缩小数据规模来预估参数
}<block_end><class_stmt>Show_followers_Misaka(Show_followers)<block_start>CONFIG={'image_path':r'E:\GitHub\manim\my_manim_projects\my_projects\resource\png_files\Misaka.bmp' # 图片路径
'data_file_path':r"E:\GitHub\manim\my_manim_projects\my_projects\resource\data\FollowerData.csv" # 粉丝数据(csv格式)
'line_length':540 # 每行文字的大致长度,具体粉丝数量不同这个会影响文字排出来的长宽比,
# 因为粉丝id长短不一所以难以给出具体值,建议先低分辨率试好了再调高分辨率
# 也可先缩小数据规模来预估参数
}<block_end>
|
<import_stmt>sys<import_from_stmt>scrapyd_client lib<import_from_stmt>scrapyd_client.utils indent<line_sep>INDENT_PREFIX=' '<def_stmt>deploy args<block_start>""" Deploys a Scrapy project to a Scrapyd instance.
For help on this command, invoke `scrapyd-deploy`. """<import_from_stmt>scrapyd_client deploy<line_sep>sys.argv.pop(1)<line_sep>deploy.main()<block_end><def_stmt>projects args<block_start>""" Lists all projects deployed on a Scrapyd instance. """<line_sep>_projects=lib.get_projects(args.target)<if_stmt>_projects<block_start>print('\n'.join(_projects))<block_end><block_end><def_stmt>schedule args<block_start>""" Schedules the specified spider(s). """<line_sep>job_args=dict((x[0] x[1])<for>x (y.split('=' 1)<for>y args.arg))<line_sep>_projects=lib.get_projects(args.target args.project)<for_stmt>project _projects<block_start>_spiders=lib.get_spiders(args.target project args.spider)<for_stmt>spider _spiders<block_start>job_id=lib.schedule(args.target project spider job_args)<line_sep>print(f'{project} / {spider} => {job_id}')<block_end><block_end><block_end><def_stmt>spiders args<block_start>""" Lists all spiders for the given project(s). """<line_sep>_projects=lib.get_projects(args.target args.project)<for_stmt>project _projects<block_start>project_spiders=lib.get_spiders(args.target project)<if_stmt><not>args.verbose<block_start>print(f'{project}:')<if_stmt>project_spiders<block_start>print(indent('\n'.join(project_spiders) INDENT_PREFIX))<block_end><else_stmt><block_start>print(INDENT_PREFIX+'No spiders.')<block_end><block_end><elif_stmt>project_spiders<block_start>print('\n'.join(f'{project} {x}'<for>x project_spiders))<block_end><block_end><block_end>
|
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""<line_sep>"""
Copyright (c) 2016-2019 <NAME> http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""<import_stmt>sys<import_stmt>logging<import_stmt>traceback<import_stmt>json<import_stmt>datetime<class_stmt>YLoggerSnapshot(object)<block_start><def_stmt>__init__ self criticals=0 fatals=0 errors=0 exceptions=0 warnings=0 infos=0 debugs=0<block_start>self._criticals=criticals<line_sep>self._fatals=fatals<line_sep>self._errors=errors<line_sep>self._exceptions=exceptions<line_sep>self._warnings=warnings<line_sep>self._infos=infos<line_sep>self._debugs=debugs<block_end><def_stmt>__str__ self<block_start><return>"Critical(%d) Fatal(%d) Error(%d) Exception(%d) Warning(%d) Info(%d), Debug(%d)"%(self._criticals self._fatals self._errors self._exceptions self._warnings self._infos self._debugs)<block_end><block_end><class_stmt>YLogger(object)<block_start>CRITICALS=0<line_sep>FATALS=0<line_sep>ERRORS=0<line_sep>EXCEPTIONS=0<line_sep>WARNINGS=0<line_sep>INFOS=0<line_sep>DEBUGS=0<line_sep>IS_STDOUT=<false><line_sep>IS_STDERR=<false><line_sep>PREFIX="Yadlan"<line_sep>IS_TRACEBACK=<true><line_sep>DEFAULT_LEVEL=<none><line_sep>@staticmethod<def_stmt>snapshot <block_start><return>YLoggerSnapshot(YLogger.CRITICALS YLogger.FATALS YLogger.ERRORS YLogger.EXCEPTIONS YLogger.WARNINGS YLogger.INFOS YLogger.DEBUGS)<block_end>@staticmethod<def_stmt>reset_snapshot <block_start>YLogger.CRITICALS=0<line_sep>YLogger.FATALS=0<line_sep>YLogger.ERRORS=0<line_sep>YLogger.EXCEPTIONS=0<line_sep>YLogger.WARNINGS=0<line_sep>YLogger.INFOS=0<line_sep>YLogger.DEBUGS=0<block_end>@staticmethod<def_stmt>format_message caller message<block_start><if_stmt>caller<is><not><none><block_start><if_stmt>hasattr(caller "ylogger_type")<block_start>log_type=caller.ylogger_type()<if_stmt>log_type<eq>'client'<block_start><return>"[%s] - %s"%(caller.id message)<block_end><elif_stmt>log_type<eq>'bot'<block_start><return>"[%s] [%s] - %s"%(caller.client.id<if>caller.client<is><not><none><else>"" caller.id message)<block_end><elif_stmt>log_type<eq>'brain'<block_start>clientid=""<line_sep>botid=""<if_stmt>caller.bot<is><not><none><block_start><if_stmt>caller.bot.client<is><not><none><block_start>clientid=caller.bot.client.id<block_end>botid=caller.bot.id<block_end><return>"[%s] [%s] [%s] - %s"%(clientid botid caller.id message)<block_end><elif_stmt>log_type<eq>'context'<block_start><return>"[%s] [%s] [%s] [%s] - %s"%(caller.client.id<if>caller.client<is><not><none><else>"" caller.bot.id<if>caller.bot<is><not><none><else>"" caller.brain.id<if>caller.brain<is><not><none><else>"" caller.userid message)<block_end><block_end><block_end><return>message<block_end>@staticmethod<def_stmt>set_default_level <block_start>YLogger.DEFAULT_LEVEL='none'<line_sep>level=logging.getLogger().getEffectiveLevel()<if_stmt>level<eq>logging.CRITICAL<or>level<eq>logging.FATAL<or>level<eq>logging.ERROR<block_start>YLogger.DEFAULT_LEVEL='error'<block_end><elif_stmt>level<eq>logging.WARNING<block_start>YLogger.DEFAULT_LEVEL='warning'<block_end><elif_stmt>level<eq>logging.INFO<block_start>YLogger.DEFAULT_LEVEL='info'<block_end><elif_stmt>level<eq>logging.DEBUG<block_start>YLogger.DEFAULT_LEVEL='debug'<block_end>logging.getLogger().setLevel(level=logging.DEBUG)<block_end>@staticmethod<def_stmt>check_loglevel caller level<block_start><if_stmt>YLogger.DEFAULT_LEVEL<is><none><block_start><return>logging.getLogger().isEnabledFor(level)<block_end>out_level=YLogger.DEFAULT_LEVEL<if_stmt>caller<is><not><none><block_start><if_stmt>hasattr(caller 
"get_loglevel")<block_start>client_loglevel=caller.get_loglevel()<if_stmt>client_loglevel<is><not><none><block_start>out_level=client_loglevel<block_end><block_end><block_end><if_stmt>level<eq>logging.CRITICAL<or>level<eq>logging.FATAL<or>level<eq>logging.ERROR<block_start><if_stmt>out_level<in>['error' 'warning' 'info' 'debug']<block_start><return><true><block_end><block_end><elif_stmt>level<eq>logging.WARNING<block_start><if_stmt>out_level<in>['warning' 'info' 'debug']<block_start><return><true><block_end><block_end><elif_stmt>level<eq>logging.INFO<block_start><if_stmt>out_level<in>['info' 'debug']<block_start><return><true><block_end><block_end><elif_stmt>level<eq>logging.DEBUG<block_start><if_stmt>out_level<eq>'debug'<block_start><return><true><block_end><block_end><return><false><block_end>@staticmethod<def_stmt>critical caller message *args **kwargs<block_start>YLogger.CRITICALS<augadd>1<if_stmt>YLogger.check_loglevel(caller logging.CRITICAL)<block_start>logging.critical(YLogger.format_message(caller message) *args **kwargs)<line_sep>YLogger.yadlan_stderr(caller "critical" message *args **kwargs)<block_end><block_end>@staticmethod<def_stmt>fatal caller message *args **kwargs<block_start>YLogger.FATALS<augadd>1<if_stmt>YLogger.check_loglevel(caller logging.FATAL)<block_start>logging.fatal(YLogger.format_message(caller message) *args **kwargs)<line_sep>YLogger.yadlan_stderr(caller "fatal" message *args **kwargs)<block_end><block_end>@staticmethod<def_stmt>error caller message *args **kwargs<block_start>YLogger.ERRORS<augadd>1<if_stmt>YLogger.check_loglevel(caller logging.ERROR)<block_start>logging.error(YLogger.format_message(caller message) *args **kwargs)<line_sep>YLogger.yadlan_stderr(caller "error" message *args **kwargs)<block_end><block_end>@staticmethod<def_stmt>exception caller message exception *args **kwargs<block_start>YLogger.EXCEPTIONS<augadd>1<if_stmt>YLogger.check_loglevel(caller logging.ERROR)<block_start>excep_msg="%s [%s]"%(message str(exception))<line_sep>logging.error(YLogger.format_message(caller excep_msg) *args **kwargs)<if_stmt>YLogger.IS_TRACEBACK<is><true><and>exception<is><not><none><block_start>tb_lines=[line.rstrip('\n')<for>line traceback.format_exception(exception.__class__ exception exception.__traceback__)]<for_stmt>line tb_lines<block_start>logging.error(YLogger.format_message(caller line))<line_sep>YLogger.yadlan_stderr(caller "exception" message *args **kwargs)<block_end><block_end><block_end><block_end>@staticmethod<def_stmt>warning caller message *args **kwargs<block_start>YLogger.WARNINGS<augadd>1<if_stmt>YLogger.check_loglevel(caller logging.WARNING)<block_start>logging.warning(YLogger.format_message(caller message) *args **kwargs)<line_sep>YLogger.yadlan_stdout(caller "warning" message *args **kwargs)<block_end><block_end>@staticmethod<def_stmt>info caller message *args **kwargs<block_start>YLogger.INFOS<augadd>1<if_stmt>YLogger.check_loglevel(caller logging.INFO)<block_start>logging.info(YLogger.format_message(caller message) *args **kwargs)<line_sep>YLogger.yadlan_stdout(caller "info" message *args **kwargs)<block_end><block_end>@staticmethod<def_stmt>debug caller message *args **kwargs<block_start>YLogger.DEBUGS<augadd>1<if_stmt>YLogger.check_loglevel(caller logging.DEBUG)<block_start>logging.debug(YLogger.format_message(caller message) *args **kwargs)<line_sep>YLogger.yadlan_stdout(caller "debug" message *args **kwargs)<block_end><block_end>@staticmethod<def_stmt>set_stdout 
status<block_start><if_stmt>status<ne>"True"<block_start>YLogger.IS_STDOUT=<false><block_end><else_stmt><block_start>YLogger.IS_STDOUT=<true><block_end><block_end>@staticmethod<def_stmt>set_stderr status<block_start><if_stmt>status<ne>"True"<block_start>YLogger.IS_STDERR=<false><block_end><else_stmt><block_start>YLogger.IS_STDERR=<true><block_end><block_end>@staticmethod<def_stmt>set_prefix prefix<block_start>YLogger.PREFIX=prefix<block_end>@staticmethod<def_stmt>set_traceback setting:bool<block_start>YLogger.IS_TRACEBACK=setting<block_end>@staticmethod<def_stmt>format_yadlan_message prefix level caller message<block_start>botid=""<line_sep>brainid=""<line_sep>userid=""<try_stmt><block_start>botid=caller.bot.id<block_end><except_stmt>Exception<block_start><pass><block_end><try_stmt><block_start>brainid=caller.brain.id<block_end><except_stmt>Exception<block_start><pass><block_end><try_stmt><block_start>userid=caller.userid<block_end><except_stmt>Exception<block_start><pass><block_end>messageDict=message<try_stmt><block_start>messageDict=json.loads(message encoding="utf-8")<block_end><except_stmt>Exception<block_start><pass><block_end>dt_now=datetime.datetime.now()<line_sep>dict={"time":str(dt_now) "status":level "bot_id":prefix "botid":botid "brainid":brainid "userid":userid "message":messageDict}<line_sep><return>json.dumps(dict ensure_ascii=<false>)<block_end>@staticmethod<def_stmt>yadlan_stdout caller level message *args **kwargs<block_start><if_stmt>YLogger.IS_STDOUT<block_start><if_stmt>len(args)<eq>0<block_start>sys.stdout.write(YLogger.format_yadlan_message(YLogger.PREFIX level caller message)+"\n")<block_end><else_stmt><block_start>sys.stdout.write(YLogger.format_yadlan_message(YLogger.PREFIX level caller message)%args+"\n")<block_end>sys.stdout.flush()<block_end><block_end>@staticmethod<def_stmt>yadlan_stderr caller level message *args **kwargs<block_start><if_stmt>YLogger.IS_STDERR<block_start>sys.stderr.write(YLogger.format_yadlan_message(YLogger.PREFIX level caller message)%args+"\n")<line_sep>sys.stderr.flush()<block_end><block_end><block_end>
|
<import_stmt>re<import_from_stmt>ztag.annotation Annotation<import_from_stmt>ztag.annotation OperatingSystem<import_from_stmt>ztag.annotation Type<import_from_stmt>ztag.annotation Manufacturer<import_from_stmt>ztag protocols<import_stmt>ztag.test<class_stmt>FtpBelkin(Annotation)<block_start>protocol=protocols.FTP<line_sep>subprotocol=protocols.FTP.BANNER<line_sep>port=<none><line_sep>manufact_re=re.compile("^220 Belkin Network USB Hub Ver \d+\.\d+\.\d+ FTP" re.IGNORECASE)<line_sep>tests={"FtpBelkin_1":{"global_metadata":{"device_type":Type.USB_HUB "manufacturer":Manufacturer.BELKIN}}}<def_stmt>process self obj meta<block_start>banner=obj["banner"]<line_sep>tagged=<false><if_stmt>self.manufact_re.search(banner)<block_start>meta.global_metadata.device_type=Type.USB_HUB<line_sep>meta.global_metadata.manufacturer=Manufacturer.BELKIN<line_sep>tagged=<true><block_end><if_stmt>tagged<block_start><return>meta<block_end><else_stmt><block_start><return><none><block_end><block_end><block_end>
|
"""Utility functions for version checking."""<import_stmt>warnings<line_sep>__all__=['check_version']<def_stmt>check_version min_version warning_only=<false><block_start>"""Check the version of gluoncv satisfies the provided minimum version.
An exception is thrown if the check does not pass.
Parameters
----------
min_version : str
Minimum version
warning_only : bool
If True, print a warning instead of throwing an exception.
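Example
-------
A minimal illustrative call; the version string here is arbitrary:

    check_version('0.6.0', warning_only=True)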
"""<import_from_stmt>.. __version__<import_from_stmt>distutils.version LooseVersion<line_sep>bad_version=LooseVersion(__version__)<l>LooseVersion(min_version)<if_stmt>bad_version<block_start>msg='Installed GluonCV version (%s) does not satisfy the '<concat>'minimum required version (%s)'%(__version__ min_version)<if_stmt>warning_only<block_start>warnings.warn(msg)<block_end><else_stmt><block_start><raise>AssertionError(msg)<block_end><block_end><block_end>
|
'Project-wide default values'<import_stmt>sys<import_from_stmt>os path<line_sep>FORGE_ROOT=path.dirname(path.dirname(__file__))<line_sep>CONFIG_FILE=path.join(FORGE_ROOT 'forge_build.json')<line_sep>PASSWORD="<PASSWORD>"<line_sep>SRC_DIR='src'<line_sep>APP_CONFIG_FILE=path.join(SRC_DIR 'config.json')<line_sep>IDENTITY_FILE=path.join(SRC_DIR 'identity.json')<line_sep>LOCAL_CONFIG_FILE='local_config.json'<line_sep>TEMPLATE_DIR='.template'<line_sep>INSTRUCTIONS_DIR=TEMPLATE_DIR<line_sep>
|
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
# -*- coding: utf-8 -*-
__author__='<EMAIL> (<NAME>)'<import_stmt>datetime<import_stmt>logging<import_stmt>mock<import_stmt>time<import_from_stmt>functools partial<import_from_stmt>tornado options<import_from_stmt>viewfinder.backend.base otp util<import_from_stmt>viewfinder.backend.base.testing async_test<import_from_stmt>viewfinder.backend.db client_log<import_from_stmt>viewfinder.backend.www json_schema<import_from_stmt>viewfinder.backend.www.test service_base_test<import_from_stmt>viewfinder.backend.www.test.service_base_test ClientLogRecord<class_stmt>NewClientLogUrlTestCase(service_base_test.ServiceBaseTestCase)<block_start><def_stmt>setUp self<block_start>super(NewClientLogUrlTestCase self).setUp()<line_sep># Write sample client logs.
self._cur_t=time.time()<line_sep>self._t_minus_1d=self._cur_t-24<times>60<times>60<line_sep>self._t_minus_2d=self._cur_t-2<times>24<times>60<times>60<line_sep>self._logs=[(self._cookie ClientLogRecord(self._t_minus_2d 'cl1.t-2' 'log 1, t-2')) (self._cookie ClientLogRecord(self._t_minus_1d 'cl1.t-1' 'log 1, t-1')) (self._cookie ClientLogRecord(self._cur_t 'cl1.t.1' 'log 1, t')) (self._cookie ClientLogRecord(self._cur_t 'cl1.t.2' 'log 2, t')) (self._cookie2 ClientLogRecord(self._cur_t 'cl2' 'user 2, log 1, t'))]<for_stmt>user_cookie,log self._logs<block_start>self._WriteClientLog(user_cookie log)<block_end><block_end><def_stmt>testListClientLogs self<block_start>"""Verify listing of client logs."""<line_sep>start_timestamp=self._cur_t<line_sep>end_timestamp=start_timestamp<line_sep>response_dict=self._tester.SendAdminRequest('list_client_logs' {'user_id':self._users[0].user_id 'start_timestamp':start_timestamp 'end_timestamp':end_timestamp})<line_sep>urls=self._FilterList(response_dict['log_urls'])<line_sep>self.assertEqual(2 len(urls))<line_sep>content=self._FetchClientLog(urls[0]['url'])<line_sep>self.assertEqual('log 1, t' content)<line_sep>content=self._FetchClientLog(urls[1]['url'])<line_sep>self.assertEqual('log 2, t' content)<block_end><def_stmt>testMultipleDates self<block_start>"""Verify logs can be listed for multiple dates."""<line_sep>start_timestamp=self._t_minus_2d<line_sep>end_timestamp=self._cur_t<line_sep>response_dict=self._tester.SendAdminRequest('list_client_logs' {'user_id':self._users[0].user_id 'start_timestamp':start_timestamp 'end_timestamp':end_timestamp})<line_sep>urls=self._FilterList(response_dict['log_urls'])<line_sep>self.assertEqual(4 len(urls))<block_end><def_stmt>testListFilter self<block_start>"""Verify logs can be filtered via regexp."""<line_sep>start_timestamp=self._cur_t<line_sep>end_timestamp=self._cur_t<line_sep>response_dict=self._tester.SendAdminRequest('list_client_logs' {'user_id':self._users[0].user_id 'start_timestamp':start_timestamp 'end_timestamp':end_timestamp 'filter':'cl1.t.2'})<line_sep>urls=self._FilterList(response_dict['log_urls'])<line_sep>self.assertEqual(1 len(urls))<line_sep>self.assertTrue(urls[0]['filename'].endswith('dev-2-cl1.t.2'))<block_end>@mock.patch.object(client_log 'MAX_CLIENT_LOGS' 1)<def_stmt>testLimit self<block_start>"""Verify limit is respected."""<line_sep>response_dict=self._tester.SendAdminRequest('list_client_logs' {'user_id':self._users[0].user_id 'start_timestamp':self._cur_t 'end_timestamp':self._cur_t 'filter':'dev-2'})<line_sep>urls=response_dict['log_urls']<line_sep>self.assertEqual(2 len(urls))<line_sep>self.assertTrue(urls[0]['filename'].endswith('dev-2-cl1.t.1'))<line_sep>self.assertTrue(urls[1]['filename'].endswith('dev-2-cl1.t.2'))<block_end><def_stmt>_FetchClientLog self url<block_start>"""Fetches the client log specified by "url" and returns the
contents to "callback".
"""<line_sep>response=self._RunAsync(self._tester.http_client.fetch url method='GET')<line_sep>self.assertEqual(200 response.code)<line_sep><return>response.body<block_end><def_stmt>_FilterList self log_urls<block_start>"""Remove op logs from response that were created by base class user
registration.
"""<line_sep><return>[log_url<for>log_url log_urls<if>'Operation'<not><in>log_url['url']]<block_end><block_end>
|
# -- coding: iso8859-1
"""Generic option parser class. This class can be used
to write code that will parse command line options for
an application by invoking one of the standard Python
library command argument parser modules optparse or
getopt.
The class first tries to use optparse. If it is not there
(< Python 2.3), it invokes getopt. However, this is
transparent to the application which uses the class.
The class requires a dictionary with entries of the following
form for each command line option.
'option_var' : ('short=<short option>','long=<long option>',
'help=<help string>', 'meta=<meta variable>',
'default=<default value>', 'type=<option type>')
where, 'option_var' is the key for the option in the final
dictionary of option-value pairs. The value is a tuple of
strings, where each string consists of entries of the form,
'key=value', where 'key' is borrowed from the way optparse
represents each variable for an option setting.
To parse the arguments, call the method 'parse_arguments'.
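For example (an illustrative sketch; the option names here are made up):

    optmap = {
        'infile': ('short=i', 'long=in', 'help=Input file', 'meta=IN'),
        'verbose': ('short=V', 'long=verbose', 'help=Be verbose', 'type=bool')
    }
    options = GenericOptionParser(optmap).parse_arguments()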
The return value is a dictionary of the option-value pairs."""<import_stmt>sys<line_sep>__author__="<NAME>"<class_stmt>GenericOptionParserError(Exception)<block_start><def_stmt>__init__ self value<block_start>self.value=value<block_end><def_stmt>__str__ self<block_start><return>str(self.value)<block_end><block_end><class_stmt>GenericOptionParser<block_start>""" Generic option parser using
either optparse or getopt """<def_stmt>__init__ self optmap<block_start>self._optmap=self._parse_optmap(optmap)<line_sep>self._optdict={}<line_sep>self.maxw=24<block_end><def_stmt>_parse_optmap self map<block_start>""" Internal method -> Parse option
map containing tuples and convert the
tuples to a dictionary """<line_sep>optmap={}<for_stmt>key,value map.items()<block_start>d={}<for_stmt>item value<block_start><if_stmt><not>item<block_start><continue><block_end>var,val=item.split('=')<line_sep>d[var]=val<block_end>optmap[key]=d<block_end><return>optmap<block_end><def_stmt>parse_arguments self<block_start>""" Parse command line arguments and
return a dictionary of option-value pairs """<try_stmt><block_start>self.optparse=__import__('optparse')<line_sep># For invoking help, when no arguments
# are passed.
<if_stmt>len(sys.argv)<eq>1<block_start>sys.argv.append('-h')<block_end>self._parse_arguments1()<block_end><except_stmt>ImportError<block_start><try_stmt><block_start><import_stmt>getopt<line_sep>self.getopt=__import__('getopt')<line_sep>self._parse_arguments2()<block_end><except_stmt>ImportError<block_start><raise>GenericOptionParserError 'Fatal Error: No optparse or getopt modules found'<block_end><block_end><return>self._optdict<block_end><def_stmt>_parse_arguments1 self<block_start>""" Parse command-line arguments using optparse """<line_sep>p=self.optparse.OptionParser()<for_stmt>key,value self._optmap.items()# Option destination is the key itself
<block_start>option=key<line_sep># Default action is 'store'
action='store'<line_sep># Short option string
sopt=value.get('short' '')<line_sep># Long option string
lopt=value.get('long' '')<line_sep># Help string
helpstr=value.get('help' '')<line_sep># Meta var
meta=value.get('meta' '')<line_sep># Default value
defl=value.get('default' '')<line_sep># Default type is 'string'
typ=value.get('type' 'string')<line_sep># If bool type...
<if_stmt>typ<eq>'bool'<block_start>action='store_true'<line_sep>defl=bool(str(defl)<eq>'True')<block_end><if_stmt>sopt<block_start>sopt='-'+sopt<block_end><if_stmt>lopt<block_start>lopt='--'+lopt<block_end># Add option
p.add_option(sopt lopt dest=option help=helpstr metavar=meta action=action default=defl)<block_end>(options args)=p.parse_args()<line_sep>self._optdict=options.__dict__<block_end><def_stmt>_parse_arguments2 self<block_start>""" Parse command-line arguments using getopt """<line_sep># getopt requires help string to
# be generated.
<if_stmt>len(sys.argv)<eq>1<block_start>sys.exit(self._usage())<block_end>shortopt,longopt='h' ['help']<line_sep># Create short option string and long option
# list for getopt
<for_stmt>key,value self._optmap.items()<block_start>sopt=value.get('short' '')<line_sep>lopt=value.get('long' '')<line_sep>typ=value.get('type' 'string')<line_sep>defl=value.get('default' '')<line_sep># If bool type...
<if_stmt>typ<eq>'bool'<block_start>defl=bool(str(defl)<eq>'True')<block_end># Set default value
self._optdict[key]=defl<if_stmt>typ<eq>'bool'<block_start><if_stmt>sopt<block_start>shortopt<augadd>sopt<block_end><if_stmt>lopt<block_start>longopt.append(lopt)<block_end><block_end><else_stmt><block_start><if_stmt>sopt<block_start>shortopt="".join((shortopt sopt ':'))<block_end><if_stmt>lopt<block_start>longopt.append(lopt+'=')<block_end><block_end><block_end># Parse
(optlist args)=self.getopt.getopt(sys.argv[1:] shortopt longopt)<line_sep># Match options
<for_stmt>opt,val optlist# Invoke help
<block_start><if_stmt>opt<in>('-h' '--help')<block_start>sys.exit(self._usage())<block_end><for_stmt>key,value self._optmap.items()<block_start>sopt='-'+value.get('short' '')<line_sep>lopt='--'+value.get('long' '')<line_sep>typ=value.get('type' 'string')<if_stmt>opt<in>(sopt lopt)<block_start><if_stmt>typ<eq>'bool'<block_start>val=<true><block_end>self._optdict[key]=val<del_stmt>self._optmap[key]<line_sep><break><block_end><block_end><block_end><block_end><def_stmt>_usage self<block_start>""" Generate and return a help string
for the program, similar to the one
generated by optparse """<line_sep>usage=["usage: %s [options]\n\n"%sys.argv[0]]<line_sep>usage.append("options:\n")<line_sep>options=[(' -h, --help' 'show this help message and exit\n')]<line_sep>maxlen=0<for_stmt>value self._optmap.values()<block_start>sopt=value.get('short' '')<line_sep>lopt=value.get('long' '')<line_sep>help=value.get('help' '')<line_sep>meta=value.get('meta' '')<line_sep>optstr=""<if_stmt>sopt<block_start>optstr="".join((' -' sopt meta))<block_end><if_stmt>lopt<block_start>optstr="".join((optstr ', --' lopt))<block_end><if_stmt>meta<block_start>optstr="".join((optstr '=' meta))<block_end>l=len(optstr)<if_stmt>l<g>maxlen<block_start>maxlen=l<block_end>options.append((optstr help))<block_end><for_stmt>x range(len(options))<block_start>optstr=options[x][0]<line_sep>helpstr=options[x][1]<if_stmt>maxlen<l>self.maxw-1<block_start>usage.append("".join((optstr (maxlen-len(optstr)+2)<times>' ' helpstr '\n')))<block_end><elif_stmt>len(optstr)<l>self.maxw-1<block_start>usage.append("".join((optstr (self.maxw-len(optstr))<times>' ' helpstr '\n')))<block_end><else_stmt><block_start>usage.append("".join((optstr '\n' self.maxw<times>' ' helpstr '\n')))<block_end><block_end><return>"".join(usage)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>d={'infile':('short=i' 'long=in' 'help=Input file for the program' 'meta=IN') 'outfile':('short=o' 'long=out' 'help=Output file for the program' 'meta=OUT') 'verbose':('short=V' 'long=verbose' 'help=Be verbose in output' 'type=bool')}<line_sep>g=GenericOptionParser(d)<line_sep>optdict=g.parse_arguments()<for_stmt>key,value optdict.items()# Use the option and the value in
# your program
<block_start><ellipsis><block_end><block_end>
|
default_app_config='instances.apps.InstancesConfig'<line_sep>
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>os<import_stmt>re<import_stmt>glob<import_stmt>math<import_stmt>pdb<import_stmt>numpy<as>np<import_from_stmt>multiprocessing Pool<import_from_stmt>functools partial<import_from_stmt>shapely.geometry Polygon<import_stmt>argparse<line_sep>nms_thresh=0.1<line_sep>class_name_15=['plane' 'baseball-diamond' 'bridge' 'ground-track-field' 'small-vehicle' 'large-vehicle' 'ship' 'tennis-court' 'basketball-court' 'storage-tank' 'soccer-ball-field' 'roundabout' 'harbor' 'swimming-pool' 'helicopter']<line_sep>class_name_16=['plane' 'baseball-diamond' 'bridge' 'ground-track-field' 'small-vehicle' 'large-vehicle' 'ship' 'tennis-court' 'basketball-court' 'storage-tank' 'soccer-ball-field' 'roundabout' 'harbor' 'swimming-pool' 'helicopter' 'container-crane']<def_stmt>rbox_iou g p<block_start>"""
Compute the IoU of two rotated boxes, each given as 8 polygon coordinates.
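Example (illustrative): two identical unit squares overlap fully, so

    rbox_iou([0, 0, 1, 0, 1, 1, 0, 1],
             [0, 0, 1, 0, 1, 1, 0, 1])

returns 1.0.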
"""<line_sep>g=np.array(g)<line_sep>p=np.array(p)<line_sep>g=Polygon(g[:8].reshape((4 2)))<line_sep>p=Polygon(p[:8].reshape((4 2)))<line_sep>g=g.buffer(0)<line_sep>p=p.buffer(0)<if_stmt><not>g.is_valid<or><not>p.is_valid<block_start><return>0<block_end>inter=Polygon(g).intersection(Polygon(p)).area<line_sep>union=g.area+p.area-inter<if_stmt>union<eq>0<block_start><return>0<block_end><else_stmt><block_start><return>inter/union<block_end><block_end><def_stmt>py_cpu_nms_poly_fast dets thresh<block_start>"""
Args:
dets: predicted detections of shape (N, 9): 8 polygon coordinates followed by a score
thresh: nms threshold
Returns: index of keep
"""<line_sep>obbs=dets[: 0:-1]<line_sep>x1=np.min(obbs[: 0::2] axis=1)<line_sep>y1=np.min(obbs[: 1::2] axis=1)<line_sep>x2=np.max(obbs[: 0::2] axis=1)<line_sep>y2=np.max(obbs[: 1::2] axis=1)<line_sep>scores=dets[: 8]<line_sep>areas=(x2-x1+1)<times>(y2-y1+1)<line_sep>polys=[]<for_stmt>i range(len(dets))<block_start>tm_polygon=[dets[i][0] dets[i][1] dets[i][2] dets[i][3] dets[i][4] dets[i][5] dets[i][6] dets[i][7]]<line_sep>polys.append(tm_polygon)<block_end>polys=np.array(polys)<line_sep>order=scores.argsort()[::-1]<line_sep>keep=[]<while_stmt>order.size<g>0<block_start>ovr=[]<line_sep>i=order[0]<line_sep>keep.append(i)<line_sep>xx1=np.maximum(x1[i] x1[order[1:]])<line_sep>yy1=np.maximum(y1[i] y1[order[1:]])<line_sep>xx2=np.minimum(x2[i] x2[order[1:]])<line_sep>yy2=np.minimum(y2[i] y2[order[1:]])<line_sep>w=np.maximum(0.0 xx2-xx1)<line_sep>h=np.maximum(0.0 yy2-yy1)<line_sep>hbb_inter=w<times>h<line_sep>hbb_ovr=hbb_inter/(areas[i]+areas[order[1:]]-hbb_inter)<line_sep># h_keep_inds = np.where(hbb_ovr == 0)[0]
h_inds=np.where(hbb_ovr<g>0)[0]<line_sep>tmp_order=order[h_inds+1]<for_stmt>j range(tmp_order.size)<block_start>iou=rbox_iou(polys[i] polys[tmp_order[j]])<line_sep>hbb_ovr[h_inds[j]]=iou<line_sep># ovr.append(iou)
# ovr_index.append(tmp_order[j])
<block_end>inds=np.where(hbb_ovr<le>thresh)[0]<line_sep>order=order[inds+1]<block_end><return>keep<block_end><def_stmt>poly2origpoly poly x y rate<block_start>origpoly=[]<for_stmt>i range(int(len(poly)/2))<block_start>tmp_x=float(poly[i<times>2]+x)/float(rate)<line_sep>tmp_y=float(poly[i<times>2+1]+y)/float(rate)<line_sep>origpoly.append(tmp_x)<line_sep>origpoly.append(tmp_y)<block_end><return>origpoly<block_end><def_stmt>nmsbynamedict nameboxdict nms thresh<block_start>"""
Args:
nameboxdict: dict mapping image name to its list of detections
nms: NMS function to apply per image
thresh: nms threshold
Returns: nms result as dict
"""<line_sep>nameboxnmsdict={x:[]<for>x nameboxdict}<for_stmt>imgname nameboxdict<block_start>keep=nms(np.array(nameboxdict[imgname]) thresh)<line_sep>outdets=[]<for_stmt>index keep<block_start>outdets.append(nameboxdict[imgname][index])<block_end>nameboxnmsdict[imgname]=outdets<block_end><return>nameboxnmsdict<block_end><def_stmt>merge_single output_dir nms pred_class_lst<block_start>"""
Args:
output_dir: directory where the merged per-class result file is written
nms: NMS function to apply per image
pred_class_lst: tuple of (class_name, list of prediction lines for that class)
Returns:
"""<line_sep>class_name,pred_bbox_list=pred_class_lst<line_sep>nameboxdict={}<for_stmt>line pred_bbox_list<block_start>splitline=line.split(' ')<line_sep>subname=splitline[0]<line_sep>splitname=subname.split('__')<line_sep>oriname=splitname[0]<line_sep>pattern1=re.compile(r'__\d+___\d+')<line_sep>x_y=re.findall(pattern1 subname)<line_sep>x_y_2=re.findall(r'\d+' x_y[0])<line_sep>x,y=int(x_y_2[0]) int(x_y_2[1])<line_sep>pattern2=re.compile(r'__([\d+\.]+)__\d+___')<line_sep>rate=re.findall(pattern2 subname)[0]<line_sep>confidence=splitline[1]<line_sep>poly=list(map(float splitline[2:]))<line_sep>origpoly=poly2origpoly(poly x y rate)<line_sep>det=origpoly<line_sep>det.append(confidence)<line_sep>det=list(map(float det))<if_stmt>(oriname<not><in>nameboxdict)<block_start>nameboxdict[oriname]=[]<block_end>nameboxdict[oriname].append(det)<block_end>nameboxnmsdict=nmsbynamedict(nameboxdict nms nms_thresh)<line_sep># write result
dstname=os.path.join(output_dir class_name+'.txt')<with_stmt>open(dstname 'w')<as>f_out<block_start><for_stmt>imgname nameboxnmsdict<block_start><for_stmt>det nameboxnmsdict[imgname]<block_start>confidence=det[-1]<line_sep>bbox=det[0:-1]<line_sep>outline=imgname+' '+str(confidence)+' '+' '.join(map(str bbox))<line_sep>f_out.write(outline+'\n')<block_end><block_end><block_end><block_end><def_stmt>dota_generate_test_result pred_txt_dir output_dir='output' dota_version='v1.0'<block_start>"""
pred_txt_dir: dir of pred txt
output_dir: dir of output
dota_version: dota_version v1.0 or v1.5 or v2.0
"""<line_sep>pred_txt_list=glob.glob("{}/*.txt".format(pred_txt_dir))<line_sep># step1: summary pred bbox
pred_classes={}<line_sep>class_lst=class_name_15<if>dota_version<eq>'v1.0'<else>class_name_16<for_stmt>class_name class_lst<block_start>pred_classes[class_name]=[]<block_end><for_stmt>current_txt pred_txt_list<block_start>img_id=os.path.split(current_txt)[1]<line_sep>img_id=img_id.split('.txt')[0]<with_stmt>open(current_txt)<as>f<block_start>res=f.readlines()<for_stmt>item res<block_start>item=item.split(' ')<line_sep>pred_class=item[0]<line_sep>item[0]=img_id<line_sep>pred_bbox=' '.join(item)<line_sep>pred_classes[pred_class].append(pred_bbox)<block_end><block_end><block_end>pred_classes_lst=[]<for_stmt>class_name pred_classes.keys()<block_start>print('class_name: {}, count: {}'.format(class_name len(pred_classes[class_name])))<line_sep>pred_classes_lst.append((class_name pred_classes[class_name]))<block_end># step2: merge
pool=Pool(len(class_lst))<line_sep>nms=py_cpu_nms_poly_fast<line_sep>mergesingle_fn=partial(merge_single output_dir nms)<line_sep>pool.map(mergesingle_fn pred_classes_lst)<block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser(description='dota anno to coco')<line_sep>parser.add_argument('--pred_txt_dir' help='path of pred txt dir')<line_sep>parser.add_argument('--output_dir' help='path of output dir' default='output')<line_sep>parser.add_argument('--dota_version' help='dota_version, v1.0 or v1.5 or v2.0' type=str default='v1.0')<line_sep>args=parser.parse_args()<line_sep># process
dota_generate_test_result(args.pred_txt_dir args.output_dir args.dota_version)<line_sep>print('done!')<block_end>
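As a quick sanity check of the rotated-box NMS above, here is a minimal plain-Python sketch (the two boxes are made-up toy data, not DOTA output): two heavily overlapping unit squares go in, and with the script's nms_thresh of 0.1 only the higher-scoring one survives.

import numpy as np

# Each row is 8 polygon coordinates (x1, y1, ..., x4, y4) followed by a score.
dets = np.array([
    [0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.9],  # higher score, expected to be kept
    [0.1, 0.0, 1.1, 0.0, 1.1, 1.0, 0.1, 1.0, 0.8],  # overlaps the first box, expected to be suppressed
])

keep = py_cpu_nms_poly_fast(dets, nms_thresh)
print(keep)  # expected: [0]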
|
<import_stmt>importlib<import_from_stmt>typing Union<import_from_stmt>torch optim<import_from_stmt>super_gradients.common.factories.base_factory AbstractFactory<import_from_stmt>super_gradients.training.utils.optimizers.rmsprop_tf RMSpropTF<import_from_stmt>super_gradients.training.utils.optimizers.lamb Lamb<class_stmt>OptimizersTypeFactory(AbstractFactory)<block_start>"""
This is a special factory for torch.optim.Optimizer.
This factory does not instantiate an object but rather return the type, since optimizer instantiation
requires the model to be instantiated first
"""<def_stmt>__init__ self<block_start>self.type_dict={"SGD":optim.SGD "Adam":optim.Adam "RMSprop":optim.RMSprop "RMSpropTF":RMSpropTF "Lamb":Lamb}<block_end><def_stmt>get self conf:Union[str]<block_start>"""
Get a type.
:param conf: a configuration
if string - assumed to be a type name (not the real name, but a name defined in the Factory)
a dictionary is not supported, since the actual instantiation takes place elsewhere
If provided value is not one of the three above, the value will be returned as is
"""<if_stmt>isinstance(conf str)<block_start><if_stmt>conf<in>self.type_dict<block_start><return>self.type_dict[conf]<block_end><else_stmt><block_start><try_stmt><block_start>lib='.'.join(conf.split('.')[:-1])<line_sep>module=conf.split('.')[-1]<line_sep>lib=importlib.import_module(lib)# Import the required packages
class_type=lib.__dict__[module]<line_sep><return>class_type<block_end><except_stmt>RuntimeError<block_start><raise>RuntimeError(f"Unknown object type: {conf} in configuration. valid types are: {self.type_dict.keys()} or a class "<concat>"type available in the env (or the form 'package_name.sub_package.MyClass'")<block_end><block_end><block_end><else_stmt><block_start><return>conf<block_end><block_end><block_end>
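A minimal usage sketch of the factory above (plain Python; my_model is a hypothetical torch module, not part of the snippet): registered short names resolve through the internal dict, a dotted path such as 'torch.optim.AdamW' is imported dynamically, and in both cases the returned value is the optimizer class, not an instance.

from torch import optim

factory = OptimizersTypeFactory()

assert factory.get("Adam") is optim.Adam           # short name from the internal type_dict
adamw_cls = factory.get("torch.optim.AdamW")       # dotted path, imported at runtime
optimizer = adamw_cls(my_model.parameters(), lr=1e-3)  # instantiation happens outside the factory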
|
<import_stmt>sys<import_stmt>os.path<line_sep>sys.path.append(os.path.abspath(__file__+"\..\.."))<import_stmt>windows<import_stmt>windows.generated_def<as>gdef<line_sep>devmgr=windows.system.device_manager<line_sep>print("Device manager is {0}".format(devmgr))<line_sep>print("Enumerating the first 3 device classes")<for_stmt>cls devmgr.classes[:3]<block_start>print(" * {0}".format(cls))<block_end>print("Finding device class 'System'")<line_sep># Allow devmgr.classes["name"] ?
system_cls=[cls<for>cls devmgr.classes<if>cls.name<eq>b"System"][0]<line_sep>print(" * {0}".format(system_cls))<line_sep>print(" Enumerating some devices of 'System'")<line_sep>devices=system_cls.devices.all()<for_stmt>devinst (devices[0] devices[25] devices[35])# Some "random" devices to have interesting ones
<block_start>print(" * {0}".format(devinst))<line_sep>devconf=devinst.allocated_configuration<if_stmt><not>devconf<block_start><continue><block_end>print(" Enumerating allocated resources:")<for_stmt>resource devconf.resources<block_start>print(" * {0}".format(resource))<block_end><block_end># python64 samples\device\device_manager.py
# Device manager is <windows.winobject.device_manager.DeviceManager object at 0x0000000003669908>
# Enumerating the first 3 device classes
# * <DeviceClass name="XboxComposite" guid=05F5CFE2-4733-4950-A6BB-07AAD01A3A84>
# * <DeviceClass name="DXGKrnl" guid=1264760F-A5C8-4BFE-B314-D56A7B44A362>
# * <DeviceClass name="RemotePosDevice" guid=13E42DFA-85D9-424D-8646-28A70F864F9C>
# Finding device class 'System'
# * <DeviceClass name="System" guid=4D36E97D-E325-11CE-BFC1-08002BE10318>
# Enumerating some devices of 'System'
# * <DeviceInstance "Motherboard resources" (id=1)>
# * <DeviceInstance "Microsoft ACPI-Compliant Embedded Controller" (id=26)>
# Enumerating allocated resources:
# * <IoResource : [0x00000000000062-0x00000000000062]>
# * <IoResource : [0x00000000000066-0x00000000000066]>
# * <DeviceInstance "High Definition Audio Controller" (id=36)>
# Enumerating allocated resources:
# * <MemoryResource : [0x000000f7080000-0x000000f7083fff]>
# * <DevicePrivateResource type=ResType_DevicePrivate(0x8001)>
# * <IrqResource : [0x00000000000011]>
|
<import_from_stmt>typing List<import_stmt>numpy<as>np<import_from_stmt>shapely.geometry Point<import_from_stmt>nuplan.common.actor_state.state_representation Point2D<import_from_stmt>nuplan.common.maps.abstract_map AbstractMap<import_from_stmt>nuplan.planning.metrics.evaluation_metrics.base.metric_base MetricBase<import_from_stmt>nuplan.planning.metrics.metric_result MetricStatistics MetricStatisticsType TimeSeries<import_from_stmt>nuplan.planning.metrics.utils.route_extractor get_route<import_from_stmt>nuplan.planning.metrics.utils.state_extractors extract_ego_center extract_ego_time_point<import_from_stmt>nuplan.planning.scenario_builder.abstract_scenario AbstractScenario<import_from_stmt>nuplan.planning.simulation.history.simulation_history SimulationHistory<class_stmt>DistanceToBaselineStatistics(MetricBase)<block_start>"""Statistics on distance of center of ego from nearest baseline."""<def_stmt>__init__ self name:str category:str<arrow><none><block_start>"""
Initializes the DistanceToBaselineStatistics class
:param name: Metric name
:param category: Metric category.
"""<line_sep>super().__init__(name=name category=category)<block_end>@staticmethod<def_stmt>compute_distance_to_route_baseline map_api:AbstractMap poses:List[Point2D]<arrow>List[float]<block_start>"""
Returns the minimum distance of each ego pose to the baseline of a lane or lane_connector it
belongs to; if it does not belong to any lane or lane_connector, inf is returned
:param map_api: a map
:param poses: list of ego poses
:return: list of distances to baseline, or inf.
"""<line_sep># Get the list of lane or lane_connectors ego belongs to.
ego_route=get_route(map_api=map_api poses=poses)<line_sep># For each (route_obj, pose), if route_obj is not empty, compute the distance of pose from its
# baseline, otherwise set the distance to inf
distances=[]<for_stmt>route_obj,pose zip(ego_route poses)<block_start><if_stmt>len(route_obj)<eq>0<block_start>distances.append(np.inf)<line_sep><continue><block_end>baseline_paths=[one_route_obj.baseline_path()<for>one_route_obj route_obj]<line_sep>dist_to_route=min(baseline_path.linestring.distance(Point(pose.x pose.y))<for>baseline_path baseline_paths)<line_sep>distances.append(dist_to_route)<block_end><return>distances<block_end><def_stmt>compute self history:SimulationHistory scenario:AbstractScenario<arrow>List[MetricStatistics]<block_start>"""
Returns the estimated metric
:param history: History from a simulation engine
:param scenario: Scenario running this metric
:return: the estimated metric.
"""<line_sep># Extract xy coordinates of center of ego from history.
ego_states=history.extract_ego_state<line_sep>ego_poses=extract_ego_center(ego_states)<line_sep># Compute distance of center poses from the baseline of route objects.
distance_to_baseline=self.compute_distance_to_route_baseline(map_api=history.map_api poses=ego_poses)<line_sep>ego_timestamps=extract_ego_time_point(ego_states)<line_sep>time_series=TimeSeries(unit='meters' time_stamps=list(ego_timestamps) values=list(distance_to_baseline))<line_sep>statistics_type_list=[MetricStatisticsType.MAX MetricStatisticsType.MEAN]<line_sep>metric_statistics=self._compute_time_series_statistic(time_series=time_series statistics_type_list=statistics_type_list)<line_sep>results=self._construct_metric_results(metric_statistics=metric_statistics scenario=scenario time_series=time_series)<line_sep><return>results<block_end><block_end># type: ignore
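The geometric core of compute_distance_to_route_baseline above is shapely's point-to-linestring distance; a standalone sketch of just that operation (toy coordinates, not nuPlan map data):

from shapely.geometry import LineString, Point

baseline = LineString([(0.0, 0.0), (10.0, 0.0)])  # a straight baseline along the x-axis
ego_center = Point(5.0, 3.0)                      # an ego center 3 m to the side of it

print(baseline.distance(ego_center))  # 3.0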
|
# -*- coding: utf-8 -*-
#
# Timing
#
<import_stmt>numpy<as>np<import_stmt>re os<import_stmt>include.id3reader_p3<as>id3<import_from_stmt>shutil copy<import_from_stmt>timing *<import_from_stmt>metadata *<def_stmt>get_timed_osu_file music_path input_filename="assets/template.osu" output_filename="timing.osu" game_mode=0 mania_key_count=<none><block_start><with_stmt>open(input_filename)<as>osu_file<block_start>osu_text=osu_file.read()<block_end>rdr=id3.Reader(music_path)<line_sep>artist=rdr.get_value("performer")<if_stmt>artist<is><none><block_start>artist="unknown"<block_end>title=rdr.get_value("title")<if_stmt>title<is><none><block_start>title=re.sub("\.[^\.]*$" "" os.path.basename(music_path))<block_end>bpm,offset=get_timing(music_path)<line_sep>osu_text=re.sub("{audio_filename}" "audio.mp3" osu_text)<line_sep>osu_text=re.sub("{game_mode}" str(game_mode) osu_text)<line_sep>osu_text=re.sub("{artist}" artist osu_text)<line_sep>osu_text=re.sub("{title}" title osu_text)<line_sep>osu_text=re.sub("{version}" get_difficulty_name() osu_text)<line_sep>osu_text=re.sub("{hp_drain}" "{}".format(np.random.randint(0 101)/10) osu_text)<if_stmt>mania_key_count<is><none><block_start>osu_text=re.sub("{circle_size}" "{}".format(np.random.randint(30 51)/10) osu_text)<block_end><else_stmt><block_start>osu_text=re.sub("{circle_size}" "{}".format(mania_key_count) osu_text)<block_end>osu_text=re.sub("{overall_difficulty}" "{}".format(np.random.randint(50 91)/10) osu_text)<line_sep>osu_text=re.sub("{approach_rate}" "{}".format(np.random.randint(70 96)/10) osu_text)<line_sep>osu_text=re.sub("{slider_velocity}" "{}".format(np.random.randint(12 26)/10) osu_text)<line_sep>osu_text=re.sub("{tickLength}" "{}".format(60000/bpm) osu_text)<line_sep>osu_text=re.sub("{offset}" "{}".format(int(offset)) osu_text)<line_sep>osu_text=re.sub("{colors}" get_colors() osu_text)<line_sep>osu_text=re.sub("{hit_objects}" "" osu_text)<with_stmt>open(output_filename 'w' encoding="utf8")<as>osu_output<block_start>osu_output.write(osu_text)<block_end>copy(music_path "./audio.mp3")<line_sep><return>output_filename<block_end>
|
<import_stmt>sys<import_stmt>notify2<import_from_stmt>notify2 Notification<line_sep>notify2.init(sys.argv[0])<def_stmt>send process=<none> subject_format='{executable} process {pid} ended' timeout=notify2.EXPIRES_NEVER<block_start>"""Display a Desktop Notification via DBUS (notify2)
:param process: information about process. (.info() inserted into body)
:param subject_format: subject format string. (uses process.__dict__)
:param timeout: how long to display notification (milliseconds) default 0 (never expires)
"""<line_sep>notif=Notification(subject_format.format(**process.__dict__) process.info())<line_sep>notif.timeout=timeout<line_sep>notif.show()<block_end>
|
"""py-air-control."""<line_sep>
|
<import_stmt>pytest<import_stmt>util.store<line_sep>@pytest.fixture<def_stmt>emptyStore <block_start><return>util.store.Store()<block_end>@pytest.fixture<def_stmt>store <block_start><return>util.store.Store()<block_end><def_stmt>test_get_of_unset_key emptyStore<block_start><assert_stmt>emptyStore.get("any-key")<eq><none><assert_stmt>emptyStore.get("any-key" "default-value")<eq>"default-value"<block_end><def_stmt>test_get_of_set_key store<block_start>store.set("key" "value")<assert_stmt>store.get("key")<eq>"value"<block_end><def_stmt>test_overwrite_set store<block_start>store.set("key" "value 1")<line_sep>store.set("key" "value 2")<assert_stmt>store.get("key")<eq>"value 2"<block_end><def_stmt>test_unused_keys store<block_start>store.set("key 1" "value x")<line_sep>store.set("key 2" "value y")<assert_stmt>sorted(store.unused_keys())<eq>sorted(["key 1" "key 2"])<line_sep>store.get("key 2")<assert_stmt>store.unused_keys()<eq>["key 1"]<line_sep>store.get("key 1")<assert_stmt>store.unused_keys()<eq>[]<block_end># vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_stmt>os<import_stmt>logging<import_stmt>shutil<import_stmt>wget<import_stmt>argparse<import_stmt>pandas<as>pd<import_from_stmt>google.cloud storage<import_from_stmt>google.cloud bigquery<import_from_stmt>google.cloud.exceptions NotFound Conflict<import_from_stmt>dataset_schema table_schema<import_from_stmt>dataset_clean dataset_clean_query<import_from_stmt>dataset_ml dataset_ml_query<line_sep>LOCAL_PATH="./data"<line_sep>FILENAME="online_retail"<def_stmt>download_url2gcs args<block_start>"""
args: parsed command-line arguments (expects GCS_BUCKET and URL).
"""<line_sep>#set GCS client.
client=storage.Client()<line_sep># Retrieve GCS bucket.
bucket=client.get_bucket(args.GCS_BUCKET)<line_sep>blob=bucket.blob("data/online_retail.csv")<line_sep>#See if file already exists.
<if_stmt>blob.exists()<eq><false><block_start><try_stmt><block_start>os.mkdir(LOCAL_PATH)<line_sep>logging.info('Downloading xlsx file...')<line_sep>local_xlsx=wget.download(args.URL out=f"{LOCAL_PATH}/{FILENAME}.xlsx")<line_sep>logging.info('Converting xlsx -> csv...')<line_sep>df=pd.read_excel(local_xlsx)<line_sep>df.to_csv(f"{LOCAL_PATH}/{FILENAME}.csv" index=<false>)<line_sep>logging.info('Uploading local csv file to GCS...')<line_sep>blob.upload_from_filename(f"{LOCAL_PATH}/{FILENAME}.csv")<line_sep>logging.info('Copied local csv file to GCS.')<line_sep># Delete all contents of a directory using shutil.rmtree() and handle exceptions.
<try_stmt><block_start>shutil.rmtree(LOCAL_PATH)<line_sep>logging.info('Cleaning up local tmp data directory...')<block_end><except_stmt><block_start>logging.error('Error while deleting local tmp data directory.')<block_end><block_end>#print error if file doesn't exist.
<except_stmt>BaseException<as>error<block_start>logging.error('An exception occurred: {}'.format(error))<block_end><block_end>#print error if file already exists in GCS.
<else_stmt><block_start>logging.warning('File already exists in GCS.')<block_end><block_end><def_stmt>upload_gcs2bq args schema<block_start>"""
args: parsed command-line arguments (expects PROJECT_ID, GCS_BUCKET and BQ_* settings).
schema: BigQuery table schema used when loading the raw table.
"""<line_sep># Construct a BigQuery client object.
client=bigquery.Client()<line_sep># Construct a full Dataset object to send to the API.
logging.info('Initializing BigQuery dataset.')<line_sep>dataset=bigquery.Dataset(f"{args.PROJECT_ID}.{args.BQ_DATASET_NAME}")<try_stmt># Send the dataset to the API for creation, with an explicit timeout.
# Raises google.api_core.exceptions.Conflict if the Dataset already
# exists within the project.
<block_start>dataset=client.create_dataset(dataset timeout=30)# Make an API request.
# Specify the geographic location where the dataset should reside.
dataset.location=args.BQ_LOCATION<block_end><except_stmt>Conflict<block_start>logging.warning('Dataset %s already exists, not creating.' dataset.dataset_id)<block_end><else_stmt><block_start>logging.info("Created dataset %s.%s" client.project dataset.dataset_id)<block_end><try_stmt><block_start>URI=f"gs://{args.GCS_BUCKET}/data/{FILENAME}.csv"<line_sep>RAW_TABLE_ID=f"{args.PROJECT_ID}.{args.BQ_DATASET_NAME}.{args.BQ_RAW_TABLE_NAME}"<line_sep># Load job.
job_config=bigquery.LoadJobConfig(schema=schema skip_leading_rows=1 allow_jagged_rows=<true> write_disposition="WRITE_TRUNCATE" source_format=bigquery.SourceFormat.CSV)<line_sep>load_job=client.load_table_from_uri(source_uris=URI destination=RAW_TABLE_ID job_config=job_config)<line_sep>logging.info('BQ raw dataset load job starting...')<line_sep>load_job.result()# Waits for the job to complete.
logging.info('BQ raw dataset load job complete.')<block_end><except_stmt>BaseException<as>error<block_start>logging.error('An exception occurred: {}'.format(error))<block_end>destination_table=client.get_table(RAW_TABLE_ID)# Make an API request.
logging.info("Loaded %s rows into %s." destination_table.num_rows RAW_TABLE_ID)<block_end><def_stmt>make_dataset_clean_bq args query:str<block_start>"""
args: parsed command-line arguments (expects PROJECT_ID and BQ_* table settings).
query: SQL template with @CLEAN_TABLE_ID / @RAW_TABLE_ID placeholders used to build the clean table.
"""<line_sep>client=bigquery.Client()<line_sep>CLEAN_TABLE_ID=f"{args.PROJECT_ID}.{args.BQ_DATASET_NAME}.{args.BQ_CLEAN_TABLE_NAME}"<line_sep>RAW_TABLE_ID=f"{args.PROJECT_ID}.{args.BQ_DATASET_NAME}.{args.BQ_RAW_TABLE_NAME}"<line_sep>clean_query=query.replace("@CLEAN_TABLE_ID" CLEAN_TABLE_ID).replace("@RAW_TABLE_ID" RAW_TABLE_ID)<line_sep>logging.info('BQ make clean dataset starting...')<try_stmt><block_start>job=client.query(clean_query)<line_sep>_=job.result()<line_sep>logging.info('BQ make clean dataset complete')<block_end><except_stmt>BaseException<as>error<block_start>logging.error('An exception occurred: {}'.format(error))<block_end>destination_table=client.get_table(CLEAN_TABLE_ID)# Make an API request.
logging.info("Loaded %s rows into %s." destination_table.num_rows CLEAN_TABLE_ID)<block_end><def_stmt>make_dataset_ml_bq args query:str<block_start>"""
args: parsed command-line arguments (expects PROJECT_ID and BQ_* table settings).
query: SQL template with @ML_TABLE_ID / @CLEAN_TABLE_ID placeholders used to build the ML table.
"""<line_sep>client=bigquery.Client()<line_sep>ML_TABLE_ID=f"{args.PROJECT_ID}.{args.BQ_DATASET_NAME}.{args.BQ_ML_TABLE_NAME}"<line_sep>CLEAN_TABLE_ID=f"{args.PROJECT_ID}.{args.BQ_DATASET_NAME}.{args.BQ_CLEAN_TABLE_NAME}"<line_sep>ml_query=query.replace("@ML_TABLE_ID" ML_TABLE_ID).replace("@CLEAN_TABLE_ID" CLEAN_TABLE_ID)<line_sep>logging.info('BQ make ML dataset starting...')<try_stmt><block_start>job=client.query(ml_query)<line_sep>_=job.result()<line_sep>logging.info('BQ make ML dataset complete')<block_end><except_stmt>BaseException<as>error<block_start>logging.error('An exception occurred: {}'.format(error))<block_end>destination_table=client.get_table(ML_TABLE_ID)# Make an API request.
logging.info("Loaded %s rows into %s." destination_table.num_rows ML_TABLE_ID)<block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--PROJECT_ID" dest="PROJECT_ID" type=str required=<true>)<line_sep>parser.add_argument("--GCS_BUCKET" dest="GCS_BUCKET" type=str required=<true>)<line_sep>parser.add_argument("--URL" dest="URL" type=str required=<true>)<line_sep>parser.add_argument("--BQ_DATASET_NAME" dest="BQ_DATASET_NAME" type=str default="online_retail")<line_sep>parser.add_argument("--BQ_LOCATION" dest="BQ_LOCATION" type=str default="US")<line_sep>parser.add_argument("--BQ_RAW_TABLE_NAME" dest="BQ_RAW_TABLE_NAME" type=str default="online_retail_clv_raw")<line_sep>parser.add_argument("--BQ_CLEAN_TABLE_NAME" dest="BQ_CLEAN_TABLE_NAME" type=str default="online_retail_clv_clean")<line_sep>parser.add_argument("--BQ_ML_TABLE_NAME" dest="BQ_ML_TABLE_NAME" type=str default="online_retail_clv_ml")<line_sep>args=parser.parse_args()<line_sep>logging.basicConfig(level=logging.INFO format="\n %(asctime)s [%(levelname)s] %(message)s" handlers=[logging.StreamHandler()])<line_sep>download_url2gcs(args)<line_sep>upload_gcs2bq(args table_schema)<line_sep>make_dataset_clean_bq(args dataset_clean_query)<line_sep>make_dataset_ml_bq(args dataset_ml_query)<block_end>
|
# -*- coding: utf-8 -*-
"""
"""<class_stmt>cDBSCAN<block_start>"""
The major class of the cDBSCAN algorithm, belongs to <NAME>, <NAME>.
"""<def_stmt>__init__ self mat eps minPts<block_start>"""
@param mat: the raw or normalized [pointId,X,Y] data matrix
@type mat : np.array
@param eps: The clustering distance threshold, key parameter in DBSCAN.
@type eps: float
@param minPts: The minimum number of points in a neighborhood required to define a core point, a key
parameter in DBSCAN.
@type minPts: int
"""<line_sep>#: build the data in the class for global use
self.eps=eps<line_sep>self.minPts=minPts<line_sep>#: cell width, city block distance
self.cw=self.eps<line_sep>#: build the square index for quick neighbor search
self.buildGrids(mat)<line_sep>#: get the points for all neighbors
self.buildGridNeighbors()<line_sep>#: remove noise grids
self.removeNoiseGrids()<line_sep>#: get the points for all neighbors
self.buildGridNeighbors()<line_sep>#: get the clusters
self.callClusters()<del_stmt>self.Gs self.Gs2 self.ps<block_end><def_stmt>getDist self p q<block_start>"""
Basic function 1, the city block (Manhattan) distance function.
"""<line_sep>x=self.ps[p]<line_sep>y=self.ps[q]<line_sep>d=abs(x[0]-y[0])+abs(x[1]-y[1])<line_sep>#euclidean distance ,just in case.
#d = np.sqrt((x[0] - y[0])**2 + (x[1] - y[1])**2)
<return>d<block_end><def_stmt>getNearbyGrids self cell<block_start>"""
Basic function 2, use the 3x3 grid neighborhood as search neighbors; grid width is eps.
"""<line_sep>x,y=cell[0] cell[1]<line_sep>#keys = [(x, y),
keys=[(x y-1) (x y+1) (x-1 y) (x+1 y) (x-1 y-1) (x-1 y+1) (x+1 y-1) (x+1 y+1)]<line_sep>#keys = [(x, y), (x, y - 1), (x, y + 1), (x - 1, y), (x - 1, y - 1),
# (x - 1, y + 1), (x + 1, y), (x + 1, y - 1), (x + 1, y + 1),
# (x, y + 2), (x, y - 2), (x + 1, y + 2), (x + 1, y - 2),
# (x - 1, y + 2), (x - 1, y - 2), (x + 2, y), (x + 2, y + 1),
# (x + 2, y - 1), (x - 2, y), (x - 2, y + 1), (x - 2, y - 1)]
ncells=[]<for_stmt>key keys<block_start><if_stmt>key<in>self.Gs<block_start>ncells.append(key)<block_end><block_end><return>ncells<block_end><def_stmt>buildGrids self mat<block_start>"""
Algorithm 1: Construct the grids.
@param mat: the raw or normalized [pointId,X,Y] data matrix
"""<line_sep>minX,minY=mat[0][1] mat[0][2]<for_stmt>t mat<block_start>minX=min([minX t[1]])<line_sep>minY=min([minY t[2]])<block_end>Gs={}<line_sep>ps={}<for_stmt>d mat<block_start>nx=int((d[1]-minX)/self.cw)+1<line_sep>ny=int((d[2]-minY)/self.cw)+1<line_sep>Gs.setdefault((nx ny) [])<line_sep>Gs[(nx ny)].append(d[0])<line_sep>#last elements marks the class, initially -1 as noise
ps[d[0]]=[d[1] d[2] nx ny -1]<block_end>self.Gs,self.ps=Gs ps<block_end><def_stmt>buildGridNeighbors self<block_start>"""
Algorithm 2: Build the grid index with all neighboring points.
"""<line_sep>Gs2={}<for_stmt>cell self.Gs.keys()<block_start>nps=[]<line_sep>nps.extend(self.Gs[cell])<for_stmt>cellj self.getNearbyGrids(cell)<block_start>nps.extend(self.Gs[cellj])<block_end>Gs2[cell]=nps<block_end>self.Gs2=Gs2<block_end><def_stmt>removeNoiseGrids self<block_start>"""
Algorithm 3: Remove noise grids according to KNN and get the obvious core points and core grids.
"""<line_sep>#: noise cells without neighbors
tode=set()<line_sep>#: noise cells with neighbors
tode2=set()<for_stmt>cell self.Gs.keys()<block_start><if_stmt>len(self.Gs2[cell])<l>self.minPts<block_start>tode2.add(cell)<block_end><block_end>#KNN to noise cells with neighbors
<for_stmt>cell tode2<block_start>cells=self.getNearbyGrids(cell)<line_sep>ncells=set(cells)&tode2<line_sep>#all neighbor cells are noise
<if_stmt>len(cells)<eq>len(ncells)<block_start>tode.add(cell)<block_end><block_end><for_stmt>cell tode<block_start><for_stmt>p self.Gs[cell]<block_start><del_stmt>self.ps[p]<block_end><del_stmt>self.Gs[cell]<block_end><block_end><def_stmt>callClusters self<block_start>"""
Algorithm 4: Do DBSCAN clustering by going through all points in the set.
"""<line_sep>#: clustering id, noise is -2 and unclassified point is -1.
clusterId=0<for_stmt>key self.ps<block_start><if_stmt>self.ps[key][-1]<eq>-1<block_start><if_stmt>self.expandCluster(key clusterId)<block_start>clusterId<augadd>1<block_end><block_end><block_end>#remove the noise and unclassified points
labels={}<line_sep>cs={}<for_stmt>p self.ps.keys()<block_start>c=self.ps[p][-1]<if_stmt>c<eq>-2<block_start><continue><block_end>labels[p]=c<if_stmt>c<not><in>cs<block_start>cs[c]=[]<block_end>cs[c].append(p)<block_end><for_stmt>key cs.keys()<block_start><if_stmt>len(cs[key])<l>self.minPts<block_start><for_stmt>p cs[key]<block_start><del_stmt>labels[p]<block_end><block_end><block_end>self.labels=labels<block_end><def_stmt>expandCluster self pointKey clusterId<block_start>"""
Search connections from the given point to others and try to expand a cluster.
@param pointKey: the key in self.ps
@type pointKey:
@param clusterId: the cluster id for the current cluster
@type clusterId: int
@return: bool
"""<line_sep>seeds=self.regionQuery(pointKey)<if_stmt>len(seeds)<l>self.minPts<block_start>self.ps[pointKey][-1]=-2<line_sep><return><false><block_end><else_stmt><block_start><for_stmt>key seeds<block_start>self.ps[key][-1]=clusterId<block_end><while_stmt>len(seeds)<g>0<block_start>currentP=seeds[0]<line_sep>result=self.regionQuery(currentP)<if_stmt>len(result)<ge>self.minPts<block_start><for_stmt>key result<block_start><if_stmt>self.ps[key][-1]<in>[-1 -2]<block_start><if_stmt>self.ps[key][-1]<eq>-1<block_start>seeds.append(key)<block_end>self.ps[key][-1]=clusterId<block_end><block_end><block_end><del_stmt>(seeds[0])<block_end><return><true><block_end><block_end><def_stmt>regionQuery self pointKey<block_start>"""
Find the points related to the queried point; city block distance is used.
@param pointKey: the key in self.ps
@type pointKey:
@return: list
"""<line_sep>p=self.ps[pointKey]<line_sep>x=p[2]<line_sep>y=p[3]<line_sep>#scan square and get nearby points.
result=[pointKey]<for_stmt>q self.Gs2[(x y)]<block_start><if_stmt>q<eq>pointKey<block_start><continue><block_end><if_stmt>self.getDist(pointKey q)<le>self.eps<block_start>result.append(q)<block_end><block_end><return>result<block_end><block_end>
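A toy usage sketch of the cDBSCAN class above (made-up points, plain Python): two well-separated groups in city-block distance should come back as two clusters in .labels.

import numpy as np

# Rows are [pointId, X, Y]; the two groups are far apart in city-block distance.
mat = np.array([
    [0, 0.0, 0.0], [1, 1.0, 0.0], [2, 0.0, 1.0], [3, 1.0, 1.0],          # group A
    [4, 10.0, 10.0], [5, 11.0, 10.0], [6, 10.0, 11.0], [7, 11.0, 11.0],  # group B
])

c = cDBSCAN(mat, eps=2, minPts=3)
# labels maps point id -> cluster id; the exact cluster ids may vary,
# but the two groups end up in different clusters.
print(c.labels)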
|
<import_from_stmt>lightbus.utilities.logging configure_logging<import_from_stmt>lightbus.transports *<import_from_stmt>lightbus.client BusClient<import_from_stmt>lightbus.path *<import_from_stmt>lightbus.message *<import_from_stmt>lightbus.api *<import_from_stmt>lightbus.schema *<import_from_stmt>lightbus.creation *<import_from_stmt>lightbus.client.utilities OnError<import_from_stmt>lightbus.exceptions *<line_sep>
|
<import_from_stmt>litex.soc.cores.cpu.femtorv.core FemtoRV<line_sep>
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
<import_stmt>pytest<import_from_stmt>airbyte_cdk.models AirbyteRecordMessage AirbyteStream ConfiguredAirbyteCatalog ConfiguredAirbyteStream DestinationSyncMode SyncMode <import_from_stmt>source_acceptance_test.utils.asserts verify_records_schema<line_sep>@pytest.fixture(name="record_schema")<def_stmt>record_schema_fixture <block_start><return>{"properties":{"text_or_null":{"type":["null" "string"]} "number_or_null":{"type":["null" "number"]} "text":{"type":["string"]} "number":{"type":["number"]} } "type":["null" "object"] }<block_end>@pytest.fixture(name="configured_catalog")<def_stmt>catalog_fixture request record_schema<arrow>ConfiguredAirbyteCatalog<block_start>record_schema=request.param<if>hasattr(request "param")<else>record_schema<line_sep>stream=ConfiguredAirbyteStream(stream=AirbyteStream(name="my_stream" json_schema=record_schema) sync_mode=SyncMode.full_refresh destination_sync_mode=DestinationSyncMode.append )<line_sep><return>ConfiguredAirbyteCatalog(streams=[stream])<block_end><def_stmt>test_verify_records_schema configured_catalog:ConfiguredAirbyteCatalog<block_start>"""Test that correct records returned as records with errors, and verify specific error messages"""<line_sep>records=[{"text_or_null":123 # wrong format
"number_or_null":10.3 "text":"text" "number":"text" # wrong format
} {"text_or_null":"test" "number_or_null":<none> "text":<none> # wrong value
"number":<none> # wrong value
} {"text_or_null":<none> "number_or_null":<none> "text":"text" "number":77 } {"text_or_null":<none> "number_or_null":<none> "text":"text" "number":"text" # wrong format
} ]<line_sep>records=[AirbyteRecordMessage(stream="my_stream" data=record emitted_at=0)<for>record records]<line_sep>streams_with_errors=verify_records_schema(records configured_catalog)<line_sep>errors=[error.message<for>error streams_with_errors["my_stream"].values()]<assert_stmt>"my_stream"<in>streams_with_errors<assert_stmt>len(streams_with_errors)<eq>1 "only one stream"<assert_stmt>len(streams_with_errors["my_stream"])<eq>3 "only first error for each field"<assert_stmt>errors<eq>["123 is not of type 'null', 'string'" "'text' is not of type 'number'" "None is not of type 'string'"]<block_end>@pytest.mark.parametrize("record, configured_catalog, valid" [# Send null data
({"a":<none>} {"type":"object" "properties":{"a":{"type":"string" "format":"time"}}} <false>) # time
({"a":"sdf"} {"type":"object" "properties":{"a":{"type":"string" "format":"time"}}} <false>) ({"a":"12:00"} {"type":"object" "properties":{"a":{"type":"string" "format":"time"}}} <false>) ({"a":"12:00:90"} {"type":"object" "properties":{"a":{"type":"string" "format":"time"}}} <false>) ({"a":"12:00:22"} {"type":"object" "properties":{"a":{"type":"string" "format":"time"}}} <true>) # date
({"a":"12:00:90"} {"type":"object" "properties":{"a":{"type":"string" "format":"date"}}} <false>) ({"a":"2020-12-20"} {"type":"object" "properties":{"a":{"type":"string" "format":"date"}}} <true>) ({"a":"2020-20-20"} {"type":"object" "properties":{"a":{"type":"string" "format":"date"}}} <false>) # date-time
# only the full date-time format with timezone is valid, according to https://datatracker.ietf.org/doc/html/rfc3339#section-5.6
({"a":"12:11:00"} {"type":"object" "properties":{"a":{"type":"string" "format":"date-time"}}} <false>) ({"a":"2018-11-13 20:20:39"} {"type":"object" "properties":{"a":{"type":"string" "format":"date-time"}}} <true>) ({"a":"2021-08-10T12:43:15"} {"type":"object" "properties":{"a":{"type":"string" "format":"date-time"}}} <true>) ({"a":"2021-08-10T12:43:15Z"} {"type":"object" "properties":{"a":{"type":"string" "format":"date-time"}}} <true>) ({"a":"2018-11-13T20:20:39+00:00"} {"type":"object" "properties":{"a":{"type":"string" "format":"date-time"}}} <true>) ({"a":"2018-21-13T20:20:39+00:00"} {"type":"object" "properties":{"a":{"type":"string" "format":"date-time"}}} <false>) # This is valid for postgres sql but not valid for bigquery
({"a":"2014-09-27 9:35z"} {"type":"object" "properties":{"a":{"type":"string" "format":"date-time"}}} <false>) # Seconds are obligatory for bigquery timestamp
({"a":"2014-09-27 9:35"} {"type":"object" "properties":{"a":{"type":"string" "format":"date-time"}}} <false>) ({"a":"2014-09-27 9:35:0z"} {"type":"object" "properties":{"a":{"type":"string" "format":"date-time"}}} <true>) # email
({"a":"2018-11-13 20:20:39"} {"type":"object" "properties":{"a":{"type":"string" "format":"email"}}} <false>) ({"a":"<EMAIL>"} {"type":"object" "properties":{"a":{"type":"string" "format":"email"}}} <true>) ({"a":"<EMAIL>"} {"type":"object" "properties":{"a":{"type":"string" "format":"email"}}} <true>) ({"a":"写电子邮件@子邮件"} {"type":"object" "properties":{"a":{"type":"string" "format":"email"}}} <true>) # hostname
({"a":"2018-11-13 20:20:39"} {"type":"object" "properties":{"a":{"type":"string" "format":"hostname"}}} <false>) ({"a":"<EMAIL>"} {"type":"object" "properties":{"a":{"type":"string" "format":"hostname"}}} <false>) ({"a":"localhost"} {"type":"object" "properties":{"a":{"type":"string" "format":"hostname"}}} <true>) ({"a":"example.com"} {"type":"object" "properties":{"a":{"type":"string" "format":"hostname"}}} <true>) # ipv4
({"a":"example.com"} {"type":"object" "properties":{"a":{"type":"string" "format":"ipv4"}}} <false>) ({"a":"0.0.0.1000"} {"type":"object" "properties":{"a":{"type":"string" "format":"ipv4"}}} <false>) ({"a":"0.0.0.0"} {"type":"object" "properties":{"a":{"type":"string" "format":"ipv4"}}} <true>) # ipv6
({"a":"example.com"} {"type":"object" "properties":{"a":{"type":"string" "format":"ipv6"}}} <false>) ({"a":"fc00:db20:35b:7399::5"} {"type":"object" "properties":{"a":{"type":"string" "format":"ipv6"}}} <true>) ({"a":"::1"} {"type":"object" "properties":{"a":{"type":"string" "format":"ipv6"}}} <true>) ({"a":"::"} {"type":"object" "properties":{"a":{"type":"string" "format":"ipv6"}}} <true>) ] indirect=["configured_catalog"] )<def_stmt>test_validate_records_format record configured_catalog valid<block_start>records=[AirbyteRecordMessage(stream="my_stream" data=record emitted_at=0)]<line_sep>streams_with_errors=verify_records_schema(records configured_catalog)<if_stmt>valid<block_start><assert_stmt><not>streams_with_errors<block_end><else_stmt><block_start><assert_stmt>streams_with_errors f"Record {record} should produce errors against {configured_catalog.streams[0].stream.json_schema}"<block_end><block_end>
|
<import_stmt>anuga<import_from_stmt>anuga.validation_utilities produce_report<line_sep>args=anuga.get_args()<line_sep>produce_report('numerical_runup.py' args=args)<line_sep>
|
<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_stmt>tensorflow_probability<as>tfp<import_from_stmt>probflow.modules Dense Sequential<import_from_stmt>probflow.parameters Parameter<import_from_stmt>probflow.utils.settings Sampling<line_sep>tfd=tfp.distributions<def_stmt>is_close a b tol=1e-3<block_start><return>np.abs(a-b)<l>tol<block_end><def_stmt>test_Sequential <block_start>"""Tests probflow.modules.Sequential"""<line_sep># Create the module
seq=Sequential([Dense(5 10) tf.nn.relu Dense(10 3) tf.nn.relu Dense(3 1)])<line_sep># Steps should be list
<assert_stmt>isinstance(seq.steps list)<assert_stmt>len(seq.steps)<eq>5<line_sep># Test MAP outputs are the same
x=tf.random.normal([4 5])<line_sep>samples1=seq(x)<line_sep>samples2=seq(x)<assert_stmt>np.all(samples1.numpy()<eq>samples2.numpy())<assert_stmt>samples1.ndim<eq>2<assert_stmt>samples1.shape[0]<eq>4<assert_stmt>samples1.shape[1]<eq>1<line_sep># Test samples are different
<with_stmt>Sampling(n=1)<block_start>samples1=seq(x)<line_sep>samples2=seq(x)<block_end><assert_stmt>np.all(samples1.numpy()<ne>samples2.numpy())<assert_stmt>samples1.ndim<eq>2<assert_stmt>samples1.shape[0]<eq>4<assert_stmt>samples1.shape[1]<eq>1<line_sep># parameters should return list of all parameters
param_list=seq.parameters<assert_stmt>isinstance(param_list list)<assert_stmt>len(param_list)<eq>6<assert_stmt>all(isinstance(p Parameter)<for>p param_list)<line_sep>param_names=[p.name<for>p seq.parameters]<assert_stmt>"Dense_weights"<in>param_names<assert_stmt>"Dense_bias"<in>param_names<line_sep>param_shapes=[p.shape<for>p seq.parameters]<assert_stmt>[5 10]<in>param_shapes<assert_stmt>[1 10]<in>param_shapes<assert_stmt>[10 3]<in>param_shapes<assert_stmt>[1 3]<in>param_shapes<assert_stmt>[3 1]<in>param_shapes<assert_stmt>[1 1]<in>param_shapes<line_sep># kl_loss should return sum of KL losses
kl_loss=seq.kl_loss()<assert_stmt>isinstance(kl_loss tf.Tensor)<assert_stmt>kl_loss.ndim<eq>0<block_end>
|
<import_stmt>base64<import_stmt>hashlib<import_stmt>hmac<import_stmt>logging<import_stmt>time<import_from_stmt>typing Optional Dict Any<import_from_stmt>uuid UUID<import_stmt>websocket<import_stmt>json<import_stmt>os<import_from_stmt>threading Thread<import_from_stmt>cryptography.hazmat.primitives hashes<import_from_stmt>cryptography.hazmat.primitives.asymmetric padding<import_from_stmt>cryptography.hazmat.primitives.asymmetric.rsa RSAPrivateKey<import_from_stmt>..core.playbooks.playbook_utils to_safe_str<import_from_stmt>..core.playbooks.playbooks_event_handler PlaybooksEventHandler<import_from_stmt>..core.model.env_vars INCOMING_REQUEST_TIME_WINDOW_SECONDS RUNNER_VERSION<import_from_stmt>..core.reporting.action_requests ExternalActionRequest ActionRequestBody sign_action_request PartialAuth <import_from_stmt>..utils.auth_provider AuthProvider<line_sep>WEBSOCKET_RELAY_ADDRESS=os.environ.get("WEBSOCKET_RELAY_ADDRESS" "wss://relay.robusta.dev")<line_sep>CLOUD_ROUTING=json.loads(os.environ.get("CLOUD_ROUTING" "True").lower())<line_sep>RECEIVER_ENABLE_WEBSOCKET_TRACING=json.loads(os.environ.get("RECEIVER_ENABLE_WEBSOCKET_TRACING" "False").lower())<line_sep>INCOMING_WEBSOCKET_RECONNECT_DELAY_SEC=int(os.environ.get("INCOMING_WEBSOCKET_RECONNECT_DELAY_SEC" 3))<class_stmt>ActionRequestReceiver<block_start><def_stmt>__init__ self event_handler:PlaybooksEventHandler<block_start>self.event_handler=event_handler<line_sep>self.active=<true><line_sep>self.account_id=self.event_handler.get_global_config().get("account_id")<line_sep>self.cluster_name=self.event_handler.get_global_config().get("cluster_name")<line_sep>self.auth_provider=AuthProvider()<line_sep>self.ws=websocket.WebSocketApp(WEBSOCKET_RELAY_ADDRESS on_open=self.on_open on_message=self.on_message on_error=self.on_error )<if_stmt><not>self.account_id<or><not>self.cluster_name<block_start>logging.error(f"Action receiver cannot start. "<concat>f"Missing required account_id {self.account_id} cluster_name {self.cluster_name}")<line_sep><return><block_end>self.start_receiver()<block_end><def_stmt>start_receiver self<block_start><if_stmt><not>CLOUD_ROUTING<block_start>logging.info("outgoing messages only mode. Incoming event receiver not initialized")<line_sep><return><block_end><if_stmt>WEBSOCKET_RELAY_ADDRESS<eq>""<block_start>logging.warning("relay address empty. Not initializing relay")<line_sep><return><block_end>websocket.enableTrace(RECEIVER_ENABLE_WEBSOCKET_TRACING)<line_sep>receiver_thread=Thread(target=self.run_forever)<line_sep>receiver_thread.start()<block_end><def_stmt>run_forever self<block_start>logging.info("starting relay receiver")<while_stmt>self.active<block_start>self.ws.run_forever()<line_sep>logging.info("relay websocket closed")<line_sep>time.sleep(INCOMING_WEBSOCKET_RECONNECT_DELAY_SEC)<block_end><block_end><def_stmt>stop self<block_start>logging.info(f"Stopping incoming receiver")<line_sep>self.active=<false><line_sep>self.ws.close()<block_end>@classmethod<def_stmt>__sync_response cls status_code:int request_id:str data<arrow>Dict<block_start><return>{"action":"response" "request_id":request_id "status_code":status_code "data":data}<block_end><def_stmt>__exec_external_request self action_request:ExternalActionRequest validate_timestamp:bool<block_start>logging.info(f"Callback `{action_request.body.action_name}` {to_safe_str(action_request.body.action_params)}")<line_sep>sync_response=action_request.request_id<ne>""# if request_id is set, we need to write back the response
<if_stmt><not>self.__validate_request(action_request validate_timestamp)<block_start>req_json=action_request.json(exclude={"body"})<line_sep>body_json=action_request.body.json(exclude={"action_params"})# action params already printed above
logging.error(f"Failed to validate action request {req_json} {body_json}")<if_stmt>sync_response<block_start>self.ws.send(data=json.dumps(self.__sync_response(401 action_request.request_id "")))<block_end><return><block_end>response=self.event_handler.run_external_action(action_request.body.action_name action_request.body.action_params action_request.body.sinks sync_response action_request.no_sinks )<if_stmt>sync_response<block_start>http_code=200<if>response.get("success")<else>500<line_sep>self.ws.send(data=json.dumps(self.__sync_response(http_code action_request.request_id response)))<block_end><block_end><def_stmt>on_message self ws message# TODO: use typed pydantic classes here?
<block_start>incoming_event=json.loads(message)<line_sep>actions=incoming_event.get("actions" <none>)<if_stmt>actions# this is slack callback format
# slack callbacks have a list of 'actions'. Within each action there a 'value' field,
# which container the actual action details we need to run.
# This wrapper format is part of the slack API, and cannot be changed by us.
<block_start><for_stmt>action actions<block_start>raw_action=action.get("value" <none>)<try_stmt><block_start>self.__exec_external_request(ExternalActionRequest.parse_raw(raw_action) <false>)<block_end><except_stmt>Exception<block_start>logging.error(f"Failed to run incoming event {ActionRequestReceiver._stringify_incoming_event(raw_action)}" exc_info=<true>)<block_end><block_end><block_end><else_stmt># assume it's ActionRequest format
<block_start><try_stmt><block_start>self.__exec_external_request(ExternalActionRequest(**incoming_event) <true>)<block_end><except_stmt>Exception<block_start>logging.error(f"Failed to run incoming event {ActionRequestReceiver._stringify_incoming_event(incoming_event)}" exc_info=<true>)<block_end><block_end><block_end>@staticmethod<def_stmt>_stringify_incoming_event incoming_event<arrow>str<block_start>"""Stringify incoming request masking action params in case it contains secrets"""<if_stmt>isinstance(incoming_event str)# slack format, stringified json
<block_start><try_stmt><block_start>event_dict=json.loads(incoming_event)<block_end><except_stmt>Exception<block_start>logging.error("Failed to parse raw incoming event" exc_info=<true>)<line_sep><return>"parse error"<block_end><block_end><elif_stmt>isinstance(incoming_event dict)<block_start>event_dict=incoming_event<block_end><else_stmt><block_start><return>f"Unknown incoming_event type {type(incoming_event)}"<block_end>body=event_dict.pop("body" {})<line_sep>action_params=body.pop("action_params" {})<line_sep><return>f"{event_dict} {body} {to_safe_str(action_params)}"<block_end><def_stmt>on_error self ws error<block_start>logging.info(f"Relay websocket error: {error}")<block_end><def_stmt>on_open self ws<block_start>account_id=self.event_handler.get_global_config().get("account_id")<line_sep>cluster_name=self.event_handler.get_global_config().get("cluster_name")<line_sep>open_payload={"action":"auth" "account_id":account_id "cluster_name":cluster_name "version":RUNNER_VERSION }<line_sep>logging.info(f"connecting to server as account_id={account_id}; cluster_name={cluster_name}")<line_sep>ws.send(json.dumps(open_payload))<block_end><def_stmt>__validate_request self action_request:ExternalActionRequest validate_timestamp:bool<arrow>bool<block_start>"""
Two auth protocols are supported:
1. signature - Signing the body using the signing_key should match the signature
2. partial keys auth - using partial_auth_a and partial_auth_b
Each partial auth should be decrypted using the private key (rsa private key).
The content should have 2 items:
- key
- body hash
The operation key_a XOR key_b should be equal to the signing_key
If both protocols are present, we only check the signature
"""<if_stmt>validate_timestamp<and>(time.time()-action_request.body.timestamp<g>INCOMING_REQUEST_TIME_WINDOW_SECONDS)<block_start>logging.error(f"Rejecting incoming request because it's too old. Cannot verify request {action_request}")<line_sep><return><false><block_end>signing_key=self.event_handler.get_global_config().get("signing_key")<line_sep>body=action_request.body<if_stmt><not>signing_key<block_start>logging.error(f"Signing key not available. Cannot verify request {body}")<line_sep><return><false><block_end># First auth protocol option, based on signature only
signature=action_request.signature<if_stmt>signature<block_start>generated_signature=sign_action_request(body signing_key)<line_sep><return>hmac.compare_digest(generated_signature signature)<block_end># Second auth protocol option, based on public key
partial_auth_a=action_request.partial_auth_a<line_sep>partial_auth_b=action_request.partial_auth_b<if_stmt><not>partial_auth_a<or><not>partial_auth_b<block_start>logging.error(f"Insufficient authentication data. Cannot verify request {body}")<line_sep><return><false><block_end>private_key=self.auth_provider.get_private_rsa_key()<if_stmt><not>private_key<block_start>logging.error(f"Private RSA key missing. Cannot validate request for {body}")<line_sep><return><false><block_end>a_valid,key_a=self.__extract_key_and_validate(partial_auth_a private_key body)<line_sep>b_valid,key_b=self.__extract_key_and_validate(partial_auth_b private_key body)<if_stmt><not>a_valid<or><not>b_valid<block_start>logging.error(f"Cloud not validate partial auth for {body}")<line_sep><return><false><block_end><try_stmt><block_start>signing_key_uuid=UUID(signing_key)<block_end><except_stmt>Exception<block_start>logging.error(f"Wrong signing key format. Cannot validate parital auth for {body}")<line_sep><return><false><block_end><if_stmt>(key_a.int^key_b.int)<ne>signing_key_uuid.int<block_start>logging.error(f"Partial auth keys combination mismatch for {body}")<line_sep><return><false><block_end><return><true><block_end>@classmethod<def_stmt>__extract_key_and_validate cls encrypted:str private_key:RSAPrivateKey body:ActionRequestBody<arrow>(bool Optional[UUID])<block_start><try_stmt><block_start>plain=private_key.decrypt(base64.b64decode(encrypted.encode("utf-8")) padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()) algorithm=hashes.SHA256() label=<none>))<line_sep>auth=PartialAuth(**json.loads(plain.decode("utf-8")))<line_sep>body_string=body.json(exclude_none=<true> sort_keys=<true> separators=(',' ':')).encode("utf-8")<line_sep>body_hash=f"v0={hashlib.sha256(body_string).hexdigest()}"<line_sep><return>hmac.compare_digest(body_hash auth.hash) auth.key<block_end><except_stmt>Exception<block_start>logging.error("Error validating partial auth data" exc_info=<true>)<line_sep><return><false> <none><block_end><block_end><block_end>
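A minimal standalone sketch of the partial-key scheme described in the __validate_request docstring above (illustrative values only, and the RSA decryption layer is omitted): two partial keys whose XOR recovers the signing key, plus the v0-prefixed SHA-256 hash of the request body.

import hashlib
import hmac
import uuid

signing_key = uuid.uuid4()

# Sender side: split the signing key so that key_a XOR key_b == signing_key.
key_a = uuid.uuid4()
key_b = uuid.UUID(int=signing_key.int ^ key_a.int)

body_string = b'{"action_name":"do_something","timestamp":0}'  # assumed canonical body bytes
body_hash = "v0=" + hashlib.sha256(body_string).hexdigest()

# Receiver side: both body hashes must match and the XOR of the keys must equal the signing key.
assert hmac.compare_digest(body_hash, "v0=" + hashlib.sha256(body_string).hexdigest())
assert uuid.UUID(int=key_a.int ^ key_b.int) == signing_key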
|
# -*- coding: utf-8 -*-
<import_from_stmt>addons.base.tests.base OAuthAddonTestCaseMixin AddonTestCase<import_from_stmt>addons.s3.tests.factories S3AccountFactory<import_from_stmt>addons.s3.provider S3Provider<import_from_stmt>addons.s3.serializer S3Serializer<class_stmt>S3AddonTestCase(OAuthAddonTestCaseMixin AddonTestCase)<block_start>ADDON_SHORT_NAME='s3'<line_sep>ExternalAccountFactory=S3AccountFactory<line_sep>Provider=S3Provider<line_sep>Serializer=S3Serializer<line_sep>client=<none><line_sep>folder={'path':'bucket' 'name':'bucket' 'id':'bucket'}<block_end>
|
<import_from_stmt>django forms<import_from_stmt>osf.models Preprint<class_stmt>ChangeProviderForm(forms.ModelForm)<block_start><class_stmt>Meta<block_start>model=Preprint<line_sep>fields=('provider' )<block_end><block_end>
|
<import_from_stmt>testil eq<import_from_stmt>..corrupt_couch find_missing_ids<def_stmt>test_find_missing_ids <block_start><def_stmt>test result_sets expected_missing expected_tries min_tries=5<block_start><def_stmt>get_ids <block_start><while_stmt>len(results)<g>1<block_start><return>results.pop()<block_end><return>results[0]<block_end>results=list(reversed(result_sets))<line_sep>missing,tries=find_missing_ids(get_ids min_tries)<line_sep>eq(missing expected_missing)<line_sep>eq(tries expected_tries)<block_end><yield>test [{1 2}] set() 5<line_sep><yield>test [{1 2}] set() 6 6<line_sep><yield>test [{1 2}] set() 10 10<line_sep><yield>test [{1 2} {1 3} {2 3}] {1 2 3} 7<line_sep><yield>test [{1 2} {1 3} {1 3} {1 3} {1 3} {2 3}] {1 2 3} 10<line_sep><yield>test [{1 2}]+[{1 3}]<times>5+[{2 4}] {2 3} 6<line_sep><yield>test [{1 2}]+[{1 3}]<times>10+[{2 4}] {2 3} 11 10<block_end>
|
<import_stmt>os<line_sep>os.environ['ONIR_IGNORE_ARGV']='true'<import_stmt>json<import_stmt>argparse<import_from_stmt>onir metrics<def_stmt>main <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('qrels')<line_sep>parser.add_argument('run')<line_sep>parser.add_argument('--each_topic' '-q' action='store_true')<line_sep>parser.add_argument('--nosummary' '-n' action='store_true')<line_sep>parser.add_argument('--json_output' '-j' action='store_true')<line_sep>parser.add_argument('--verbose' '-v' action='store_true')<line_sep>parser.add_argument('metrics' nargs='+')<line_sep>args=parser.parse_args()<line_sep>result=metrics.calc(args.qrels args.run args.metrics verbose=args.verbose)<if_stmt>args.json_output<block_start>print(json.dumps(result))<block_end><elif_stmt>result<block_start><if_stmt>args.each_topic<block_start><for_stmt>qid result[args.metrics[0]]<block_start><for_stmt>metric args.metrics<block_start>print(f'{metric}\t{qid}\t{result[metric][qid]:.4f}')<block_end><block_end><block_end><if_stmt><not>args.nosummary<block_start><for_stmt>metric,mean metrics.mean(result).items()<block_start>print(f'{metric}\tall\t{mean:.4f}')<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
|
<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torch optim<import_from_stmt>torch.autograd Variable<import_from_stmt>pdb set_trace<as>T<import_from_stmt>forge.ethyr rollouts<import_from_stmt>forge.ethyr.torch loss<import_from_stmt>forge.ethyr.torch param<class_stmt>ManualAdam(optim.Adam)<block_start><def_stmt>step self grads<block_start>grads=Variable(torch.Tensor(np.array(grads)))<line_sep>self.param_groups[0]['params'][0].grad=grads<line_sep>super().step()<block_end><block_end><class_stmt>ManualSGD(optim.SGD)<block_start><def_stmt>step self grads<block_start>grads=Variable(torch.Tensor(np.array(grads)))<line_sep>self.param_groups[0]['params'][0].grad=grads<line_sep>super().step()<block_end><block_end><def_stmt>backward rolls anns valWeight=0.5 entWeight=0<block_start>atns,vals,rets=rollouts.mergeRollouts(rolls.values())<line_sep>returns=torch.tensor(rets).view(-1 1).float()<line_sep>vals=torch.cat(vals)<line_sep>pg,entropy,attackentropy=0 0 0<for_stmt>i,atnList enumerate(atns)<block_start>aArg,aArgIdx=list(zip(*atnList))<line_sep>aArgIdx=torch.stack(aArgIdx)<line_sep>l,e=loss.PG(aArg aArgIdx vals returns)<line_sep>pg<augadd>l<line_sep>entropy<augadd>e<block_end>valLoss=loss.valueLoss(vals returns)<line_sep>totLoss=pg+valWeight<times>valLoss+entWeight<times>entropy<line_sep>totLoss.backward()<line_sep>grads=[param.getGrads(ann)<for>ann anns]<line_sep>reward=np.mean(rets)<line_sep><return>reward vals.mean() grads pg valLoss entropy<block_end>
|
<import_stmt>asyncio<import_stmt>hashlib<import_from_stmt>event_driven_simulator Connection<line_sep>RPC_TIMEOUT_MS=100<class_stmt>ClientRequest<block_start><def_stmt>__init__ self data<block_start>self.data=data<block_end><def_stmt>digest self<block_start>m=hashlib.sha256()<line_sep>m.update(self.data)<line_sep><return>m.digest()<block_end><block_end><class_stmt>PrePrepareMsg<block_start><def_stmt>__init__ self view seq_num digest m sig<block_start>self.view=view<line_sep># Seq number
self.seq_num=seq_num<line_sep># Client request digest
self.digest=digest<line_sep># Client request (can be carried by different transport)
self.m=m<line_sep># Signature
self.sig=sig<block_end><block_end><class_stmt>PrepareMsg<block_start><def_stmt>__init__ self view seq_num digest node_id sig# View
<block_start>self.view=view<line_sep># Seq number
self.seq_num=seq_num<line_sep># Digest
self.digest=digest<line_sep># Node id (may not be needed as node_id can be identified from sig)
self.node_id=node_id<line_sep>self.sig=sig<block_end><block_end><class_stmt>CommitMsg<block_start><def_stmt>__init__ self view seq_num digest node_id sig<block_start>self.view=view<line_sep>self.seq_num=seq_num<line_sep>self.digest=digest<line_sep>self.node_id=node_id<line_sep>self.sig=sig<block_end><block_end><class_stmt>CheckpointMsg<block_start><def_stmt>__init__ self seq_num state_digest sign<block_start>self.seq_num=seq_num<line_sep>self.state_digest=state_digest<line_sep>self.sign=sign<block_end><block_end><class_stmt>Node<block_start><def_stmt>__init__ self node_id view is_primary=<false><block_start>self.node_id=node_id<line_sep>self.is_primary=is_primary<line_sep># TODO
self.primary_node_id=0<line_sep>self.view=view<line_sep>self.connection_list=[]<line_sep>self.isCrashing=<false><line_sep>self.state=b""<line_sep># TODO
self.h=0<line_sep>self.H=10000<line_sep>self.seq_num=0<line_sep># Received messages. all should be persisted
# Could be removed after checkpoint
self.pre_prepare_msg_map=dict()<line_sep>self.prepare_msg_map=dict()<line_sep>self.commit_msg_map=dict()<line_sep>self.committed_set=set()<block_end><def_stmt>addConnection self conn<block_start>self.connection_list.append(conn)<block_end><def_stmt>__get_seq_num self# TODO: H check
<block_start>self.seq_num<augadd>1<line_sep><return>self.seq_num<block_end><async_keyword><def_stmt>start self<block_start><while_stmt><true><block_start><await>asyncio.sleep(1)<block_end><block_end><def_stmt>sendClientRequest self m<block_start><if_stmt><not>self.is_primary<block_start><return><none><block_end>msg=PrePrepareMsg(self.view self.__get_seq_num() m.digest() m self.node_id)<line_sep>self.pre_prepare_msg_map[msg.seq_num]=msg<line_sep>print("Node {}: sending pre-prepare msg, seq no {}, digest {}".format(self.node_id msg.seq_num msg.digest.hex()))<for_stmt>conn self.connection_list<block_start>asyncio.ensure_future(conn.sendPrePrepareMsgAsync(msg))<block_end><block_end># RPC handling
<def_stmt>handlePrePrepareMsg self msg<block_start><if_stmt>self.view<ne>msg.view<block_start><return><block_end><if_stmt>self.primary_node_id<ne>msg.sig<block_start><return><block_end><if_stmt>msg.seq_num<l>self.h<or>msg.seq_num<g>self.H<block_start><return><block_end><if_stmt>msg.seq_num<in>self.pre_prepare_msg_map<block_start><return><block_end>print("Node {}: processing pre-prepare msg, seq no {}, digest {}".format(self.node_id msg.seq_num msg.digest.hex()))<line_sep>self.pre_prepare_msg_map[msg.seq_num]=msg<line_sep>self.prepare_msg_map.setdefault(msg.seq_num set()).add(self.node_id)<line_sep>prepare_msg=PrepareMsg(msg.view msg.seq_num msg.digest self.node_id self.node_id)<for_stmt>conn self.connection_list<block_start>asyncio.ensure_future(conn.sendPrepareMsgAsync(prepare_msg))<block_end><block_end><def_stmt>__num_2f self<block_start>f=(len(self.connection_list)+1-1)<floordiv>3<line_sep><return>2<times>f<block_end><def_stmt>__is_prepared self seq_num<block_start><return>len(self.prepare_msg_map.get(seq_num set()))<ge>self.__num_2f()<block_end><def_stmt>handlePrepareMsg self msg<block_start><if_stmt>self.view<ne>msg.view<block_start><return><block_end># TODO: May cache the prepare message until pre_prepare is received.
<if_stmt>msg.seq_num<not><in>self.pre_prepare_msg_map<block_start><return><block_end>pre_prepare_msg=self.pre_prepare_msg_map[msg.seq_num]<if_stmt>pre_prepare_msg.digest<ne>msg.digest<block_start><return><block_end>print("Node {}: processing prepare msg from {}, seq no {}, digest {}".format(self.node_id msg.node_id msg.seq_num msg.digest.hex()))<line_sep>is_prepared_before=self.__is_prepared(msg.seq_num)<line_sep>self.prepare_msg_map.setdefault(msg.seq_num set()).add(msg.node_id)<if_stmt><not>is_prepared_before<and>self.__is_prepared(msg.seq_num)# Broadcast commit
<block_start>self.commit_msg_map.setdefault(msg.seq_num set()).add(self.node_id)<line_sep>commit_msg=CommitMsg(msg.view msg.seq_num msg.digest self.node_id self.node_id)<line_sep>print("Node {}: sending commit msg, seq no {}, digest {}".format(self.node_id msg.seq_num msg.digest.hex()))<for_stmt>conn self.connection_list<block_start>asyncio.ensure_future(conn.sendCommitMsgAsync(commit_msg))<block_end><block_end><block_end><def_stmt>handleCommitMsg self msg<block_start><if_stmt>self.view<ne>msg.view<block_start><return><block_end><if_stmt>msg.seq_num<not><in>self.pre_prepare_msg_map<block_start><return><block_end>pre_prepare_msg=self.pre_prepare_msg_map[msg.seq_num]<if_stmt>pre_prepare_msg.digest<ne>msg.digest<block_start><return><block_end>print("Node {}: processing commit msg from {}, seq no {}, digest {}".format(self.node_id msg.node_id msg.seq_num msg.digest.hex()))<line_sep>self.commit_msg_map.setdefault(msg.seq_num set()).add(msg.node_id)<if_stmt>(len(self.commit_msg_map[msg.seq_num])<ge>self.__num_2f()+1<and>msg.seq_num<not><in>self.committed_set)# TODO: Check the requests with lower sequences are executed (finalized)
# Message is irreversible/finalized.
# May discard all logs of the message,
# but current view-change protocol needs prepare messages.
# May replace with the digest as key
<block_start>self.committed_set.add(msg.seq_num)<line_sep># Simple state execution
s=hashlib.sha256()<line_sep>s.update(self.state)<line_sep>s.update(pre_prepare_msg.m.digest())<line_sep>self.state=s.digest()<line_sep>print("Node {}: msg with digest {} commited, state {}".format(self.node_id msg.digest.hex() self.state.hex()))<line_sep>checkpoint_msg=CheckpointMsg(msg.seq_num self.state self.node_id)<for_stmt>conn self.connection_list<block_start>asyncio.ensure_future(conn.sendCheckpointMsgAsync(checkpoint_msg))<block_end><block_end><block_end><def_stmt>handleCheckpointMsg self msg<block_start><pass><block_end><block_end><class_stmt>PbftConnection(Connection)<block_start><def_stmt>__init__ self source destination timeoutMs=RPC_TIMEOUT_MS networkDelayGenerator=<lambda>:0 <block_start>super().__init__(source destination timeoutMs networkDelayGenerator)<block_end><async_keyword><def_stmt>sendPrePrepareMsgAsync self request<block_start><return><await>self.callWithDelayOrTimeout(<lambda>:self.destination.handlePrePrepareMsg(request))<block_end><async_keyword><def_stmt>sendPrepareMsgAsync self request<block_start><return><await>self.callWithDelayOrTimeout(<lambda>:self.destination.handlePrepareMsg(request))<block_end><async_keyword><def_stmt>sendCommitMsgAsync self request<block_start><return><await>self.callWithDelayOrTimeout(<lambda>:self.destination.handleCommitMsg(request))<block_end><async_keyword><def_stmt>sendCheckpointMsgAsync self request<block_start><return><await>self.callWithDelayOrTimeout(<lambda>:self.destination.handleCheckpointMsg(request))<block_end><block_end>N=4<line_sep>nodeList=[Node(i view=0 is_primary=i<eq>0)<for>i range(N)]<line_sep>connectionMap={}<for_stmt>i range(N)<block_start><for_stmt>j range(N)<block_start><if_stmt>i<eq>j<block_start><continue><block_end>source=nodeList[i]<line_sep>dest=nodeList[j]<line_sep>source.addConnection(PbftConnection(source dest))<block_end><block_end><for_stmt>i range(N)<block_start>asyncio.get_event_loop().create_task(nodeList[i].start())<block_end># nodeList[-1].isCrashing = True
# nodeList[-2].isCrashing = True
nodeList[0].sendClientRequest(ClientRequest(b""))<try_stmt><block_start>asyncio.get_event_loop().run_forever()<block_end><except_stmt>Exception<as>e<block_start>print(e)<block_end>
|
<import_from_stmt>unittest.mock MagicMock<import_stmt>pytest<import_from_stmt>butterfree.clients CassandraClient<import_from_stmt>butterfree.hooks.schema_compatibility CassandraTableSchemaCompatibilityHook<class_stmt>TestCassandraTableSchemaCompatibilityHook<block_start><def_stmt>test_run_compatible_schema self spark_session<block_start>cassandra_client=CassandraClient(host=["mock"] keyspace="dummy_keyspace")<line_sep>cassandra_client.sql=MagicMock(# type: ignore
return_value=[{"column_name":"feature1" "type":"text"} {"column_name":"feature2" "type":"int"} ])<line_sep>table="table"<line_sep>input_dataframe=spark_session.sql("select 'abc' as feature1, 1 as feature2")<line_sep>hook=CassandraTableSchemaCompatibilityHook(cassandra_client table)<line_sep># act and assert
<assert_stmt>hook.run(input_dataframe)<eq>input_dataframe<block_end><def_stmt>test_run_incompatible_schema self spark_session<block_start>cassandra_client=CassandraClient(host=["mock"] keyspace="dummy_keyspace")<line_sep>cassandra_client.sql=MagicMock(# type: ignore
return_value=[{"column_name":"feature1" "type":"text"} {"column_name":"feature2" "type":"bigint"} ])<line_sep>table="table"<line_sep>input_dataframe=spark_session.sql("select 'abc' as feature1, 1 as feature2")<line_sep>hook=CassandraTableSchemaCompatibilityHook(cassandra_client table)<line_sep># act and assert
<with_stmt>pytest.raises(ValueError match="There's a schema incompatibility between")<block_start>hook.run(input_dataframe)<block_end><block_end><block_end>
|
<import_stmt>unittest<import_from_stmt>kafka.tools.protocol.requests ArgumentError<import_from_stmt>kafka.tools.protocol.requests.offset_fetch_v2 OffsetFetchV2Request<class_stmt>OffsetFetchV2RequestTests(unittest.TestCase)<block_start><def_stmt>test_process_arguments self<block_start>val=OffsetFetchV2Request.process_arguments(['groupname' 'topicname,4' 'nexttopic,9'])<assert_stmt>val<eq>{'group_id':'groupname' 'topics':[{'topic':'topicname' 'partitions':[4]} {'topic':'nexttopic' 'partitions':[9]}]}<block_end><def_stmt>test_process_arguments_alltopics self<block_start>val=OffsetFetchV2Request.process_arguments(['groupname'])<assert_stmt>val<eq>{'group_id':'groupname' 'topics':<none>}<block_end><def_stmt>test_process_arguments_notenough self<block_start>self.assertRaises(ArgumentError OffsetFetchV2Request.process_arguments [])<block_end><block_end>
|
# -*- coding: utf-8 -*-
u"""
Created on 2015-8-8
@author: cheng.li
"""<line_sep>__all__=['Timeseries' 'Normalizer']<import_from_stmt>PyFin.Math.Timeseries.Timeseries Timeseries<import_from_stmt>PyFin.Math.Timeseries.Normalizers Normalizer<line_sep>
|
<import_from_future_stmt> absolute_import<import_from_future_stmt> print_function<import_stmt>sys<import_stmt>os<line_sep># the next line can be removed after installation
sys.path.insert(0 os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))<import_from_stmt>veriloggen *<line_sep># new instance methods for NewModule
_recipe_control=<lambda>m:(m.Input('CLK') m.Input('RST'))<line_sep>_recipe_led=<lambda>m width=8:(m.OutputReg('LED' width initval=0) m.Reg('count' 32 initval=0))<line_sep># new class based on Module
NewModule=type('NewModule' (Module ) {'recipe_control':_recipe_control 'recipe_led':_recipe_led})<def_stmt>mkLed width=8 maxcount=1024<block_start>m=NewModule('blinkled')<line_sep>clk,rst=m.recipe_control()<line_sep>led,count=m.recipe_led(width)<line_sep>m.Always(Posedge(clk))(If(rst)(count(0)).Else(If(count<eq>1023)(count(0)).Else(count(count+1))))<line_sep>m.Always(Posedge(clk))(If(rst)(led(0)).Else(If(count<eq>1024-1)(led(led+1))))<line_sep><return>m<block_end><if_stmt>__name__<eq>'__main__'<block_start>led=mkLed()<line_sep>verilog=led.to_verilog()<line_sep>print(verilog)<block_end>
|
#Challenge 4: Take a binary tree and reverse it
#I decided to create two classes. One to hold the node, and one to act as the Binary Tree.
#Node class
#Only contains the information for the node. Val is the value of the node, left is the left child, and right is the right child
<class_stmt>Node<block_start><def_stmt>__init__ self val<block_start>self.left=<none><line_sep>self.right=<none><line_sep>self.val=val<block_end><block_end>#BinaryTree class
<class_stmt>BinaryTree#Initialize the tree with a blank root
<block_start><def_stmt>__init__ self<block_start>self.root=<none><block_end><def_stmt>getRoot self<block_start><return>self.root<block_end>#Recursively add node objects
<def_stmt>add self val<block_start><if_stmt>self.root<is><none><block_start>self.root=Node(val)<block_end><else_stmt><block_start>self._add(val self.root)<block_end><block_end><def_stmt>_add self val node<block_start><if_stmt>val<l>node.val<block_start><if_stmt>node.left<is><not><none><block_start>self._add(val node.left)<block_end><else_stmt><block_start>node.left=Node(val)<block_end><block_end><else_stmt><block_start><if_stmt>node.right<is><not><none><block_start>self._add(val node.right)<block_end><else_stmt><block_start>node.right=Node(val)<block_end><block_end><block_end>#Recursively print each node in the tree
<def_stmt>printTree self<block_start><if_stmt>self.root<is><not><none><block_start>self._printTree(self.root)<block_end><block_end><def_stmt>_printTree self node<block_start><if_stmt>node<is><not><none><block_start>self._printTree(node.left)<line_sep>print(node.val)<line_sep>self._printTree(node.right)<block_end><block_end>#returns a nested list of each level and the nodes in it
<def_stmt>getTree self<block_start>currLevel=[self.root]<line_sep>tree=list()<while_stmt>currLevel<block_start>lowerLevel=list()<line_sep>currNodes=list()<for_stmt>node currLevel<block_start>currNodes.append(node.val)<if_stmt>node.left<block_start>lowerLevel.append(node.left)<block_end><if_stmt>node.right<block_start>lowerLevel.append(node.right)<block_end><block_end>tree.append(currNodes)<line_sep>#print(currNodes)
currLevel=lowerLevel<block_end><return>tree<block_end><block_end><if_stmt>__name__<eq>'__main__'#create sample tree from example
<block_start>tree=BinaryTree()<line_sep>tree.add(4)<line_sep>tree.add(2)<line_sep>tree.add(7)<line_sep>tree.add(1)<line_sep>tree.add(3)<line_sep>tree.add(6)<line_sep>tree.add(9)<line_sep>#getTree returns the tree formatted in nested lists
formattedTree=tree.getTree()<line_sep>#reverse the levels
<for_stmt>level formattedTree<block_start>level.reverse()<line_sep>print(level)<block_end><block_end>
|
<import_stmt>json<import_stmt>pathlib<import_from_stmt>align.pnr.hpwl gen_netlist calculate_HPWL_from_placement_verilog_d Interval SemiPerimeter<import_from_stmt>align.pnr.render_placement standalone_overlap_checker<def_stmt>test_interval <block_start>i=Interval()<line_sep>i.add(7)<assert_stmt>0<eq>i.dist()<line_sep>i.add(3)<assert_stmt>4<eq>i.dist()<block_end><def_stmt>test_semiperimeter <block_start>sp=SemiPerimeter()<line_sep>sp.addPoint((3 7))<assert_stmt>0<eq>sp.dist()<line_sep>sp.addRect((10 10 12 12))<assert_stmt>14<eq>sp.dist()<block_end><def_stmt>test_gen_netlist <block_start>placement_verilog_d={"global_signals":[] "modules":[{"abstract_name":"top" "concrete_name":"top" "bbox":[0 0 100 100] "parameters":[] "instances":[{"abstract_template_name":"a" "concrete_template_name":"a" "instance_name":"u0" "transformation":{"oX":0 "oY":0 "sX":1 "sY":1} "fa_map":[{"formal":"x" "actual":"y"}]} {"abstract_template_name":"a" "concrete_template_name":"a" "instance_name":"u1" "transformation":{"oX":0 "oY":20 "sX":1 "sY":1} "fa_map":[{"formal":"x" "actual":"y"}]}]}] "leaves":[{"abstract_name":"a" "concrete_name":"a" "bbox":[0 0 10 10] "terminals":[{"name":"x" "rect":[4 4 6 6]}]}]}<line_sep>nets_d=gen_netlist(placement_verilog_d 'top')<assert_stmt>24<eq>calculate_HPWL_from_placement_verilog_d(placement_verilog_d 'top' nets_d)<block_end><def_stmt>test_gen_netlist_flip <block_start>placement_verilog_d={"global_signals":[] "modules":[{"abstract_name":"top" "concrete_name":"top" "bbox":[0 0 100 100] "parameters":[] "instances":[{"abstract_template_name":"a" "concrete_template_name":"a" "instance_name":"u0" "transformation":{"oX":0 "oY":0 "sX":1 "sY":1} "fa_map":[{"formal":"x" "actual":"y"}]} {"abstract_template_name":"a" "concrete_template_name":"a" "instance_name":"u1" "transformation":{"oX":15 "oY":20 "sX":1 "sY":1} "fa_map":[{"formal":"x" "actual":"y"}]}]}] "leaves":[{"abstract_name":"a" "concrete_name":"a" "bbox":[0 0 10 10] "terminals":[{"name":"x" "rect":[1 2 3 4]}]}]}<line_sep>nets_d=gen_netlist(placement_verilog_d 'top')<assert_stmt>39<eq>calculate_HPWL_from_placement_verilog_d(placement_verilog_d 'top' nets_d)<line_sep>placement_verilog_d['modules'][0]['instances'][0]['transformation']={"oX":10 "oY":0 "sX":-1 "sY":1}<assert_stmt>33<eq>calculate_HPWL_from_placement_verilog_d(placement_verilog_d 'top' nets_d)<line_sep>placement_verilog_d['modules'][0]['instances'][0]['transformation']={"oX":10 "oY":10 "sX":-1 "sY":-1}<assert_stmt>29<eq>calculate_HPWL_from_placement_verilog_d(placement_verilog_d 'top' nets_d)<line_sep>placement_verilog_d['modules'][0]['instances'][0]['transformation']={"oX":0 "oY":10 "sX":1 "sY":-1}<assert_stmt>35<eq>calculate_HPWL_from_placement_verilog_d(placement_verilog_d 'top' nets_d)<block_end><def_stmt>test_gen_netlist <block_start>placement_verilog_d={"global_signals":[] "modules":[{"abstract_name":"top" "concrete_name":"top" "bbox":[0 0 100 100] "parameters":[] "instances":[{"abstract_template_name":"a" "concrete_template_name":"a" "instance_name":"u0" "transformation":{"oX":0 "oY":0 "sX":1 "sY":1} "fa_map":[{"formal":"x" "actual":"y"}]} {"abstract_template_name":"a" "concrete_template_name":"a" "instance_name":"u1" "transformation":{"oX":0 "oY":20 "sX":1 "sY":1} "fa_map":[{"formal":"x" "actual":"y"}]}]}] "leaves":[{"abstract_name":"a" "concrete_name":"a" "bbox":[0 0 10 10] "terminals":[{"name":"x" "rect":[4 4 6 6]}]}] "global_signals":[{"actual":"y"}]}<line_sep>nets_d=gen_netlist(placement_verilog_d 
'top')<assert_stmt>24<eq>calculate_HPWL_from_placement_verilog_d(placement_verilog_d 'top' nets_d skip_globals=<false>)<assert_stmt>0<eq>calculate_HPWL_from_placement_verilog_d(placement_verilog_d 'top' nets_d skip_globals=<true>)<line_sep>placement_verilog_d['global_signals'][0]['actual']="a"<assert_stmt>24<eq>calculate_HPWL_from_placement_verilog_d(placement_verilog_d 'top' nets_d skip_globals=<true>)<block_end><def_stmt>test_gen_netlist_matrix <block_start>txt="""{
"global_signals": [],
"leaves": [
{
"abstract_name": "slice",
"bbox": [
0,
0,
800,
840
],
"concrete_name": "slice_a",
"terminal_centers": [
{
"center": [
400,
168
],
"name": "inp"
},
{
"center": [
400,
672
],
"name": "out"
}
],
"terminals": [
{
"name": "inp",
"rect": [
124,
152,
676,
184
]
},
{
"name": "out",
"rect": [
124,
656,
676,
688
]
}
]
}
],
"modules": [
{
"abstract_name": "matrix",
"bbox": [
0,
0,
2480,
3528
],
"concrete_name": "matrix_0",
"constraints": [
{
"abut": false,
"constraint": "order",
"direction": "top_to_bottom",
"instances": [
"u0",
"u1",
"u2",
"u3"
]
},
{
"constraint": "same_template",
"instances": [
"u0",
"u1",
"u2",
"u3"
]
}
],
"instances": [
{
"abstract_template_name": "row",
"concrete_template_name": "row_0",
"fa_map": [
{
"actual": "inp",
"formal": "inp"
},
{
"actual": "x1",
"formal": "out"
}
],
"instance_name": "u0",
"transformation": {
"oX": 0,
"oY": 2688,
"sX": 1,
"sY": 1
}
},
{
"abstract_template_name": "row",
"concrete_template_name": "row_0",
"fa_map": [
{
"actual": "x1",
"formal": "inp"
},
{
"actual": "x2",
"formal": "out"
}
],
"instance_name": "u1",
"transformation": {
"oX": 0,
"oY": 1764,
"sX": 1,
"sY": 1
}
},
{
"abstract_template_name": "row",
"concrete_template_name": "row_0",
"fa_map": [
{
"actual": "x2",
"formal": "inp"
},
{
"actual": "x3",
"formal": "out"
}
],
"instance_name": "u2",
"transformation": {
"oX": 0,
"oY": 924,
"sX": 1,
"sY": 1
}
},
{
"abstract_template_name": "row",
"concrete_template_name": "row_0",
"fa_map": [
{
"actual": "x3",
"formal": "inp"
},
{
"actual": "out",
"formal": "out"
}
],
"instance_name": "u3",
"transformation": {
"oX": 0,
"oY": 0,
"sX": 1,
"sY": 1
}
}
],
"parameters": [
"inp",
"out"
]
},
{
"abstract_name": "row",
"bbox": [
0,
0,
2480,
840
],
"concrete_name": "row_0",
"constraints": [
{
"abut": false,
"constraint": "order",
"direction": "left_to_right",
"instances": [
"u0",
"u1",
"u2"
]
},
{
"constraint": "same_template",
"instances": [
"u0",
"u1",
"u2"
]
}
],
"instances": [
{
"abstract_template_name": "slice",
"concrete_template_name": "slice_a",
"fa_map": [
{
"actual": "inp",
"formal": "inp"
},
{
"actual": "x1",
"formal": "out"
}
],
"instance_name": "u0",
"transformation": {
"oX": 0,
"oY": 0,
"sX": 1,
"sY": 1
}
},
{
"abstract_template_name": "slice",
"concrete_template_name": "slice_a",
"fa_map": [
{
"actual": "x1",
"formal": "inp"
},
{
"actual": "x2",
"formal": "out"
}
],
"instance_name": "u1",
"transformation": {
"oX": 880,
"oY": 0,
"sX": 1,
"sY": 1
}
},
{
"abstract_template_name": "slice",
"concrete_template_name": "slice_a",
"fa_map": [
{
"actual": "x2",
"formal": "inp"
},
{
"actual": "out",
"formal": "out"
}
],
"instance_name": "u2",
"transformation": {
"oX": 1680,
"oY": 0,
"sX": 1,
"sY": 1
}
}
],
"parameters": [
"inp",
"out"
]
}
]
}
"""<line_sep>placement_verilog_d=json.loads(txt)<line_sep>cn='matrix_0'<line_sep>nets_d=gen_netlist(placement_verilog_d cn)<assert_stmt>27584<eq>calculate_HPWL_from_placement_verilog_d(placement_verilog_d cn nets_d)<line_sep>placement_verilog_d['modules'][1]['instances'][1]['transformation']["oY"]<augadd>840<line_sep>placement_verilog_d['modules'][1]['instances'][1]['transformation']["sY"]=-1<assert_stmt>standalone_overlap_checker(placement_verilog_d cn)<line_sep>hpwl=calculate_HPWL_from_placement_verilog_d(placement_verilog_d cn nets_d)<line_sep>print(hpwl)<assert_stmt>27584<g>hpwl<line_sep>placement_verilog_d['modules'][0]['instances'][1]['transformation']["oX"]<augadd>2480<line_sep>placement_verilog_d['modules'][0]['instances'][1]['transformation']["sX"]=-1<assert_stmt>standalone_overlap_checker(placement_verilog_d cn)<line_sep>hpwl2=calculate_HPWL_from_placement_verilog_d(placement_verilog_d cn nets_d)<line_sep>print(hpwl2)<assert_stmt>hpwl<g>hpwl2<line_sep>placement_verilog_d['modules'][0]['instances'][3]['transformation']["oX"]<augadd>2480<line_sep>placement_verilog_d['modules'][0]['instances'][3]['transformation']["sX"]=-1<assert_stmt>standalone_overlap_checker(placement_verilog_d cn)<line_sep>hpwl3=calculate_HPWL_from_placement_verilog_d(placement_verilog_d cn nets_d)<line_sep>print(hpwl3)<assert_stmt>hpwl2<g>hpwl3<line_sep>placement_verilog_d['modules'][0]['instances'][0]['transformation']["oY"]<augadd>840<line_sep>placement_verilog_d['modules'][0]['instances'][0]['transformation']["sY"]=-1<line_sep>placement_verilog_d['modules'][0]['instances'][1]['transformation']["oY"]<augadd>840<line_sep>placement_verilog_d['modules'][0]['instances'][1]['transformation']["sY"]=-1<line_sep>placement_verilog_d['modules'][0]['instances'][2]['transformation']["oY"]<augadd>840<line_sep>placement_verilog_d['modules'][0]['instances'][2]['transformation']["sY"]=-1<line_sep>placement_verilog_d['modules'][0]['instances'][3]['transformation']["oY"]<augadd>840<line_sep>placement_verilog_d['modules'][0]['instances'][3]['transformation']["sY"]=-1<assert_stmt>standalone_overlap_checker(placement_verilog_d cn)<line_sep>hpwl4=calculate_HPWL_from_placement_verilog_d(placement_verilog_d cn nets_d)<line_sep>print(hpwl4)<assert_stmt>hpwl3<g>hpwl4<line_sep>placement_verilog_d['modules'][1]['instances'][1]['transformation']["oX"]<augsub>80<line_sep>placement_verilog_d['modules'][1]['instances'][2]['transformation']["oX"]<augsub>80<assert_stmt>standalone_overlap_checker(placement_verilog_d cn)<line_sep>hpwl5=calculate_HPWL_from_placement_verilog_d(placement_verilog_d cn nets_d)<line_sep>print(hpwl5)<assert_stmt>hpwl4<g>hpwl5<line_sep>placement_verilog_d['modules'][0]['instances'][0]['transformation']["oY"]<augsub>2<times>84<line_sep>placement_verilog_d['modules'][0]['instances'][1]['transformation']["oY"]<augsub>84<line_sep>placement_verilog_d['modules'][0]['instances'][2]['transformation']["oY"]<augsub>84<line_sep>placement_verilog_d['modules'][0]['instances'][1]['transformation']["oX"]<augsub>80<line_sep>placement_verilog_d['modules'][0]['instances'][3]['transformation']["oX"]<augsub>80<assert_stmt>standalone_overlap_checker(placement_verilog_d cn)<line_sep>hpwl6=calculate_HPWL_from_placement_verilog_d(placement_verilog_d cn nets_d)<line_sep>print(hpwl6)<assert_stmt>hpwl5<g>hpwl6<line_sep>print(hpwl6/27584-1)<block_end>
|
<import_from_stmt>typing Dict<import_from_stmt>botocore.waiter Waiter<class_stmt>ChangeSetCreateComplete(Waiter)<block_start><def_stmt>wait self ChangeSetName:str StackName:str=<none> NextToken:str=<none> WaiterConfig:Dict=<none><block_start>"""
Polls :py:meth:`CloudFormation.Client.describe_change_set` every 30 seconds until a successful state is reached. An error is returned after 120 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeChangeSet>`_
**Request Syntax**
::
waiter.wait(
ChangeSetName='string',
StackName='string',
NextToken='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type ChangeSetName: string
:param ChangeSetName: **[REQUIRED]**
The name or Amazon Resource Name (ARN) of the change set that you want to describe.
:type StackName: string
:param StackName:
If you specified the name of a change set, specify the stack name or ID (ARN) of the change set you want to describe.
:type NextToken: string
:param NextToken:
A string (provided by the DescribeChangeSet response output) that identifies the next page of information that you want to retrieve.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 30
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 120
:returns: None
"""<line_sep><pass><block_end><block_end><class_stmt>StackCreateComplete(Waiter)<block_start><def_stmt>wait self StackName:str=<none> NextToken:str=<none> WaiterConfig:Dict=<none><block_start>"""
Polls :py:meth:`CloudFormation.Client.describe_stacks` every 30 seconds until a successful state is reached. An error is returned after 120 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStacks>`_
**Request Syntax**
::
waiter.wait(
StackName='string',
NextToken='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type StackName: string
:param StackName:
The name or the unique stack ID that is associated with the stack, which are not always interchangeable:
* Running stacks: You can specify either the stack\'s name or its unique stack ID.
* Deleted stacks: You must specify the unique stack ID.
Default: There is no default value.
:type NextToken: string
:param NextToken:
A string that identifies the next page of stacks that you want to retrieve.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 30
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 120
:returns: None
"""<line_sep><pass><block_end><block_end><class_stmt>StackDeleteComplete(Waiter)<block_start><def_stmt>wait self StackName:str=<none> NextToken:str=<none> WaiterConfig:Dict=<none><block_start>"""
Polls :py:meth:`CloudFormation.Client.describe_stacks` every 30 seconds until a successful state is reached. An error is returned after 120 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStacks>`_
**Request Syntax**
::
waiter.wait(
StackName='string',
NextToken='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type StackName: string
:param StackName:
The name or the unique stack ID that is associated with the stack, which are not always interchangeable:
* Running stacks: You can specify either the stack\'s name or its unique stack ID.
* Deleted stacks: You must specify the unique stack ID.
Default: There is no default value.
:type NextToken: string
:param NextToken:
A string that identifies the next page of stacks that you want to retrieve.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 30
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 120
:returns: None
"""<line_sep><pass><block_end><block_end><class_stmt>StackExists(Waiter)<block_start><def_stmt>wait self StackName:str=<none> NextToken:str=<none> WaiterConfig:Dict=<none><block_start>"""
Polls :py:meth:`CloudFormation.Client.describe_stacks` every 5 seconds until a successful state is reached. An error is returned after 20 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStacks>`_
**Request Syntax**
::
waiter.wait(
StackName='string',
NextToken='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type StackName: string
:param StackName:
The name or the unique stack ID that is associated with the stack, which are not always interchangeable:
* Running stacks: You can specify either the stack\'s name or its unique stack ID.
* Deleted stacks: You must specify the unique stack ID.
Default: There is no default value.
:type NextToken: string
:param NextToken:
A string that identifies the next page of stacks that you want to retrieve.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 5
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 20
:returns: None
"""<line_sep><pass><block_end><block_end><class_stmt>StackUpdateComplete(Waiter)<block_start><def_stmt>wait self StackName:str=<none> NextToken:str=<none> WaiterConfig:Dict=<none><block_start>"""
Polls :py:meth:`CloudFormation.Client.describe_stacks` every 30 seconds until a successful state is reached. An error is returned after 120 failed checks.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStacks>`_
**Request Syntax**
::
waiter.wait(
StackName='string',
NextToken='string',
WaiterConfig={
'Delay': 123,
'MaxAttempts': 123
}
)
:type StackName: string
:param StackName:
The name or the unique stack ID that is associated with the stack, which are not always interchangeable:
* Running stacks: You can specify either the stack\'s name or its unique stack ID.
* Deleted stacks: You must specify the unique stack ID.
Default: There is no default value.
:type NextToken: string
:param NextToken:
A string that identifies the next page of stacks that you want to retrieve.
:type WaiterConfig: dict
:param WaiterConfig:
A dictionary that provides parameters to control waiting behavior.
- **Delay** *(integer) --*
The amount of time in seconds to wait between attempts. Default: 30
- **MaxAttempts** *(integer) --*
The maximum number of attempts to be made. Default: 120
:returns: None
"""<line_sep><pass><block_end><block_end>
|
<import_from_stmt>abc ABCMeta abstractmethod<import_from_stmt>xv_leak_tools.test_components.component Component<class_stmt>Cleanup(Component metaclass=ABCMeta)<block_start>@abstractmethod<def_stmt>cleanup self<block_start><pass><block_end><block_end>
|
<import_from_stmt>. base<import_from_stmt>. fields<class_stmt>Invoice(base.TelegramObject)<block_start>"""
This object contains basic information about an invoice.
https://core.telegram.org/bots/api#invoice
"""<line_sep>title:base.String=fields.Field()<line_sep>description:base.String=fields.Field()<line_sep>start_parameter:base.String=fields.Field()<line_sep>currency:base.String=fields.Field()<line_sep>total_amount:base.Integer=fields.Field()<block_end>
|
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>DQMServices.Core.DQMEDAnalyzer DQMEDAnalyzer<line_sep>RawDataMon=DQMEDAnalyzer('SiStripMonitorRawData' OutputMEsInRootFile=cms.bool(<false>) DigiProducer=cms.string('siStripDigis') OutputFileName=cms.string('SiStripRawData.root'))<line_sep>
|
# -*- coding: utf-8 -*-
"""
Models
~~~~
:copyright: (c) 2017-2018 by Baidu, Inc.
:license: Apache, see LICENSE for more details.
"""<import_from_stmt>.data_abstract DataAbstract<import_from_stmt>.raw Raw<import_from_stmt>.point Point<import_from_stmt>.thumb Thumb<import_from_stmt>.band_item BandItem<import_from_stmt>.user User<line_sep>
|
# 1. Configure 04_DataKit/config.ini
# 2. python 04_DataKit.py
# 3. The program automatically generates Common/commodities.json and Common/contracts.json
# 4. Note that simnow only runs during market hours
# todo: statemonitor.json is hard-coded and cannot be moved into the config directory yet; waiting on the maintainer
# todo: code:"CFFEX.T.HOT" or code:"CFFEX.T" is not supported yet; waiting on the maintainer
<import_from_stmt>wtpy WtDtEngine<if_stmt>__name__<eq>"__main__"# Create a runtime environment and add the strategy
<block_start>env=WtDtEngine()<line_sep>env.initialize("./04_DataKit/dtcfg.json" "./04_DataKit/logcfgdt.json")<line_sep>env.run()<line_sep>kw=input('press any key to exit\n')<block_end>
|
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pathdreamer.models.image_models."""<import_stmt>itertools<import_from_stmt>absl.testing parameterized<import_from_stmt>pathdreamer.models image_models<import_stmt>tensorflow<as>tf<class_stmt>ImageModelsTest(tf.test.TestCase parameterized.TestCase)<block_start>"""Tests on the image_models file."""<line_sep>@parameterized.parameters(list(itertools.product((1 2) (128 256) (41 ))))<def_stmt>test_model_output self batch_size image_size channels<block_start>"""Tests that encoder / decoder outputs correct shapes."""<line_sep>test_input=tf.random.uniform((batch_size image_size image_size channels) maxval=1 dtype=tf.int32)<line_sep>test_input=tf.cast(test_input tf.float32)<line_sep>hidden_dims=8<line_sep>test_encoder=image_models.ResNetEncoder(image_size=image_size hidden_dims=hidden_dims resnet_version='50')<line_sep>test_decoder=image_models.ResNetDecoder(image_size=image_size hidden_dims=hidden_dims output_dim=channels resnet_version='50')<line_sep>test_encoder_output,test_skip=test_encoder(test_input)<line_sep># Encoder output should be a vector of shape (N, output_dim).
self.assertEqual(test_encoder_output.shape[0] batch_size)<line_sep>self.assertLen(test_encoder_output.shape 2)<line_sep>tiled_encoder_output=test_encoder_output[: <none> <none> :]<line_sep>test_decoder_output=test_decoder(tiled_encoder_output test_skip)<line_sep># Decoder output should be equal to input shape.
self.assertEqual(test_decoder_output.shape test_input.shape)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.test.main()<block_end>
|
<import_from_stmt>models.PyCryptoBot PyCryptoBot<import_from_stmt>models.exchange.binance AuthAPI<as>BAuthAPI<import_from_stmt>models.exchange.coinbase_pro AuthAPI<as>CAuthAPI<line_sep># Coinbase Pro fees
app=PyCryptoBot(exchange='coinbasepro')<line_sep>api=CAuthAPI(app.getAPIKey() app.getAPISecret() app.getAPIPassphrase() app.getAPIURL())<line_sep>#print (api.getTakerFee())
#print (api.getTakerFee('BTC-GBP'))
#print (api.getMakerFee())
#print (api.getMakerFee('BTC-GBP'))
#print (api.getFees('BTCGBP'))
#print (api.getFees())
print(app.getMakerFee())<line_sep>print(app.getTakerFee())<line_sep># Binance fees
app=PyCryptoBot(exchange='binance')<line_sep>api=BAuthAPI(app.getAPIKey() app.getAPISecret() app.getAPIURL())<line_sep>#print (api.getTakerFee())
#print (api.getTakerFee('BTCGBP'))
#print (api.getMakerFee())
#print (api.getMakerFee('BTCGBP'))
#print (api.getFees('BTCGBP'))
#print (api.getFees())
print(app.getMakerFee())<line_sep>print(app.getTakerFee())<line_sep>
|
# std
<import_stmt>logging<import_stmt>os<import_stmt>subprocess<import_from_stmt>typing List<line_sep># project
<import_from_stmt>. Notifier Event<class_stmt>ScriptNotifier(Notifier)<block_start><def_stmt>__init__ self title_prefix:str config:dict<block_start>logging.info("Initializing script notifier.")<line_sep>super().__init__(title_prefix config)<try_stmt><block_start>self.script_path=config["script_path"]<block_end><except_stmt>KeyError<as>key<block_start>logging.error(f"Invalid config.yaml. Missing key: {key}")<block_end><if_stmt>self.script_path<block_start><if_stmt>os.path.isfile(self.script_path)<block_start><if_stmt>os.access(self.script_path os.X_OK)<is><false><block_start>logging.error(f"Invalid script path. File is not executable: {self.script_path}")<block_end><block_end><else_stmt><block_start>logging.error(f"Invalid script path. File does not exist: {self.script_path}")<line_sep>self.script_path=<none><block_end><block_end><block_end><def_stmt>send_events_to_user self events:List[Event]<arrow>bool<block_start><if_stmt>self.script_path<is><none><block_start><return><false><block_end><for_stmt>event events<block_start><if_stmt>event.type<in>self._notification_types<and>event.service<in>self._notification_services<block_start>subprocess.run([str(self.script_path) event.priority.name event.service.name event.message])<block_end><block_end><return><true><block_end><block_end>
|
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
<import_stmt>os<def_stmt>data_file filename<block_start><return>os.path.join(os.path.dirname(__file__) "test_data" filename)<block_end>
|
"""Samplers which run agents in environments."""<line_sep># yapf: disable
<import_from_stmt>garage.sampler._dtypes InProgressEpisode<import_from_stmt>garage.sampler._functions _apply_env_update<import_from_stmt>garage.sampler.default_worker DefaultWorker<import_from_stmt>garage.sampler.env_update EnvUpdate ExistingEnvUpdate NewEnvUpdate SetTaskUpdate <import_from_stmt>garage.sampler.fragment_worker FragmentWorker<import_from_stmt>garage.sampler.local_sampler LocalSampler<import_from_stmt>garage.sampler.multiprocessing_sampler MultiprocessingSampler<import_from_stmt>garage.sampler.ray_sampler RaySampler<import_from_stmt>garage.sampler.sampler Sampler<import_from_stmt>garage.sampler.vec_worker VecWorker<import_from_stmt>garage.sampler.worker Worker<import_from_stmt>garage.sampler.worker_factory WorkerFactory<line_sep># yapf: enable
__all__=['_apply_env_update' 'InProgressEpisode' 'FragmentWorker' 'Sampler' 'LocalSampler' 'RaySampler' 'MultiprocessingSampler' 'VecWorker' 'WorkerFactory' 'Worker' 'DefaultWorker' 'EnvUpdate' 'NewEnvUpdate' 'SetTaskUpdate' 'ExistingEnvUpdate' ]<line_sep>
|
<import_stmt>functools<import_from_stmt>hachoir.core.iso639 ISO639_2<line_sep>@functools.total_ordering<class_stmt>Language<block_start><def_stmt>__init__ self code<block_start>code=str(code)<if_stmt>code<not><in>ISO639_2<block_start><raise>ValueError("Invalid language code: %r"%code)<block_end>self.code=code<block_end><def_stmt>__eq__ self other<block_start><if_stmt>other.__class__<ne>Language<block_start><return>NotImplemented<block_end><return>self.code<eq>other.code<block_end><def_stmt>__lt__ self other<block_start><if_stmt>other.__class__<ne>Language<block_start><return>NotImplemented<block_end><return>self.code<l>other.code<block_end><def_stmt>__str__ self<block_start><return>ISO639_2[self.code]<block_end><def_stmt>__repr__ self<block_start><return>"<Language '%s', code=%r>"%(str(self) self.code)<block_end><block_end>
|
<import_stmt>datetime<import_stmt>time<import_stmt>six<import_from_stmt>uuid uuid4<import_stmt>json<import_from_stmt>json JSONEncoder<import_stmt>math<import_stmt>re<import_from_stmt>cooka.common consts<import_from_stmt>os path<as>P<line_sep>MAX_BUFFER_SIZE=1024<line_sep>UUID_CHARS=("a" "b" "c" "d" "e" "f" "g" "h" "i" "j" "k" "l" "m" "n" "o" "p" "q" "r" "s" "t" "u" "v" "w" "x" "y" "z" "0" "1" "2" "3" "4" "5" "6" "7" "8" "9" "A" "B" "C" "D" "E" "F" "G" "H" "I" "J" "K" "L" "M" "N" "O" "P" "Q" "R" "S" "T" "U" "V" "W" "X" "Y" "Z")<def_stmt>short_uuid <block_start>uuid=str(uuid4()).replace('-' '')<line_sep>result=''<for_stmt>i range(0 8)<block_start>sub=uuid[i<times>4:i<times>4+4]<line_sep>x=int(sub 16)<line_sep>result<augadd>UUID_CHARS[x%0x3E]<block_end><return>result<block_end><def_stmt>human_datetime date=<none><block_start><if_stmt>date<is><none><block_start>date=datetime.datetime.now()<block_end><return>date.strftime("%Y%m%d%H%M%S%f")<block_end><def_stmt>human_std_datetime date=<none><block_start><if_stmt>date<is><none><block_start>date=datetime.datetime.now()<block_end><return>date.strftime("%Y-%m-%d %H:%M:%S")<block_end><def_stmt>cut_suffix file_name<block_start>last_position=file_name.rfind('.')<if_stmt>last_position<g>-1<block_start><return>file_name[:last_position]<block_end><else_stmt><block_start><return>file_name<block_end><block_end><def_stmt>analyze_data_job_name file_name _datetime=<none><block_start>d=human_datetime(_datetime)<line_sep><return>f'job_analyze_{cut_suffix(file_name)}_{d}'<block_end><def_stmt>predict_job_name dataset_name _datetime=<none><block_start>d=human_datetime(_datetime)<line_sep><return>f'job_predict_{cut_suffix(dataset_name)}_{d}'<block_end><def_stmt>temporary_upload_file_path filename<block_start><return>f'{consts.PATH_TMP_UPLOAD}/{short_uuid()}/{filename}'<block_end><def_stmt>get_file_suffix file_name<block_start>last_position=file_name.rfind('.')<if_stmt>last_position<g>-1<block_start><return>file_name[last_position:]<block_end><else_stmt><block_start><raise>NameError(f"File {file_name} has no suffix. ")<block_end><block_end><def_stmt>human_data_size value<block_start><def_stmt>r v unit<block_start><return>"%s%s"%(round(v 2) unit)<block_end><if_stmt>value<l>1024<times>1024<block_start><return>r(value/1024 "KB")<block_end><elif_stmt>1024<times>1024<l>value<le>1024<times>1024<times>1024<block_start><return>r(value/1024/1024 "MB")<block_end><else_stmt><block_start><return>r(value/1024/1024/1024 "GB")<block_end><block_end><def_stmt>get_now_datetime <block_start><return>datetime.datetime.now()<block_end><def_stmt>get_now_long <block_start><return>round(time.time()<times>1000)<block_end><def_stmt>to_timestamp d<block_start><return>round(d.timestamp()<times>1000)<block_end><class_stmt>NaNEncoder(JSONEncoder)<block_start><def_stmt>default self obj<block_start><try_stmt><block_start>_=iter(obj)<block_end><except_stmt>TypeError<block_start><if_stmt>isinstance(obj float)<and>math.isnan(obj)<block_start><return>"null"<block_end><elif_stmt>isinstance(obj datetime.datetime)<block_start><return>to_timestamp(obj)<block_end><block_end><return>JSONEncoder.default(self obj)<block_end><block_end><def_stmt>dumps d indent=4<block_start>"""
Prevent generating Unicode
:param d:
:return:
"""<import_stmt>six<if_stmt>six.PY2<block_start><return>json.dumps(d ensure_ascii=<false> encoding='utf-8' indent=indent cls=NaNEncoder)<block_end><else_stmt><block_start><return>json.dumps(d ensure_ascii=<false> indent=indent cls=NaNEncoder)<block_end><block_end><def_stmt>dumps_bytes d<block_start>"""
Prevent generating Unicode
:param d:
:return:
"""<line_sep>str_data=dumps(d)<line_sep><return>to_bytes(str_data)<block_end><def_stmt>loads s<block_start>"""
Prevent generating Unicode
:param s:
:return:
"""<line_sep>d=json.loads(s)<line_sep><return>byteify(d)<block_end><def_stmt>to_str sv<block_start>"""将unicode和python3中的字节转换成字符串。
Args:
sv(Union(bytes, unicode, object)): bytes, unicode, or other data to be converted to a string;
Returns:
str: the string data.
"""<if_stmt>six.PY2<block_start><if_stmt>isinstance(sv unicode)<block_start><return>sv.encode('utf-8')<block_end><else_stmt><block_start><return>str(sv)<block_end><block_end><else_stmt># 在py3以及更高的版本中
<block_start><if_stmt>isinstance(sv bytes)<block_start><return>str(sv encoding='utf-8')<block_end><else_stmt><block_start><return>str(sv)<block_end><block_end><block_end><def_stmt>to_bytes s<block_start>"""Convert a string to a byte array.
Args:
s (Union(str, unicode)): the data to convert to bytes; types str and unicode are supported on Python 2, and str on Python 3.
Returns:
The byte data.
"""<if_stmt>six.PY2# 在python2中字符串就是字节数组
<block_start><if_stmt>isinstance(s unicode)<block_start><return>s.encode('utf-8')<block_end><elif_stmt>isinstance(s str)<block_start><return>s<block_end><else_stmt><block_start><raise>Exception("无法将类型%s转换为字节"%type(s).__name__)<block_end><block_end><else_stmt># On Python 3 and later
<block_start><if_stmt>isinstance(s str)<block_start><return>bytes(s encoding="utf-8")<block_end><elif_stmt>isinstance(s bytes)<block_start><return>s<block_end><else_stmt><block_start><raise>Exception("无法将类型%s转换为字节"%type(s).__name__)<block_end><block_end><block_end><def_stmt>byteify s encoding='utf-8'<block_start>"""
Convert Unicode values in a dict to strings
:param s:
:param encoding:
:return:
"""<if_stmt>isinstance(s dict)<block_start>r={}<for_stmt>k s<block_start>r[byteify(k)]=byteify(s[k])<block_end><return>r<block_end><elif_stmt>isinstance(s list)<block_start><return>[byteify(element)<for>element s]<block_end><elif_stmt>type(s).__name__<eq>'unicode'<block_start><return>s.encode(encoding)<block_end><else_stmt><block_start><return>s<block_end><block_end><def_stmt>datetime_diff end start<block_start><return>round((end-start).total_seconds() 2)<block_end># in seconds
<def_stmt>_divide n1 n2<block_start>r1=int(n1/n2)<line_sep>r2=n1%n2<line_sep><return>r1 r2<block_end><def_stmt>human_format_by_minute seconds<block_start>unit_day=3600<times>24<line_sep>unit_hour=3600<line_sep>unit_minute=60<if_stmt>seconds<ge>unit_day# by day
<block_start>n_days,remain_seconds=_divide(seconds unit_day)<line_sep>n_hours,remain_seconds=_divide(remain_seconds unit_hour)<line_sep>n_minutes,remain_seconds=_divide(remain_seconds unit_minute)<line_sep><return>f"{n_days}d {n_hours}h {n_minutes}m"<block_end><if_stmt>seconds<ge>unit_hour# by hour
<block_start>n_hour,remain_seconds=_divide(seconds unit_hour)<line_sep>n_minutes,remain_seconds=_divide(remain_seconds unit_minute)<line_sep><return>f"{n_hour}h {n_minutes}m"<block_end><elif_stmt>seconds<ge>unit_minute<block_start>n_minutes,remain_seconds=_divide(seconds unit_minute)<line_sep><return>f"{n_minutes}m"<block_end><else_stmt><block_start><return>"<1m"<block_end><block_end><def_stmt>datetime_diff_human_format_by_minute end start<block_start>seconds=round((end-start).total_seconds() 2)# in seconds
<return>human_format_by_minute(seconds)<block_end><def_stmt>time_diff end start<block_start>delta=end-start<line_sep><return>round(delta 2)<block_end># in seconds
<def_stmt>tail file_path n=100<block_start>""" Tail file.
Read file from tail using seek. Read 1024 chars at a time and scan for line breaks.
Args:
file_path: a text file only
n:
Returns:
Known Issues:
1. n=1 may show nothing; please check whether the file ends with a ''
"""<with_stmt>open(file_path 'r')<as>f<block_start>file_size=f.seek(0 2)# seek tail
current_position=file_size<line_sep>line_count=0<line_sep>first_line_position=0<while_stmt>current_position<g>0<block_start><if_stmt>current_position<l>MAX_BUFFER_SIZE<block_start>f.seek(0)<line_sep>buffer_size=current_position<line_sep>current_position=0<block_end><else_stmt><block_start>current_position=current_position-MAX_BUFFER_SIZE<line_sep>f.seek(current_position)<line_sep>buffer_size=MAX_BUFFER_SIZE<block_end>data=f.read(buffer_size)<line_sep>data_len=len(data)<for_stmt>i range(data_len)<block_start>p=data_len-i-1<if_stmt>data[p]<eq>''<block_start>line_count=line_count+1<block_end><if_stmt>line_count<eq>n<block_start>first_line_position=current_position+p+1<block_end><block_end><block_end># does not include break
f.seek(first_line_position)<while_stmt><true><block_start>_d=f.readline()<if_stmt>_d<is><not><none><and>len(_d)<g>0<block_start><yield>_d<block_end><else_stmt><block_start><break><block_end><block_end><block_end><block_end><def_stmt>readall p<block_start><with_stmt>open(p 'r')<as>f<block_start><return>f.read()<block_end><block_end><def_stmt>read_text p<block_start><with_stmt>open(p 'r' encoding='utf-8')<as>f<block_start><return>f.read()<block_end><block_end><def_stmt>load p<block_start><return>loads(readall(p))<block_end><def_stmt>make_dataset_name name<block_start>"""Dataset name contains "letters, numbers, -, _" only, any other content will be replaced with "-"
"""<def_stmt>may_replace c<block_start><if_stmt>re.match("\w" c)<is><none><block_start><if_stmt>c<eq>'-'<block_start><return>c<block_end><else_stmt><block_start><return>"_"<block_end><block_end><else_stmt><block_start><return>c<block_end><block_end><return>"".join([may_replace(c)<for>c name])<block_end><def_stmt>require_type name o t<block_start><if_stmt>o<is><not><none><block_start><if_stmt><not>isinstance(o t)<block_start><raise>Exception("'%s'需要%s类型。"%(name t.__name__))<block_end><block_end><block_end><def_stmt>require_attr_not_none o name<block_start>"""校验对象中的属性不能为空。
Args:
o:
name: the name of the attribute.
Returns:
"""<if_stmt>o<is><not><none><block_start><if_stmt>getattr(o name <none>)<is><none><block_start><raise>Exception("对象=%s的属性'%s'不能为空。"%(str(o) name))<block_end><block_end><block_end><def_stmt>require_list_non_empty name o<block_start>"""校验数组不能为空。
Args:
name: the object name used in the error message.
o: the list object.
Returns:
"""<if_stmt>is_non_empty_list(o)<block_start><pass><block_end><else_stmt><block_start><raise>Exception("'%s' 不能为空。"%name)<block_end><block_end><def_stmt>require_str_non_empty str_obj tips<block_start>"""校验数组不能为空。
Args:
str_obj: the string object.
tips: the message shown when the value is empty.
Returns:
"""<if_stmt>str_obj<is><none><or>len(str_obj)<eq>0<block_start><raise>Exception("'%s' 不能为空。"%tips)<block_end><block_end><def_stmt>cast_type o _type<block_start><if_stmt>o<is><none><block_start><return>o<block_end><else_stmt><block_start><if_stmt>_type<eq>int<block_start><if_stmt><not>isinstance(o int)<block_start><return>int(o)# may raise error
<block_end><else_stmt><block_start><return>o<block_end><block_end><if_stmt>_type<eq>float<block_start><if_stmt><not>isinstance(o float)<block_start><return>float(o)# may raise error
<block_end><else_stmt><block_start><return>o<block_end><block_end><elif_stmt>_type<eq>str<block_start><return>str(o)<block_end><else_stmt><block_start><raise>ValueError(f"Not supported convert type: {_type}")<block_end><block_end><block_end><def_stmt>require_in_dict _dict key _type=int default=<none><block_start>v=_dict.get(key default)<if_stmt>v<is><none><block_start><raise>ValueError(f"Key={key} can not be None.")<block_end><else_stmt><block_start><if_stmt>isinstance(v _type)<block_start><return>v<block_end><else_stmt><block_start><return>cast_type(v _type)<block_end><block_end><block_end><def_stmt>get_from_dict _dict key _type=int default=<none><block_start>v=_dict.get(key default)<if_stmt>v<is><none><block_start><return>v<block_end><else_stmt><block_start><if_stmt>isinstance(v _type)<block_start><return>v<block_end><else_stmt><block_start><return>cast_type(v _type)<block_end><block_end><block_end><def_stmt>is_non_empty_list o<block_start><return>o<is><not><none><and>len(o)<g>0<block_end><def_stmt>is_empty_list o<block_start><return>o<is><none><or>len(o)<eq>0<block_end><def_stmt>is_non_empty_str o<block_start><return>o<is><not><none><and>isinstance(o str)<and>len(o)<g>0<block_end><def_stmt>revert_to_dict_from_dict d key<block_start>v=d.get(key)<if_stmt>v<is><not><none><and>len(v)<g>0<and>isinstance(v str)<block_start>d[key]=loads(v)<block_end><block_end><def_stmt>revert_to_dict_from_object obj *keys<block_start><for_stmt>key keys<block_start>v=getattr(obj key)<if_stmt>v<is><not><none><and>len(v)<g>0<and>isinstance(v str)<block_start>setattr(obj key loads(v))<block_end><block_end><block_end><def_stmt>sqlalchemy_obj_to_dict entity_instance<block_start><return>{attr.key:getattr(entity_instance attr.key)<for>attr entity_instance._sa_instance_state.attrs}<block_end># s = datetime_diff_human_format_by_minute(get_now_datetime(), datetime.datetime(2019,9,29,10,10,10,10) )
# print(s)
<def_stmt>temporary_dataset_dir dataset_name<block_start><return>P.join(consts.PATH_TEMPORARY_DATASET dataset_name)<block_end><def_stmt>dataset_dir dataset_name<block_start><return>P.join(consts.PATH_DATASET dataset_name)<block_end><def_stmt>model_name dataset_name no_experiment<block_start><return>str("%s_%s"%(dataset_name no_experiment))<block_end><def_stmt>model_dir dataset_name model_name<block_start><return>P.join(dataset_dir(dataset_name) consts.FIELD_EXPERIMENT model_name)<block_end><def_stmt>read_csv csv_file has_header default_headers=<none><block_start><import_stmt>pandas<as>pd# took a lot of time(0.4s)
<if_stmt>has_header<block_start><return>pd.read_csv(csv_file)# read it all
<block_end><else_stmt><block_start><if_stmt>default_headers<is><none><block_start><raise>ValueError("When has_header is False, param default_headers is required.")<block_end>df=pd.read_csv(csv_file header=<none>)<line_sep>df.columns=default_headers<line_sep><return>df<block_end><block_end><def_stmt>relative_path p:str prefix=consts.DATA_DIR<block_start><if_stmt>p.startswith(prefix)<block_start><return>p[len(prefix)+1:]# Fix: should not start with '/'
<block_end><else_stmt><block_start><raise>ValueError(f"Path is not start with {prefix}.")<block_end><block_end><import_stmt>pickle<line_sep>ENCODING_LIST=["iso-8859-1" "ascii" 'utf-8' "gbk" "gb2312" "gb18030"]<line_sep>PICKLE_PROTOCOL=2<if_stmt>six.PY2<block_start>PICKLE_PROTOCOL=2<block_end><elif_stmt>six.PY3<block_start>PICKLE_PROTOCOL=3<block_end><def_stmt>serialize_with_ignore_variables obj variables<block_start>"""
Serialize an object while ignoring some of its attributes.
:param obj:
:param variables:
:return:
"""<if_stmt>variables<is><none><block_start>variables=[]<block_end>cache_map={}<line_sep># 1. 忽略对象
<for_stmt>v_name variables<block_start><if_stmt>hasattr(obj v_name)<block_start>value=getattr(obj v_name)<line_sep>cache_map[v_name]=value<line_sep>setattr(obj v_name <none>)<block_end><block_end># 2. Dump the data
bytes_value=pickle.dumps(obj protocol=PICKLE_PROTOCOL)<line_sep># 3. Restore the object
<for_stmt>k cache_map<block_start>setattr(obj k cache_map[k])<block_end><return>bytes_value<block_end><def_stmt>deserialize data<block_start><if_stmt>six.PY2<block_start><return>pickle.loads(data)<block_end><else_stmt><block_start>_e=<none><for_stmt>encoding ENCODING_LIST<block_start><try_stmt><block_start>obj=pickle.loads(data encoding=encoding)<line_sep><return>obj<block_end><except_stmt>Exception<as>e<block_start>_e=e<line_sep>print("使用编码%s加载对象失败, 原因 %s。"%(encoding str(e)))<block_end><block_end><raise>_e<block_end><block_end><def_stmt>load_pkl file_path<block_start><with_stmt>open(file_path 'rb')<as>f<block_start>data=f.read()<line_sep><return>deserialize(data)<block_end><block_end><def_stmt>serialize2bytes obj<block_start><return>serialize_with_ignore_variables(obj <none>)<block_end><def_stmt>serialize2file obj path<block_start>data=serialize_with_ignore_variables(obj <none>)<with_stmt>open(path 'wb')<as>f<block_start>f.write(data)<block_end><block_end><def_stmt>script_path script<block_start><return>f"{consts.PATH_INSTALL_HOME}/cooka/core/{script}"<block_end><def_stmt>abs_path p<block_start><return>P.join(consts.DATA_DIR p)<block_end><def_stmt>validate_sample_conf sample_conf<block_start><import_from_stmt>cooka.common.model SampleConf# fix import error
<if_stmt>sample_conf.sample_strategy<eq>SampleConf.Strategy.Percentage<block_start><if_stmt>sample_conf.percentage<le>0<or>sample_conf.percentage<g>100<block_start><raise>ValueError(f"Param sample_conf.percentage should in (0, 100] but is {sample_conf.percentage}")<block_end><block_end><elif_stmt>sample_conf.sample_strategy<eq>SampleConf.Strategy.RandomRows<block_start><if_stmt>sample_conf.n_rows<le>0<block_start><raise>ValueError(f"Param sample_conf.n_rows should bigger than 0 but is {sample_conf.n_rows}")<block_end><block_end><elif_stmt>sample_conf.sample_strategy<eq>SampleConf.Strategy.WholeData<block_start><pass><block_end><else_stmt><block_start><raise>ValueError(f"Unknown sample strategy: {sample_conf.sample_strategy}")<block_end><block_end>
|
<import_from_stmt>os path<import_stmt>setuptools<import_from_stmt>setuptools.config read_configuration<line_sep>BASE_PATH=path.dirname(__file__)<line_sep>CFG_PATH=path.join(BASE_PATH "setup.cfg")<line_sep>config=read_configuration(CFG_PATH)<line_sep>version=config["metadata"]["version"]<line_sep>setuptools.setup(name="dbnd-docker" package_dir={"":"src"} install_requires=["dbnd=="+version "dbnd-airflow=="+version "docker>=3.0" # k8s
"kubernetes>=9.0.0" "cryptography>=2.0.0" ] entry_points={"dbnd":["dbnd-docker = dbnd_docker._plugin"]} )<line_sep>
|