# basic tuple functionality
x = (1, 2, 3 * 4)
print(x)

try:
    x[0] = 4
except TypeError:
    print("TypeError")
print(x)

try:
    x.append(5)
except AttributeError:
    print("AttributeError")

print(x[1:])
print(x[:-1])
print(x[2:3])
print(x + (10, 100, 10000))
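For reference, the output this snippet should produce (a sketch, assuming standard CPython tuple semantics):

# (1, 2, 12)
# TypeError
# (1, 2, 12)
# AttributeError
# (2, 12)
# (1, 2)
# (12,)
# (1, 2, 12, 10, 100, 10000)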
import re


def cw2us(x):  # capwords to underscore notation
    return re.sub(r'(?<=[a-z])[A-Z]|(?<!^)[A-Z](?=[a-z])', r"_\g<0>", x).lower()


def mc2us(x):  # mixed case to underscore notation
    return cw2us(x)


def us2mc(x):  # underscore to mixed case notation
    return re.sub(r'_([a-z])', lambda m: (m.group(1).upper()), x)


def us2cw(x):  # underscore to capwords notation
    s = us2mc(x)
    return s[0].upper() + s[1:]


##
## Expected output:
##
## >>> cw2us("PrintHTML")
## 'print_html'
## >>> cw2us("IOError")
## 'io_error'
## >>> cw2us("SetXYPosition")
## 'set_xy_position'
## >>> cw2us("GetX")
## 'get_x'
##
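The expected-output block above only exercises cw2us; a complementary sketch for the reverse helpers (values derived from the regexes above, not taken from the original file):

## >>> us2mc("get_x_position")
## 'getXPosition'
## >>> us2cw("get_x_position")
## 'GetXPosition'
## >>> mc2us("getXPosition")
## 'get_x_position'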
# Store all kinds of lookup table.
# # generate rsPoly lookup table.
# from qrcode import base
# def create_bytes(rs_blocks):
# for r in range(len(rs_blocks)):
# dcCount = rs_blocks[r].data_count
# ecCount = rs_blocks[r].total_count - dcCount
# rsPoly = base.Polynomial([1], 0)
# for i in range(ecCount):
# rsPoly = rsPoly * base.Polynomial([1, base.gexp(i)], 0)
# return ecCount, rsPoly
# rsPoly_LUT = {}
# for version in range(1,41):
# for error_correction in range(4):
# rs_blocks_list = base.rs_blocks(version, error_correction)
# ecCount, rsPoly = create_bytes(rs_blocks_list)
# rsPoly_LUT[ecCount]=rsPoly.num
# print(rsPoly_LUT)
# Result. Usage: input: ecCount, output: Polynomial.num
# e.g. rsPoly = base.Polynomial(LUT.rsPoly_LUT[ecCount], 0)
rsPoly_LUT = {
    7: [1, 127, 122, 154, 164, 11, 68, 117],
    10: [1, 216, 194, 159, 111, 199, 94, 95, 113, 157, 193],
    13: [1, 137, 73, 227, 17, 177, 17, 52, 13, 46, 43, 83, 132, 120],
    15: [1, 29, 196, 111, 163, 112, 74, 10, 105, 105, 139, 132, 151, 32, 134, 26],
    16: [1, 59, 13, 104, 189, 68, 209, 30, 8, 163, 65, 41, 229, 98, 50, 36, 59],
    17: [1, 119, 66, 83, 120, 119, 22, 197, 83, 249, 41, 143, 134, 85, 53, 125, 99, 79],
    18: [1, 239, 251, 183, 113, 149, 175, 199, 215, 240, 220, 73, 82, 173, 75, 32, 67, 217, 146],
    20: [1, 152, 185, 240, 5, 111, 99, 6, 220, 112, 150, 69, 36, 187, 22, 228, 198, 121, 121, 165, 174],
    22: [1, 89, 179, 131, 176, 182, 244, 19, 189, 69, 40, 28, 137, 29, 123, 67, 253, 86, 218, 230, 26, 145, 245],
    24: [1, 122, 118, 169, 70, 178, 237, 216, 102, 115, 150, 229, 73, 130, 72, 61, 43, 206, 1, 237, 247, 127, 217, 144, 117],
    26: [1, 246, 51, 183, 4, 136, 98, 199, 152, 77, 56, 206, 24, 145, 40, 209, 117, 233, 42, 135, 68, 70, 144, 146, 77, 43, 94],
    28: [1, 252, 9, 28, 13, 18, 251, 208, 150, 103, 174, 100, 41, 167, 12, 247, 56, 117, 119, 233, 127, 181, 100, 121, 147, 176, 74, 58, 197],
    30: [1, 212, 246, 77, 73, 195, 192, 75, 98, 5, 70, 103, 177, 22, 217, 138, 51, 181, 246, 72, 25, 18, 46, 228, 74, 216, 195, 11, 106, 130, 150],
}
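A minimal usage sketch following the comment above; `base.Polynomial` and the `LUT` module name are assumptions carried over from the commented-out generator, not verified imports:

# Hypothetical usage (mirrors the comment above):
# from qrcode import base
# import LUT                     # the module holding rsPoly_LUT (assumed name)
#
# ec_count = 10                  # number of error-correction codewords per block
# rs_poly = base.Polynomial(LUT.rsPoly_LUT[ec_count], 0)   # rebuild the generator polynomial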
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
<import_stmt>argparse<import_from_stmt>base64 b64decode<import_stmt>csv<import_stmt>getpass<import_stmt>hashlib<import_stmt>json<import_stmt>logging<import_stmt>os<import_stmt>sys<import_stmt>traceback<import_stmt>random<import_stmt>yaml<import_stmt>pkg_resources<import_from_stmt>colorlog ColoredFormatter<import_from_stmt>sawtooth_signing create_context<import_from_stmt>sawtooth_signing CryptoFactory<import_from_stmt>sawtooth_signing ParseError<import_from_stmt>sawtooth_signing.secp256k1 Secp256k1PrivateKey<import_from_stmt>sawtooth_cli.exceptions CliException<import_from_stmt>sawtooth_cli.rest_client RestClient<import_from_stmt>sawtooth_cli.protobuf.settings_pb2 SettingsPayload<import_from_stmt>sawtooth_cli.protobuf.settings_pb2 SettingProposal<import_from_stmt>sawtooth_cli.protobuf.settings_pb2 SettingVote<import_from_stmt>sawtooth_cli.protobuf.settings_pb2 SettingCandidates<import_from_stmt>sawtooth_cli.protobuf.setting_pb2 Setting<import_from_stmt>sawtooth_cli.protobuf.transaction_pb2 TransactionHeader<import_from_stmt>sawtooth_cli.protobuf.transaction_pb2 Transaction<import_from_stmt>sawtooth_cli.protobuf.batch_pb2 BatchHeader<import_from_stmt>sawtooth_cli.protobuf.batch_pb2 Batch<import_from_stmt>sawtooth_cli.protobuf.batch_pb2 BatchList<line_sep>DISTRIBUTION_NAME='sawset'<line_sep>SETTINGS_NAMESPACE='000000'<line_sep>_MIN_PRINT_WIDTH=15<line_sep>_MAX_KEY_PARTS=4<line_sep>_ADDRESS_PART_SIZE=16<def_stmt>add_config_parser subparsers parent_parser<block_start>"""Creates the arg parsers needed for the config command and
its subcommands.
"""<line_sep>parser=subparsers.add_parser('config' help='Changes genesis block settings and create, view, and '<concat>'vote on settings proposals' description='Provides subcommands to change genesis block settings '<concat>'and to view, create, and vote on existing proposals.')<line_sep>config_parsers=parser.add_subparsers(title="subcommands" dest="subcommand")<line_sep>config_parsers.required=<true><block_end><def_stmt>_do_config_proposal_create args<block_start>"""Executes the 'proposal create' subcommand. Given a key file, and a
series of key/value pairs, it generates batches of sawtooth_settings
transactions in a BatchList instance. The BatchList is either stored to a
file or submitted to a validator, depending on the supplied CLI arguments.
"""<line_sep>settings=[s.split('=' 1)<for>s args.setting]<line_sep>signer=_read_signer(args.key)<line_sep>txns=[_create_propose_txn(signer setting)<for>setting settings]<line_sep>batch=_create_batch(signer txns)<line_sep>batch_list=BatchList(batches=[batch])<if_stmt>args.output<is><not><none><block_start><try_stmt><block_start><with_stmt>open(args.output 'wb')<as>batch_file<block_start>batch_file.write(batch_list.SerializeToString())<block_end><block_end><except_stmt>IOError<as>e<block_start><raise>CliException('Unable to write to batch file: {}'.format(str(e)))<from>e<block_end><block_end><elif_stmt>args.sabre_output<is><not><none><block_start><for_stmt>i,txn enumerate(txns)<block_start><with_stmt>open("{}-{}".format(args.sabre_output i) 'wb')<as>outfile<block_start>outfile.write(txn.payload)<block_end><block_end><block_end><elif_stmt>args.url<is><not><none><block_start>rest_client=RestClient(args.url)<line_sep>rest_client.send_batches(batch_list)<block_end><else_stmt><block_start><raise>AssertionError('No target for create set.')<block_end><block_end><def_stmt>_do_config_proposal_list args<block_start>"""Executes the 'proposal list' subcommand.
Given a url, optional filters on prefix and public key, this command lists
the current pending proposals for settings changes.
"""<def_stmt>_accept candidate public_key prefix# Check to see if the first public key matches the given public key
# (if it is not None). This public key belongs to the user that
# created it.
<block_start>has_pub_key=(<not>public_key<or>candidate.votes[0].public_key<eq>public_key)<line_sep>has_prefix=candidate.proposal.setting.startswith(prefix)<line_sep><return>has_prefix<and>has_pub_key<block_end>candidates_payload=_get_proposals(RestClient(args.url))<line_sep>candidates=[c<for>c candidates_payload.candidates<if>_accept(c args.public_key args.filter)]<if_stmt>args.format<eq>'default'<block_start><for_stmt>candidate candidates<block_start>print('{}: {} => {}'.format(candidate.proposal_id candidate.proposal.setting candidate.proposal.value))<block_end><block_end><elif_stmt>args.format<eq>'csv'<block_start>writer=csv.writer(sys.stdout quoting=csv.QUOTE_ALL)<line_sep>writer.writerow(['PROPOSAL_ID' 'KEY' 'VALUE'])<for_stmt>candidate candidates<block_start>writer.writerow([candidate.proposal_id candidate.proposal.setting candidate.proposal.value])<block_end><block_end><elif_stmt>args.format<eq>'json'<or>args.format<eq>'yaml'<block_start>candidates_snapshot={c.proposal_id:{c.proposal.setting:c.proposal.value}<for>c candidates}<if_stmt>args.format<eq>'json'<block_start>print(json.dumps(candidates_snapshot indent=2 sort_keys=<true>))<block_end><else_stmt><block_start>print(yaml.dump(candidates_snapshot default_flow_style=<false>)[0:-1])<block_end><block_end><else_stmt><block_start><raise>AssertionError('Unknown format {}'.format(args.format))<block_end><block_end><def_stmt>_do_config_proposal_vote args<block_start>"""Executes the 'proposal vote' subcommand. Given a key file, a proposal
id and a vote value, it generates a batch of sawtooth_settings transactions
in a BatchList instance. The BatchList is file or submitted to a
validator.
"""<line_sep>signer=_read_signer(args.key)<line_sep>rest_client=RestClient(args.url)<line_sep>proposals=_get_proposals(rest_client)<line_sep>proposal=<none><for_stmt>candidate proposals.candidates<block_start><if_stmt>candidate.proposal_id<eq>args.proposal_id<block_start>proposal=candidate<line_sep><break><block_end><block_end><if_stmt>proposal<is><none><block_start><raise>CliException('No proposal exists with the given id')<block_end><for_stmt>vote_record proposal.votes<block_start><if_stmt>vote_record.public_key<eq>signer.get_public_key().as_hex()<block_start><raise>CliException('A vote has already been recorded with this signing key')<block_end><block_end>txn=_create_vote_txn(signer args.proposal_id proposal.proposal.setting args.vote_value)<line_sep>batch=_create_batch(signer [txn])<line_sep>batch_list=BatchList(batches=[batch])<line_sep>rest_client.send_batches(batch_list)<block_end><def_stmt>_do_config_genesis args<block_start>signer=_read_signer(args.key)<line_sep>public_key=signer.get_public_key().as_hex()<line_sep>authorized_keys=args.authorized_key<if>args.authorized_key<else>[public_key]<if_stmt>public_key<not><in>authorized_keys<block_start>authorized_keys.append(public_key)<block_end>txns=[]<line_sep>txns.append(_create_propose_txn(signer ('sawtooth.settings.vote.authorized_keys' ','.join(authorized_keys))))<if_stmt>args.approval_threshold<is><not><none><block_start><if_stmt>args.approval_threshold<l>1<block_start><raise>CliException('approval threshold must not be less than 1')<block_end><if_stmt>args.approval_threshold<g>len(authorized_keys)<block_start><raise>CliException('approval threshold must not be greater than the number of '<concat>'authorized keys')<block_end>txns.append(_create_propose_txn(signer ('sawtooth.settings.vote.approval_threshold' str(args.approval_threshold))))<block_end>batch=_create_batch(signer txns)<line_sep>batch_list=BatchList(batches=[batch])<try_stmt><block_start><with_stmt>open(args.output 'wb')<as>batch_file<block_start>batch_file.write(batch_list.SerializeToString())<block_end>print('Generated {}'.format(args.output))<block_end><except_stmt>IOError<as>e<block_start><raise>CliException('Unable to write to batch file: {}'.format(str(e)))<from>e<block_end><block_end><def_stmt>_get_proposals rest_client<block_start>state_leaf=rest_client.get_leaf(_key_to_address('sawtooth.settings.vote.proposals'))<line_sep>config_candidates=SettingCandidates()<if_stmt>state_leaf<is><not><none><block_start>setting_bytes=b64decode(state_leaf['data'])<line_sep>setting=Setting()<line_sep>setting.ParseFromString(setting_bytes)<line_sep>candidates_bytes=<none><for_stmt>entry setting.entries<block_start><if_stmt>entry.key<eq>'sawtooth.settings.vote.proposals'<block_start>candidates_bytes=entry.value<block_end><block_end><if_stmt>candidates_bytes<is><not><none><block_start>decoded=b64decode(candidates_bytes)<line_sep>config_candidates.ParseFromString(decoded)<block_end><block_end><return>config_candidates<block_end><def_stmt>_read_signer key_filename<block_start>"""Reads the given file as a hex key.
Args:
key_filename: The filename where the key is stored. If None,
defaults to the default key for the current user.
Returns:
Signer: the signer
Raises:
CliException: If unable to read the file.
"""<line_sep>filename=key_filename<if_stmt>filename<is><none><block_start>filename=os.path.join(os.path.expanduser('~') '.sawtooth' 'keys' getpass.getuser()+'.priv')<block_end><try_stmt><block_start><with_stmt>open(filename 'r')<as>key_file<block_start>signing_key=key_file.read().strip()<block_end><block_end><except_stmt>IOError<as>e<block_start><raise>CliException('Unable to read key file: {}'.format(str(e)))<from>e<block_end><try_stmt><block_start>private_key=Secp256k1PrivateKey.from_hex(signing_key)<block_end><except_stmt>ParseError<as>e<block_start><raise>CliException('Unable to read key in file: {}'.format(str(e)))<from>e<block_end>context=create_context('secp256k1')<line_sep>crypto_factory=CryptoFactory(context)<line_sep><return>crypto_factory.new_signer(private_key)<block_end><def_stmt>_create_batch signer transactions<block_start>"""Creates a batch from a list of transactions and a public key, and signs
the resulting batch with the given signing key.
Args:
signer (:obj:`Signer`): The cryptographic signer
transactions (list of `Transaction`): The transactions to add to the
batch.
Returns:
`Batch`: The constructed and signed batch.
"""<line_sep>txn_ids=[txn.header_signature<for>txn transactions]<line_sep>batch_header=BatchHeader(signer_public_key=signer.get_public_key().as_hex() transaction_ids=txn_ids).SerializeToString()<line_sep><return>Batch(header=batch_header header_signature=signer.sign(batch_header) transactions=transactions)<block_end><def_stmt>_create_propose_txn signer setting_key_value<block_start>"""Creates an individual sawtooth_settings transaction for the given
key and value.
"""<line_sep>setting_key,setting_value=setting_key_value<line_sep>nonce=hex(random.randint(0 2<power>64))<line_sep>proposal=SettingProposal(setting=setting_key value=setting_value nonce=nonce)<line_sep>payload=SettingsPayload(data=proposal.SerializeToString() action=SettingsPayload.PROPOSE)<line_sep><return>_make_txn(signer setting_key payload)<block_end><def_stmt>_create_vote_txn signer proposal_id setting_key vote_value<block_start>"""Creates an individual sawtooth_settings transaction for voting on a
proposal for a particular setting key.
"""<if_stmt>vote_value<eq>'accept'<block_start>vote_id=SettingVote.ACCEPT<block_end><else_stmt><block_start>vote_id=SettingVote.REJECT<block_end>vote=SettingVote(proposal_id=proposal_id vote=vote_id)<line_sep>payload=SettingsPayload(data=vote.SerializeToString() action=SettingsPayload.VOTE)<line_sep><return>_make_txn(signer setting_key payload)<block_end><def_stmt>_make_txn signer setting_key payload<block_start>"""Creates and signs a sawtooth_settings transaction with with a payload.
"""<line_sep>serialized_payload=payload.SerializeToString()<line_sep>header=TransactionHeader(signer_public_key=signer.get_public_key().as_hex() family_name='sawtooth_settings' family_version='1.0' inputs=_config_inputs(setting_key) outputs=_config_outputs(setting_key) dependencies=[] payload_sha512=hashlib.sha512(serialized_payload).hexdigest() batcher_public_key=signer.get_public_key().as_hex()).SerializeToString()<line_sep><return>Transaction(header=header header_signature=signer.sign(header) payload=serialized_payload)<block_end><def_stmt>_config_inputs key<block_start>"""Creates the list of inputs for a sawtooth_settings transaction, for a
given setting key.
"""<line_sep><return>[_key_to_address('sawtooth.settings.vote.proposals') _key_to_address('sawtooth.settings.vote.authorized_keys') _key_to_address('sawtooth.settings.vote.approval_threshold') _key_to_address(key)]<block_end><def_stmt>_config_outputs key<block_start>"""Creates the list of outputs for a sawtooth_settings transaction, for a
given setting key.
"""<line_sep><return>[_key_to_address('sawtooth.settings.vote.proposals') _key_to_address(key)]<block_end><def_stmt>_short_hash in_str<block_start><return>hashlib.sha256(in_str.encode()).hexdigest()[:_ADDRESS_PART_SIZE]<block_end><def_stmt>_key_to_address key<block_start>"""Creates the state address for a given setting key.
"""<line_sep>key_parts=key.split('.' maxsplit=_MAX_KEY_PARTS-1)<line_sep>key_parts.extend(['']<times>(_MAX_KEY_PARTS-len(key_parts)))<line_sep><return>SETTINGS_NAMESPACE+''.join(_short_hash(x)<for>x key_parts)<block_end><def_stmt>setting_key_to_address key<block_start><return>_key_to_address(key)<block_end><def_stmt>create_console_handler verbose_level<block_start>clog=logging.StreamHandler()<line_sep>formatter=ColoredFormatter("%(log_color)s[%(asctime)s %(levelname)-8s%(module)s]%(reset)s "<concat>"%(white)s%(message)s" datefmt="%H:%M:%S" reset=<true> log_colors={'DEBUG':'cyan' 'INFO':'green' 'WARNING':'yellow' 'ERROR':'red' 'CRITICAL':'red' })<line_sep>clog.setFormatter(formatter)<if_stmt>verbose_level<eq>0<block_start>clog.setLevel(logging.WARN)<block_end><elif_stmt>verbose_level<eq>1<block_start>clog.setLevel(logging.INFO)<block_end><else_stmt><block_start>clog.setLevel(logging.DEBUG)<block_end><return>clog<block_end><def_stmt>setup_loggers verbose_level<block_start>logger=logging.getLogger()<line_sep>logger.setLevel(logging.DEBUG)<line_sep>logger.addHandler(create_console_handler(verbose_level))<block_end><def_stmt>create_parent_parser prog_name<block_start>parent_parser=argparse.ArgumentParser(prog=prog_name add_help=<false>)<line_sep>parent_parser.add_argument('-v' '--verbose' action='count' help='enable more verbose output')<try_stmt><block_start>version=pkg_resources.get_distribution(DISTRIBUTION_NAME).version<block_end><except_stmt>pkg_resources.DistributionNotFound<block_start>version='UNKNOWN'<block_end>parent_parser.add_argument('-V' '--version' action='version' version=(DISTRIBUTION_NAME+' (Hyperledger Sawtooth) version {}').format(version) help='display version information')<line_sep><return>parent_parser<block_end><def_stmt>create_parser prog_name<block_start>parent_parser=create_parent_parser(prog_name)<line_sep>parser=argparse.ArgumentParser(description='Provides subcommands to change genesis block settings '<concat>'and to view, create, and vote on settings proposals.' parents=[parent_parser])<line_sep>subparsers=parser.add_subparsers(title='subcommands' dest='subcommand')<line_sep>subparsers.required=<true><line_sep># The following parser is for the `genesis` subcommand.
# This command creates a batch that contains all of the initial
# transactions for on-chain settings
genesis_parser=subparsers.add_parser('genesis' help='Creates a genesis batch file of settings transactions' description='Creates a Batch of settings proposals that can be '<concat>'consumed by "sawadm genesis" and used '<concat>'during genesis block construction.')<line_sep>genesis_parser.add_argument('-k' '--key' type=str help='specify signing key for resulting batches '<concat>'and initial authorized key')<line_sep>genesis_parser.add_argument('-o' '--output' type=str default='config-genesis.batch' help='specify the output file for the resulting batches')<line_sep>genesis_parser.add_argument('-T' '--approval-threshold' type=int help='set the number of votes required to enable a setting change')<line_sep>genesis_parser.add_argument('-A' '--authorized-key' type=str action='append' help='specify a public key for the user authorized to submit '<concat>'config transactions')<line_sep># The following parser is for the `proposal` subcommand group. These
# commands allow the user to create proposals which may be applied
# immediately or placed in ballot mode, depending on the current on-chain
# settings.
proposal_parser=subparsers.add_parser('proposal' help='Views, creates, or votes on settings change proposals' description='Provides subcommands to view, create, or vote on '<concat>'proposed settings')<line_sep>proposal_parsers=proposal_parser.add_subparsers(title='subcommands' dest='proposal_cmd')<line_sep>proposal_parsers.required=<true><line_sep>prop_parser=proposal_parsers.add_parser('create' help='Creates proposals for setting changes' description='Create proposals for settings changes. The change '<concat>'may be applied immediately or after a series of votes, '<concat>'depending on the vote threshold setting.')<line_sep>prop_parser.add_argument('-k' '--key' type=str help='specify a signing key for the resulting batches')<line_sep>prop_target_group=prop_parser.add_mutually_exclusive_group()<line_sep>prop_target_group.add_argument('-o' '--output' type=str help='specify the output file for the resulting batches')<line_sep>prop_target_group.add_argument('--url' type=str help="identify the URL of a validator's REST API" default='http://localhost:8008')<line_sep>prop_target_group.add_argument('--sabre-output' type=str help='specify an output file to write the settings payload to '<concat>'for the sabre cli')<line_sep>prop_parser.add_argument('setting' type=str nargs='+' help='configuration setting as key/value pair with the '<concat>'format <key>=<value>')<line_sep>proposal_list_parser=proposal_parsers.add_parser('list' help='Lists the currently proposed (not active) settings' description='Lists the currently proposed (not active) settings. '<concat>'Use this list of proposals to find proposals to '<concat>'vote on.')<line_sep>proposal_list_parser.add_argument('--url' type=str help="identify the URL of a validator's REST API" default='http://localhost:8008')<line_sep>proposal_list_parser.add_argument('--public-key' type=str default='' help='filter proposals from a particular public key')<line_sep>proposal_list_parser.add_argument('--filter' type=str default='' help='filter keys that begin with this value')<line_sep>proposal_list_parser.add_argument('--format' default='default' choices=['default' 'csv' 'json' 'yaml'] help='choose the output format')<line_sep>vote_parser=proposal_parsers.add_parser('vote' help='Votes for specific setting change proposals' description='Votes for a specific settings change proposal. 
Use '<concat>'"sawset proposal list" to find the proposal id.')<line_sep>vote_parser.add_argument('--url' type=str help="identify the URL of a validator's REST API" default='http://localhost:8008')<line_sep>vote_parser.add_argument('-k' '--key' type=str help='specify a signing key for the resulting transaction batch')<line_sep>vote_parser.add_argument('proposal_id' type=str help='identify the proposal to vote on')<line_sep>vote_parser.add_argument('vote_value' type=str choices=['accept' 'reject'] help='specify the value of the vote')<line_sep><return>parser<block_end><def_stmt>main prog_name=os.path.basename(sys.argv[0]) args=<none> with_loggers=<true><block_start>parser=create_parser(prog_name)<if_stmt>args<is><none><block_start>args=sys.argv[1:]<block_end>args=parser.parse_args(args)<if_stmt>with_loggers<is><true><block_start><if_stmt>args.verbose<is><none><block_start>verbose_level=0<block_end><else_stmt><block_start>verbose_level=args.verbose<block_end>setup_loggers(verbose_level=verbose_level)<block_end><if_stmt>args.subcommand<eq>'proposal'<and>args.proposal_cmd<eq>'create'<block_start>_do_config_proposal_create(args)<block_end><elif_stmt>args.subcommand<eq>'proposal'<and>args.proposal_cmd<eq>'list'<block_start>_do_config_proposal_list(args)<block_end><elif_stmt>args.subcommand<eq>'proposal'<and>args.proposal_cmd<eq>'vote'<block_start>_do_config_proposal_vote(args)<block_end><elif_stmt>args.subcommand<eq>'genesis'<block_start>_do_config_genesis(args)<block_end><else_stmt><block_start><raise>CliException('"{}" is not a valid subcommand of "config"'.format(args.subcommand))<block_end><block_end><def_stmt>main_wrapper # pylint: disable=bare-except
<block_start><try_stmt><block_start>main()<block_end><except_stmt>CliException<as>e<block_start>print("Error: {}".format(e) file=sys.stderr)<line_sep>sys.exit(1)<block_end><except_stmt>KeyboardInterrupt<block_start><pass><block_end><except_stmt>BrokenPipeError<block_start>sys.stderr.close()<block_end><except_stmt>SystemExit<as>e<block_start><raise>e<block_end><except_stmt><block_start>traceback.print_exc(file=sys.stderr)<line_sep>sys.exit(1)<block_end><block_end> |
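As a standalone illustration of the settings address scheme implemented by `_short_hash` and `_key_to_address` above, re-derived from the constants in this file (a sketch, not an import of the CLI module): the '000000' namespace is followed by the first 16 hex characters of the SHA-256 hash of each of the up-to-four dot-separated key parts.

import hashlib

SETTINGS_NAMESPACE = '000000'
_MAX_KEY_PARTS = 4
_ADDRESS_PART_SIZE = 16


def _short_hash(in_str):
    # First 16 hex characters of the SHA-256 digest of one key part.
    return hashlib.sha256(in_str.encode()).hexdigest()[:_ADDRESS_PART_SIZE]


def key_to_address(key):
    # Split into at most four parts and pad with '' so every address is
    # 6 + 4 * 16 = 70 hex characters long.
    key_parts = key.split('.', maxsplit=_MAX_KEY_PARTS - 1)
    key_parts.extend([''] * (_MAX_KEY_PARTS - len(key_parts)))
    return SETTINGS_NAMESPACE + ''.join(_short_hash(x) for x in key_parts)


# key_to_address('sawtooth.settings.vote.proposals') -> a 70-character address
# beginning with '000000'.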
<import_from_future_stmt> print_function unicode_literals<import_stmt>errno<import_stmt>os<import_stmt>posixpath<import_stmt>re<import_stmt>sys<import_stmt>shutil<import_from_stmt>abc ABCMeta abstractmethod<try_stmt><block_start><import_from_stmt>urlparse urljoin<block_end><except_stmt>ImportError<block_start><import_from_stmt>urllib.parse urljoin<block_end><import_stmt>requests<import_from_stmt>flask safe_join<import_from_stmt>.constants STYLE_URLS_SOURCE STYLE_URLS_RES STYLE_ASSET_URLS_RE STYLE_ASSET_URLS_SUB_FORMAT <import_from_stmt>.vendor.six add_metaclass<line_sep>@add_metaclass(ABCMeta)<class_stmt>ReadmeAssetManager(object)<block_start>"""
Manages the style and font assets rendered with Readme pages.
Set cache_path to None to disable caching.
"""<def_stmt>__init__ self cache_path style_urls=<none> quiet=<none><block_start>super(ReadmeAssetManager self).__init__()<line_sep>self.cache_path=cache_path<line_sep>self.style_urls=list(style_urls)<if>style_urls<else>[]<line_sep>self.styles=[]<line_sep>self.quiet=quiet<block_end><def_stmt>_strip_url_params self url<block_start><return>url.rsplit('?' 1)[0].rsplit('#' 1)[0]<block_end><def_stmt>clear self<block_start>"""
Clears the asset cache.
"""<if_stmt>self.cache_path<and>os.path.exists(self.cache_path)<block_start>shutil.rmtree(self.cache_path)<block_end><block_end><def_stmt>cache_filename self url<block_start>"""
Gets a suitable relative filename for the specified URL.
"""<line_sep># FUTURE: Use url exactly instead of flattening it here
url=posixpath.basename(url)<line_sep><return>self._strip_url_params(url)<block_end>@abstractmethod<def_stmt>retrieve_styles self asset_url_path<block_start>"""
Get style URLs from the source HTML page and specified cached asset
URL path.
"""<line_sep><pass><block_end><block_end><class_stmt>GitHubAssetManager(ReadmeAssetManager)<block_start>"""
Reads the styles used for rendering Readme pages.
Set cache_path to None to disable caching.
"""<def_stmt>__init__ self cache_path style_urls=<none> quiet=<none><block_start>super(GitHubAssetManager self).__init__(cache_path style_urls quiet)<block_end><def_stmt>_get_style_urls self asset_url_path<block_start>"""
Gets the specified resource and parses all style URLs and their
assets in the form of the specified patterns.
"""<line_sep># Check cache
<if_stmt>self.cache_path<block_start>cached=self._get_cached_style_urls(asset_url_path)<line_sep># Skip fetching styles if there's any already cached
<if_stmt>cached<block_start><return>cached<block_end><block_end># Find style URLs
r=requests.get(STYLE_URLS_SOURCE)<if_stmt><not>200<le>r.status_code<l>300<block_start>print('Warning: retrieving styles gave status code' r.status_code file=sys.stderr)<block_end>urls=[]<for_stmt>style_urls_re STYLE_URLS_RES<block_start>urls.extend(re.findall(style_urls_re r.text))<block_end><if_stmt><not>urls<block_start>print('Warning: no styles found - see https://github.com/joeyespo/'<concat>'grip/issues/265' file=sys.stderr)<block_end># Cache the styles and their assets
<if_stmt>self.cache_path<block_start>is_cached=self._cache_contents(urls asset_url_path)<if_stmt>is_cached<block_start>urls=self._get_cached_style_urls(asset_url_path)<block_end><block_end><return>urls<block_end><def_stmt>_get_cached_style_urls self asset_url_path<block_start>"""
Gets the URLs of the cached styles.
"""<try_stmt><block_start>cached_styles=os.listdir(self.cache_path)<block_end><except_stmt>IOError<as>ex<block_start><if_stmt>ex.errno<ne>errno.ENOENT<and>ex.errno<ne>errno.ESRCH<block_start><raise><block_end><return>[]<block_end><except_stmt>OSError<block_start><return>[]<block_end><return>[posixpath.join(asset_url_path style)<for>style cached_styles<if>style.endswith('.css')]<block_end><def_stmt>_cache_contents self style_urls asset_url_path<block_start>"""
Fetches the given URLs and caches their contents
and their assets in the given directory.
"""<line_sep>files={}<line_sep>asset_urls=[]<for_stmt>style_url style_urls<block_start><if_stmt><not>self.quiet<block_start>print(' * Downloading style' style_url file=sys.stderr)<block_end>r=requests.get(style_url)<if_stmt><not>200<le>r.status_code<l>300<block_start>print(' -> Warning: Style request responded with' r.status_code file=sys.stderr)<line_sep>files=<none><line_sep><continue><block_end>asset_content=r.text<line_sep># Find assets and replace their base URLs with the cache directory
<for_stmt>url re.findall(STYLE_ASSET_URLS_RE asset_content)<block_start>asset_urls.append(urljoin(style_url url))<block_end>contents=re.sub(STYLE_ASSET_URLS_RE STYLE_ASSET_URLS_SUB_FORMAT.format(asset_url_path.rstrip('/')) asset_content)<line_sep># Prepare cache
<if_stmt>files<is><not><none><block_start>filename=self.cache_filename(style_url)<line_sep>files[filename]=contents.encode('utf-8')<block_end><block_end><for_stmt>asset_url asset_urls<block_start><if_stmt><not>self.quiet<block_start>print(' * Downloading asset' asset_url file=sys.stderr)<block_end># Retrieve binary file and show message
r=requests.get(asset_url stream=<true>)<if_stmt><not>200<le>r.status_code<l>300<block_start>print(' -> Warning: Asset request responded with' r.status_code file=sys.stderr)<line_sep>files=<none><line_sep><continue><block_end># Prepare cache
<if_stmt>files<is><not><none><block_start>filename=self.cache_filename(asset_url)<line_sep>files[filename]=r.raw.read(decode_content=<true>)<block_end><block_end># Skip caching if something went wrong to try again next time
<if_stmt><not>files<block_start><return><false><block_end># Cache files if all downloads were successful
cache={}<for_stmt>relname files<block_start>cache[safe_join(self.cache_path relname)]=files[relname]<block_end><if_stmt><not>os.path.exists(self.cache_path)<block_start>os.makedirs(self.cache_path)<block_end><for_stmt>filename cache<block_start><with_stmt>open(filename 'wb')<as>f<block_start>f.write(cache[filename])<block_end><block_end><if_stmt><not>self.quiet<block_start>print(' * Cached all downloads in' self.cache_path file=sys.stderr)<block_end><return><true><block_end><def_stmt>retrieve_styles self asset_url_path<block_start>"""
Get style URLs from the source HTML page and specified cached
asset base URL.
"""<if_stmt><not>asset_url_path.endswith('/')<block_start>asset_url_path<augadd>'/'<block_end>self.style_urls.extend(self._get_style_urls(asset_url_path))<block_end><block_end> |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from botbuilder.core import MessageFactory, TurnContext
from botbuilder.schema import ChannelAccount

from .dialog_bot import DialogBot


class RichCardsBot(DialogBot):
    """
    RichCardsBot prompts a user to select a Rich Card and then returns the card
    that matches the user's selection.
    """

    def __init__(self, conversation_state, user_state, dialog):
        super().__init__(conversation_state, user_state, dialog)

    async def on_members_added_activity(
        self, members_added: ChannelAccount, turn_context: TurnContext
    ):
        for member in members_added:
            if member.id != turn_context.activity.recipient.id:
                reply = MessageFactory.text(
                    "Welcome to CardBot. "
                    + "This bot will show you different types of Rich Cards. "
                    + "Please type anything to get started."
                )
                await turn_context.send_activity(reply)
<import_stmt>argparse<import_stmt>json<import_stmt>os<import_stmt>numpy<as>np<import_from_stmt>matplotlib pyplot<as>plt<import_from_stmt>log_parser read_results<line_sep>FONT_DICT={'fontsize':20}<line_sep>FIGURE_SIZE=(10 5)<def_stmt>plot_time_per_iter tracks figsize=FIGURE_SIZE title=<none> save_path='time_per_iter.png'<block_start>fig=plt.figure(figsize=figsize)<line_sep>time_per_iters=[]<line_sep>algs=tracks.keys()<for_stmt>alg_name algs<block_start>time_per_iter_alg=[]<for_stmt>track tracks[alg_name]# aggregating statistic over different tracks
<block_start>time_per_iter=track.get_time_per_iter()<line_sep>time_per_iter_alg.extend(time_per_iter)<block_end>time_per_iters.append(time_per_iter_alg)<block_end><if_stmt>title<is><not><none><block_start>plt.title(title FONT_DICT)<block_end><for_stmt>i,alg_name enumerate(algs)<block_start>print(alg_name)<line_sep>print(np.median(time_per_iters[i]))<block_end>plt.ylabel('Seconds' FONT_DICT)<line_sep>plt.boxplot(time_per_iters labels=algs)<if_stmt>os.path.exists(save_path)<block_start>print('WARNING: file '+save_path+' already exists')<block_end>plt.savefig(save_path dpi=100)<line_sep>plt.close(fig)<block_end><def_stmt>plot_quality tracks from_iter to_iter figsize=FIGURE_SIZE title=<none> save_path='quality.png'<block_start>fig=plt.figure(figsize=figsize)<if_stmt>title<is><not><none><block_start>plt.title(title FONT_DICT)<block_end>flat_tracks=[]<for_stmt>alg tracks.keys()<block_start>flat_tracks<augadd>tracks[alg]<block_end>first_track=flat_tracks[0]<line_sep>task_type=first_track.task_type<line_sep>metric='Error'<if>task_type<eq>'Classification'<or>task_type<eq>'Multiclass'<else>'RMSE'<line_sep>plt.xlabel('iteration' FONT_DICT)<line_sep>plt.ylabel(metric FONT_DICT)<line_sep>lines=[]<line_sep>names=[]<for_stmt>track flat_tracks<block_start>_,values=track.get_series()<line_sep>cur_to_iter=to_iter<if_stmt>to_iter<is><none><or>to_iter<g>track.get_fit_iterations()<block_start>cur_to_iter=track.get_fit_iterations()<block_end>values=values[from_iter:cur_to_iter]<line_sep>x_values=np.arange(from_iter cur_to_iter)<line_sep>line,=plt.plot(x_values values)<line_sep>lines.append(line)<line_sep>names.append(str(track))<block_end>plt.legend(lines names prop={'size':9})<if_stmt>os.path.exists(save_path)<block_start>print('WARNING: file '+save_path+' already exists')<block_end>plt.savefig(save_path dpi=100)<line_sep>plt.close(fig)<block_end><def_stmt>plot_quality_vs_time tracks best_quality low_percent=0.8 num_bins=100 only_min=<false> figsize=FIGURE_SIZE title=<none> save_path='time_distr.png'<block_start>fig=plt.figure(figsize=figsize)<if_stmt>title<is><not><none><block_start>plt.title(title FONT_DICT)<block_end>plt.xlabel('Quality (%)' FONT_DICT)<line_sep>plt.ylabel('Time to obtain (sec)' FONT_DICT)<line_sep>algs=tracks.keys()<line_sep>up_percent=1.-low_percent<for_stmt>i,alg_name enumerate(algs)<block_start>bins=[[]<for>j range(num_bins+1)]<for_stmt>track tracks[alg_name]<block_start>time_series,values=track.get_series()<line_sep>time_series=time_series-time_series[0]<for_stmt>time,value zip(time_series values)<block_start>percent=value/best_quality-1.<if_stmt>percent<g>up_percent<block_start><continue><block_end>idx=int(np.round(num_bins<times>percent/up_percent))<line_sep>bins[idx].append(time)<block_end><block_end>time_median=[]<line_sep>time_q2=[]<line_sep>time_min=[]<line_sep>x_values=[]<for_stmt>k,times enumerate(bins)<block_start><if_stmt>len(times)<g>0<block_start>time_median.append(np.median(times))<line_sep>time_q2.append(np.quantile(times 0.75))<line_sep>time_min.append(np.min(times))<line_sep>x_values.append(float(k)/num_bins<times>up_percent)<block_end><block_end>cur_min=time_min[0]<for_stmt>t range(1 
len(time_min))<block_start><if_stmt>time_min[t]<g>cur_min<block_start>time_min[t]=cur_min<block_end><else_stmt><block_start>cur_min=time_min[t]<block_end><block_end>error_plus=np.array(time_q2)-np.array(time_median)<line_sep>error_minus=np.array(time_median)-np.array(time_min)<line_sep>x_values=np.array(x_values)-(float(i)-1.)<times>up_percent/num_bins/4.<line_sep>x_values=1.-x_values<if_stmt>only_min<block_start>plt.plot(x_values time_min label=alg_name)<block_end><else_stmt><block_start>plt.errorbar(x=x_values y=time_median yerr=[error_minus error_plus] fmt='o-' barsabove=<true> capsize=2 linewidth=2 label=alg_name)<block_end><block_end>plt.legend(fontsize='large')<if_stmt>os.path.exists(save_path)<block_start>print('WARNING: file '+save_path+' already exists')<block_end>plt.savefig(save_path dpi=100)<line_sep>plt.close(fig)<block_end><def_stmt>params_to_str params<block_start><return>''.join(map(<lambda>(key value):'{}{}'.format(key str(value)) params.items()))<block_end><def_stmt>get_best tracks top=1<block_start>algorithms=tracks.keys()<line_sep>best_tracks={}<for_stmt>algorithm_name algorithms<block_start>best_scores=map(<lambda>track:track.get_best_score() tracks[algorithm_name])<line_sep>idx_best=np.argsort(best_scores)[:top]<line_sep>best_tracks[algorithm_name]=map(<lambda>idx:tracks[algorithm_name][idx] idx_best)<block_end><return>best_tracks<block_end><def_stmt>filter_tracks tracks params_cases<block_start>filtered_tracks={}<for_stmt>alg tracks.keys()<block_start>filtered_tracks[alg]=[]<for_stmt>track tracks[alg]<block_start><if_stmt>all([track.params_dict[param_name]<in>params_cases[param_name]<for>param_name params_cases.keys()])<block_start>filtered_tracks[alg].append(track)<block_end><block_end><block_end><return>filtered_tracks<block_end>ONLY_TYPES={'cat-cpu':['catboost-CPU'] 'xgb-cpu':['xgboost-CPU'] 'lgb-cpu':['lightgbm-CPU'] 'cat-gpu':['catboost-GPU'] 'xgb-gpu':['xgboost-GPU'] 'lgb-gpu':['lightgbm-GPU'] 'cat':['catboost-CPU' 'catboost-GPU'] 'xgb':['xgboost-CPU' 'xgboost-GPU'] 'lgb':['lightgbm-CPU' 'lightgbm-GPU'] 'cpu':['catboost-CPU' 'xgboost-CPU' 'lightgbm-CPU'] 'gpu':['catboost-GPU' 'xgboost-GPU' 'lightgbm-GPU']}<def_stmt>get_default_file_name plot_type params<block_start>default_file_names={'best':'best_quality.png' 'quality-vs-time':'quality_vs_time.png' 'time-per-iter':'time_per_iter.png'}<if_stmt>plot_type<in>default_file_names.keys()<block_start><return>default_file_names[plot_type]<block_end><if_stmt>plot_type<eq>'custom'<block_start><return>params_to_str(params)+'.png'<block_end><block_end><def_stmt>plot_experiment tracks experiment_name args<block_start>file_name=args.file_name<if>args.file_name<else>get_default_file_name(args.type args.params_cases)<line_sep>save_dir=os.path.join(args.out_dir experiment_name)<if_stmt><not>os.path.exists(save_dir)<block_start>os.makedirs(save_dir)<block_end>save_path=os.path.join(save_dir file_name)<if_stmt>args.only<block_start>filtered_tracks={}<for_stmt>only_type args.only<block_start><for_stmt>alg_name ONLY_TYPES[only_type]<block_start>filtered_tracks[alg_name]=tracks[alg_name]<block_end><block_end>tracks=filtered_tracks<block_end><if_stmt>args.params_cases<block_start><with_stmt>open(args.params_cases)<as>f<block_start>params_cases=json.load(f)<block_end>tracks=filter_tracks(tracks params_cases)<block_end><if_stmt>args.type<eq>'quality-vs-time'<block_start>best_tracks=get_best(tracks)<line_sep>best_quality=min(map(<lambda>tracks:tracks[0].get_best_score() 
best_tracks.values()))<line_sep>print(best_quality)<if_stmt>args.top<block_start>tracks=get_best(tracks top=args.top)<block_end>plot_quality_vs_time(tracks best_quality=best_quality low_percent=args.low_percent only_min=args.only_min figsize=args.fig_size num_bins=args.num_bins save_path=save_path)<block_end><if_stmt>args.type<eq>'best'<block_start>best_tracks=get_best(tracks top=args.top)<for_stmt>alg best_tracks.keys()<block_start><for_stmt>track best_tracks[alg]<block_start>print(track)<line_sep>print(track.get_best_score())<block_end><block_end>plot_quality(best_tracks args.from_iter args.to_iter figsize=args.fig_size title=args.title save_path=save_path)<block_end><if_stmt>args.type<eq>'custom'<block_start>plot_quality(tracks args.from_iter args.to_iter figsize=args.fig_size title=args.title save_path=save_path)<block_end><if_stmt>args.type<eq>'time-per-iter'<block_start>plot_time_per_iter(tracks figsize=args.fig_size title=args.title save_path=save_path)<block_end><block_end><def_stmt>main <block_start>plot_functions={'time-per-iter':plot_time_per_iter 'best':plot_quality 'quality-vs-time':plot_quality_vs_time 'custom':plot_quality}<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--type' choices=plot_functions.keys() required=<true>)<line_sep>parser.add_argument('--only' nargs='+' choices=ONLY_TYPES.keys() required=<false>)<line_sep>parser.add_argument('-i' '--results-file' required=<true>)<line_sep>parser.add_argument('-t' '--title')<line_sep>parser.add_argument('-f' '--fig-size' nargs=2 type=int default=FIGURE_SIZE)<line_sep>parser.add_argument('-o' '--out-dir' default='plots')<line_sep>parser.add_argument('--params-cases' help='draw plots only with those params (tracks filtering)'<concat>' path to json file, each line corresponds to learner '<concat>'parameter (e.g. max_depth) and list of its values')<line_sep>parser.add_argument('--from-iter' type=int default=0 help='only custom, best modes')<line_sep>parser.add_argument('--to-iter' type=int default=<none> help='only custom, best modes')<line_sep>parser.add_argument('--low-percent' type=float default=0.9 help='only quality-vs-time mode')<line_sep>parser.add_argument('--num-bins' type=int default=200 help='only quality-vs-time mode')<line_sep>parser.add_argument('--only-min' action='store_true' help='only quality-vs-time mode')<line_sep>parser.add_argument('--top' type=int default=3 help='only best mode')<line_sep>args=parser.parse_args()<line_sep>tracks=read_results(args.results_file)<for_stmt>experiment_name tracks<block_start>plot_experiment(tracks[experiment_name] experiment_name args)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
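`params_to_str` above relies on a Python 2 tuple-parameter lambda (`lambda (key, value): ...`), which is a syntax error under Python 3; a Python 3 compatible equivalent would be (a sketch, not part of the original script):

def params_to_str(params):
    # Concatenate each parameter name with its value, e.g. {'max_depth': 6} -> 'max_depth6'.
    return ''.join('{}{}'.format(key, str(value)) for key, value in params.items())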
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.rulefit import H2ORuleFitEstimator


def titanic():
    df = h2o.import_file(pyunit_utils.locate("smalldata/gbm_test/titanic.csv"),
                         col_types={'pclass': "enum", 'survived': "enum"})
    x = ["age", "sibsp", "parch", "fare", "sex", "pclass"]

    # Split the dataset into train and test
    train, test = df.split_frame(ratios=[.8], seed=1234)

    rfit = H2ORuleFitEstimator(min_rule_length=4,
                               max_rule_length=5,
                               max_num_rules=3,
                               seed=1234,
                               model_type="rules")
    rfit.train(training_frame=train, x=x, y="survived", validation_frame=test)

    assert rfit.rmse(valid=True) is not None, "validation metrics should be present"

    print(rfit.rule_importance())
    assert rfit._model_json["output"]["model_summary"] is not None, "model_summary should be present"
    assert len(rfit._model_json["output"]["model_summary"]._cell_values) > 0, "model_summary's content should be present"

    rfit_predictions = rfit.predict(test)

    import tempfile
    tmpdir = tempfile.mkdtemp()
    try:
        mojo_path = rfit.save_mojo(tmpdir)
        mojo_model = h2o.upload_mojo(mojo_path)
    finally:
        import shutil
        shutil.rmtree(tmpdir)

    mojo_predictions = mojo_model.predict(test)

    assert pyunit_utils.compare_frames(rfit_predictions, mojo_predictions, 0)


if __name__ == "__main__":
    pyunit_utils.standalone_test(titanic)
else:
    titanic()
<import_stmt>torch<import_stmt>numpy<as>np<def_stmt>mpjpe predicted target<block_start>"""
Mean per-joint position error (i.e. mean Euclidean distance),
often referred to as "Protocol #1" in many papers.
"""<assert_stmt>predicted.shape<eq>target.shape<line_sep><return>torch.mean(torch.norm(predicted-target dim=len(target.shape)-1))<block_end><def_stmt>weighted_mpjpe predicted target w<block_start>"""
Weighted mean per-joint position error (i.e. mean Euclidean distance)
"""<assert_stmt>predicted.shape<eq>target.shape<assert_stmt>w.shape[0]<eq>predicted.shape[0]<line_sep><return>torch.mean(w<times>torch.norm(predicted-target dim=len(target.shape)-1))<block_end><def_stmt>p_mpjpe_torch predicted target with_sRt=<false> full_torch=<false> with_aligned=<false><block_start>"""
Pose error: MPJPE after rigid alignment (scale, rotation, and translation),
often referred to as "Protocol #2" in many papers.
"""<assert_stmt>predicted.shape<eq>target.shape<line_sep>muX=torch.mean(target dim=1 keepdim=<true>)<line_sep>muY=torch.mean(predicted dim=1 keepdim=<true>)<line_sep>X0=target-muX<line_sep>Y0=predicted-muY<line_sep>X0[X0<power>2<l>1e-6]=1e-3<line_sep>normX=torch.sqrt(torch.sum(X0<power>2 dim=(1 2) keepdim=<true>))<line_sep>normY=torch.sqrt(torch.sum(Y0<power>2 dim=(1 2) keepdim=<true>))<line_sep>normX[normX<l>1e-3]=1e-3<line_sep>X0<augdiv>normX<line_sep>Y0<augdiv>normY<line_sep>H=torch.matmul(X0.transpose(1 2) Y0)<if_stmt>full_torch<block_start>U,s,V=batch_svd(H)<block_end><else_stmt><block_start>U,s,Vt=np.linalg.svd(H.cpu().numpy())<line_sep>V=torch.from_numpy(Vt.transpose(0 2 1)).cuda()<line_sep>U=torch.from_numpy(U).cuda()<line_sep>s=torch.from_numpy(s).cuda()<block_end>R=torch.matmul(V U.transpose(2 1))<line_sep># Avoid improper rotations (reflections), i.e. rotations with det(R) = -1
sign_detR=torch.sign(torch.unsqueeze(torch.det(R[0]) 0))<line_sep>V[: : -1]<augmul>sign_detR.unsqueeze(0)<line_sep>s[: -1]<augmul>sign_detR.flatten()<line_sep>R=torch.matmul(V U.transpose(2 1))# Rotation
tr=torch.unsqueeze(torch.sum(s dim=1 keepdim=<true>) 2)<line_sep>a=tr<times>normX/normY# Scale
t=muX-a<times>torch.matmul(muY R)# Translation
<if_stmt>(a<ne>a).sum()<g>0<block_start>print('NaN Error!!')<line_sep>print('UsV:' U s V)<line_sep>print('aRt:' a R t)<block_end>a[a<ne>a]=1.<line_sep>R[R<ne>R]=0.<line_sep>t[t<ne>t]=0.<line_sep># Perform rigid transformation on the input
predicted_aligned=a<times>torch.matmul(predicted R)+t<if_stmt>with_sRt<block_start><return>torch.sqrt(((predicted_aligned-target)<power>2).sum(-1)).mean() (a R t)#torch.mean(torch.norm(predicted_aligned - target, dim=len(target.shape)-1))
<block_end><if_stmt>with_aligned<block_start><return>torch.sqrt(((predicted_aligned-target)<power>2).sum(-1)).mean() predicted_aligned<block_end># Return MPJPE
<return>torch.sqrt(((predicted_aligned-target)<power>2).sum(-1)).mean()<block_end>#torch.mean(torch.norm(predicted_aligned - target, dim=len(target.shape)-1))#,(a,R,t),predicted_aligned
<def_stmt>batch_svd H<block_start>num=H.shape[0]<line_sep>U_batch,s_batch,V_batch=[] [] []<for_stmt>i range(num)<block_start>U,s,V=H[i].svd(some=<false>)<line_sep>U_batch.append(U.unsqueeze(0))<line_sep>s_batch.append(s.unsqueeze(0))<line_sep>V_batch.append(V.unsqueeze(0))<block_end><return>torch.cat(U_batch 0) torch.cat(s_batch 0) torch.cat(V_batch 0)<block_end><def_stmt>p_mpjpe predicted target with_sRt=<false> full_torch=<false> with_aligned=<false> each_separate=<false><block_start>"""
Pose error: MPJPE after rigid alignment (scale, rotation, and translation),
often referred to as "Protocol #2" in many papers.
"""<assert_stmt>predicted.shape<eq>target.shape<line_sep>muX=np.mean(target axis=1 keepdims=<true>)<line_sep>muY=np.mean(predicted axis=1 keepdims=<true>)<line_sep>X0=target-muX<line_sep>Y0=predicted-muY<line_sep>normX=np.sqrt(np.sum(X0<power>2 axis=(1 2) keepdims=<true>))<line_sep>normY=np.sqrt(np.sum(Y0<power>2 axis=(1 2) keepdims=<true>))<line_sep>X0<augdiv>(normX+1e-6)<line_sep>Y0<augdiv>(normY+1e-6)<line_sep>H=np.matmul(X0.transpose(0 2 1) Y0).astype(np.float16).astype(np.float64)<line_sep>U,s,Vt=np.linalg.svd(H)<line_sep>V=Vt.transpose(0 2 1)<line_sep>R=np.matmul(V U.transpose(0 2 1))<line_sep># Avoid improper rotations (reflections), i.e. rotations with det(R) = -1
sign_detR=np.sign(np.expand_dims(np.linalg.det(R) axis=1))<line_sep>V[: : -1]<augmul>sign_detR<line_sep>s[: -1]<augmul>sign_detR.flatten()<line_sep>R=np.matmul(V U.transpose(0 2 1))# Rotation
tr=np.expand_dims(np.sum(s axis=1 keepdims=<true>) axis=2)<line_sep>a=tr<times>normX/normY# Scale
t=muX-a<times>np.matmul(muY R)# Translation
# Perform rigid transformation on the input
predicted_aligned=a<times>np.matmul(predicted R)+t<if_stmt>each_separate<block_start><return>np.linalg.norm(predicted_aligned-target axis=len(target.shape)-1)<block_end>error=np.mean(np.linalg.norm(predicted_aligned-target axis=len(target.shape)-1))<if_stmt>with_sRt<and><not>with_aligned<block_start><return>error (a R t)<block_end><if_stmt>with_aligned<block_start><return>error (a R t) predicted_aligned<block_end># Return MPJPE
<return>error<block_end><def_stmt>n_mpjpe predicted target<block_start>"""
Normalized MPJPE (scale only), adapted from:
https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning/blob/master/losses/poses.py
"""<assert_stmt>predicted.shape<eq>target.shape<line_sep>norm_predicted=torch.mean(torch.sum(predicted<power>2 dim=3 keepdim=<true>) dim=2 keepdim=<true>)<line_sep>norm_target=torch.mean(torch.sum(target<times>predicted dim=3 keepdim=<true>) dim=2 keepdim=<true>)<line_sep>scale=norm_target/norm_predicted<line_sep><return>mpjpe(scale<times>predicted target)<block_end><def_stmt>mean_velocity_error predicted target<block_start>"""
Mean per-joint velocity error (i.e. mean Euclidean distance of the 1st derivative)
"""<assert_stmt>predicted.shape<eq>target.shape<line_sep>velocity_predicted=np.diff(predicted axis=0)<line_sep>velocity_target=np.diff(target axis=0)<line_sep><return>np.mean(np.linalg.norm(velocity_predicted-velocity_target axis=len(target.shape)-1))<block_end><def_stmt>test <block_start>r1=np.random.rand(3 14 3)<line_sep>r2=np.random.rand(3 14 3)<line_sep>pmpjpe=p_mpjpe(r1 r2 with_sRt=<false>)<line_sep>pmpjpe_torch=p_mpjpe_torch(torch.from_numpy(r1) torch.from_numpy(r2) with_sRt=<false> full_torch=<true>)<line_sep>print('pmpjpe: {}; {:.6f}; {:.6f}; {:.6f}'.format(np.abs(pmpjpe-pmpjpe_torch.numpy())<l>0.01 pmpjpe pmpjpe_torch.numpy() pmpjpe-pmpjpe_torch.numpy()))<block_end><if_stmt>__name__<eq>'__main__'<block_start>test()<block_end> |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>.optimizer AcceleratedOptimizer<import_from_stmt>.state is_apex_available is_deepspeed_available<if_stmt>is_deepspeed_available()<block_start><import_from_stmt>deepspeed DeepSpeedEngine<block_end><if_stmt>is_apex_available()<block_start><import_from_stmt>apex amp<block_end><class_stmt>DeepSpeedEngineWrapper(DeepSpeedEngine)<block_start>"""
Wrapper over deepspeed.DeepSpeedEngine object
"""<def_stmt>__init__ self *args **kwargs<block_start>super().__init__(*args **kwargs)<line_sep># overwriting micro_steps for user's gradient_accumulation
self.micro_steps=-1<block_end><def_stmt>step self lr_kwargs=<none><block_start>"""DeepSpeedEngine.step() without `micro_steps` update & no profiling"""<if_stmt>self.is_gradient_accumulation_boundary()# it shouldn't matter whether we keep this line or not
<block_start><if_stmt>self.progressive_layer_drop<block_start>self.progressive_layer_drop.update_state(self.global_steps)<block_end>self._take_model_step(lr_kwargs)<block_end><block_end><def_stmt>backward self loss<block_start>"""DeepSpeedEngine.backward() with with no loss scaling; no profiling but with `micro_steps` update"""<if_stmt>self.zero_optimization()<block_start>self.optimizer.is_gradient_accumulation_boundary=self.is_gradient_accumulation_boundary()<line_sep>self.optimizer.backward(loss)<block_end><elif_stmt>self.amp_enabled()# AMP requires delaying unscale when inside gradient accumulation boundaries
# https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations
<block_start>delay_unscale=<not>self.is_gradient_accumulation_boundary()<with_stmt>amp.scale_loss(loss self.optimizer delay_unscale=delay_unscale)<as>scaled_loss<block_start>scaled_loss.backward()<block_end><block_end><elif_stmt>self.fp16_enabled()<block_start>self.optimizer.backward(loss)<block_end><else_stmt><block_start>loss.backward()<block_end><if_stmt>self.enable_backward_allreduce<block_start>self.allreduce_gradients()<block_end># this will ensure deepspeed gradient_accumulation matches user's accumulation
self.micro_steps<augadd>1<block_end><block_end><class_stmt>DeepSpeedOptimizerWrapper(AcceleratedOptimizer)<block_start>"""
Internal wrapper around a deepspeed optimizer.
Args:
optimizer (:obj:`torch.optim.optimizer.Optimizer`):
The optimizer to wrap.
"""<def_stmt>__init__ self optimizer model:DeepSpeedEngineWrapper<block_start>super().__init__(optimizer device_placement=<false> scaler=<none>)<line_sep>self.model=model<block_end><def_stmt>zero_grad self set_to_none=<none><block_start><pass><block_end># `model.step()` is doing that automatically. Therefore, it's implementation is not needed
<def_stmt>step self<block_start>"""This will handle optimizer.step() & optimizer.zero_grad() with gradient_accumulation"""<line_sep>self.model.step()<block_end>@property<def_stmt>is_overflow self<block_start>"""Whether or not the optimizer step was done, or skipped because of gradient overflow."""<line_sep>overflow=<false><if_stmt>hasattr(self.optimizer "overflow")<block_start>overflow=self.optimizer.overflow<block_end><return>overflow<block_end><block_end> |
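An illustrative wiring of the two wrappers above into a training loop; the engine constructor arguments, `ds_config`, `model`, `optimizer`, and `dataloader` are placeholders, and the whole flow is an assumption about intended use rather than something taken from this file:

# Hypothetical usage, kept as comments because the constructor arguments are assumed:
# engine = DeepSpeedEngineWrapper(args=None, model=model, optimizer=optimizer,
#                                 config_params=ds_config)
# wrapped_optimizer = DeepSpeedOptimizerWrapper(engine.optimizer, engine)
#
# for batch in dataloader:
#     loss = engine(batch)              # forward pass through the DeepSpeed engine
#     engine.backward(loss)             # advances micro_steps for gradient accumulation
#     wrapped_optimizer.step()          # delegates to engine.step(); no-op until a boundary
#     wrapped_optimizer.zero_grad()     # deliberately a no-op in the wrapper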
from paddle.io import Dataset
from paddle.io import DataLoader
from paddle.vision import datasets
from paddle.vision import transforms


def get_transforms(mode='train'):
    if mode == 'train':
        data_transforms = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                                 std=[0.2023, 0.1994, 0.2010])])
    else:
        data_transforms = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                                 std=[0.2023, 0.1994, 0.2010])])
    return data_transforms


def get_dataset(name='cifar10', mode='train'):
    if name == 'cifar10':
        dataset = datasets.Cifar10(mode=mode, transform=get_transforms(mode))
    return dataset


def get_dataloader(dataset, batch_size=128, mode='train'):
    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            num_workers=2,
                            shuffle=(mode == 'train'))
    return dataloader
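A short usage sketch of the helpers above (batch size and the decision to inspect a single batch are arbitrary):

if __name__ == '__main__':
    # Assemble the CIFAR-10 training pipeline defined above and pull one batch.
    train_dataset = get_dataset(name='cifar10', mode='train')
    train_loader = get_dataloader(train_dataset, batch_size=128, mode='train')

    images, labels = next(iter(train_loader))
    print(images.shape, labels.shape)   # expected roughly [128, 3, 32, 32] and [128]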
import re

person = "xx{{\"asdasd\"+\"lala\"}} }} {1+1}xxx"
regex = r"{{(.*?)}}"

matches = re.finditer(regex, person, re.MULTILINE)
for matchNum, match in enumerate(matches):
    eval_result = eval(match.group(1))
    person = person.replace(str(match.group()), str(eval_result))

print(person)
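For clarity, the output this template-evaluation snippet should print: only the `{{ ... }}` span matches the non-greedy pattern and is replaced by its eval result, while the bare `{1+1}` is left untouched.

# Expected output (a sketch):
# xxasdasdlala }} {1+1}xxx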
<import_from_stmt>django.conf settings<import_from_stmt>django forms<import_from_stmt>django.contrib.auth.models User<import_from_stmt>django.contrib.auth.tokens default_token_generator<import_from_stmt>django.contrib.sites.models get_current_site<import_from_stmt>django.core.validators email_re<import_from_stmt>django.template Context loader<import_from_stmt>django.utils.http int_to_base36<import_from_stmt>django.utils.safestring mark_safe<import_from_stmt>django.utils.translation ugettext_lazy<as>_<import_from_stmt>paypal.standard.conf *<import_from_stmt>paypal.standard.forms PayPalPaymentsForm<import_from_stmt>social_auth.models UserSocialAuth<import_from_stmt>accounts.models AccountLevel UserProfile<class_stmt>UsernameChangeForm(forms.ModelForm)<block_start>"""
Update username form
"""<def_stmt>__init__ self *args **kwargs<block_start>super(UsernameChangeForm self).__init__(*args **kwargs)<for_stmt>myField self.fields<block_start>self.fields[myField].widget.attrs['class']='input-text'<block_end><block_end><class_stmt>Meta<block_start>model=User<line_sep>fields=('username' )<block_end><block_end><class_stmt>SetPasswordForm(forms.Form)<block_start>"""
A form that lets a user change set his/her password without
entering the old password
"""<line_sep>new_password1=forms.CharField(label=_("New password") widget=forms.PasswordInput)<line_sep>new_password2=forms.CharField(label=_("New password confirmation") widget=forms.PasswordInput)<def_stmt>__init__ self user *args **kwargs<block_start>self.user=user<line_sep>super(SetPasswordForm self).__init__(*args **kwargs)<for_stmt>myField self.fields<block_start>self.fields[myField].widget.attrs['class']='input-text'<block_end><block_end><def_stmt>clean_new_password2 self<block_start>password1=self.cleaned_data.get('new_password1')<line_sep>password2=self.cleaned_data.get('new_password2')<if_stmt>password1<and>password2<block_start><if_stmt>password1<ne>password2<block_start><raise>forms.ValidationError(_("The two password fields didn't match."))<block_end><block_end><return>password2<block_end><def_stmt>save self commit=<true><block_start>self.user.set_password(self.cleaned_data['new_password1'])<if_stmt>commit<block_start>self.user.save()<block_end><return>self.user<block_end><block_end><class_stmt>PasswordChangeForm(SetPasswordForm)<block_start>"""
A form that lets a user change his/her password by entering
their old password.
"""<line_sep>old_password=forms.CharField(label=_("Old password") widget=forms.PasswordInput)<def_stmt>__init__ self *args **kwargs<block_start>super(PasswordChangeForm self).__init__(*args **kwargs)<for_stmt>myField self.fields<block_start>self.fields[myField].widget.attrs['class']='input-text'<block_end><block_end><def_stmt>clean_old_password self<block_start>"""
Validates that the old_password field is correct.
"""<line_sep>old_password=self.cleaned_data["old_password"]<if_stmt><not>self.user.check_password(old_password)<block_start><raise>forms.ValidationError(_("Your old password was entered incorrectly. Please enter it again."))<block_end><return>old_password<block_end><block_end>PasswordChangeForm.base_fields.keyOrder=['old_password' '<PASSWORD>' '<PASSWORD>']<class_stmt>UserProfileUpdateForm(forms.ModelForm)<block_start>"""
Update nickname form
"""<def_stmt>__init__ self *args **kwargs<block_start>super(UserProfileUpdateForm self).__init__(*args **kwargs)<for_stmt>myField self.fields<block_start>self.fields[myField].widget.attrs['class']='input-text'<block_end><block_end><class_stmt>Meta<block_start>model=UserProfile<line_sep>exclude=('user' 'account_level')<line_sep>fields=('nickname' )<block_end><block_end><class_stmt>PasswordResetForm(forms.Form)<block_start>email_username=forms.CharField(label=_("E-mail or Username") max_length=75)<def_stmt>__init__ self *args **kwargs<block_start>super(PasswordResetForm self).__init__(*args **kwargs)<for_stmt>myField self.fields<block_start>self.fields[myField].widget.attrs['class']='input-text'<block_end><block_end><def_stmt>clean_email_username self<block_start>"""
Validates that an active user exists with the given e-mail address or username
"""<line_sep>email_username=self.cleaned_data["email_username"]<if_stmt>email_re.search(email_username)<block_start><try_stmt><block_start>self.users_cache=list(User.objects.filter(email__iexact=email_username is_active=<true>))<block_end><except_stmt>User.DoesNotExist<block_start><pass><block_end><block_end><else_stmt><block_start><try_stmt><block_start>self.users_cache=list(User.objects.filter(username__iexact=email_username is_active=<true>))<block_end><except_stmt>User.DoesNotExist<block_start><pass><block_end><block_end># Allow user to reset password even if registered from a social networking site
<for_stmt>user self.users_cache<block_start><try_stmt><block_start>oauth_user=UserSocialAuth.objects.get(user=user)<line_sep><raise>forms.ValidationError(_("Your Screenbird account is based off of either Google or Facebook. To login with either of those, please use one of these links:"))<block_end><except_stmt>UserSocialAuth.DoesNotExist<block_start>oauth_user=<none><block_end><block_end><if_stmt>len(self.users_cache)<eq>0<block_start><raise>forms.ValidationError(_("That e-mail address or username doesn't have an associated user account. Are you sure you've registered?"))<block_end><return>email_username<block_end><def_stmt>save self domain_override=<none> email_template_name='registration/password_reset_email.html' use_https=<false> token_generator=default_token_generator from_email=<none> request=<none><block_start>"""
Generates a one-use only link for resetting password and sends to the user
"""<import_from_stmt>django.core.mail send_mail<for_stmt>user self.users_cache<block_start><if_stmt><not>domain_override<block_start>current_site=get_current_site(request)<line_sep>site_name=current_site.name<line_sep>domain=current_site.domain<block_end><else_stmt><block_start>site_name=domain=domain_override<block_end>t=loader.get_template(email_template_name)<line_sep>c={'email':user.email 'domain':domain 'site_name':site_name 'uid':int_to_base36(user.id) 'user':user 'token':token_generator.make_token(user) 'protocol':use_https<and>'https'<or>'http' }<line_sep>send_mail(_("Password reset on %s")%site_name t.render(Context(c)) from_email [user.email] fail_silently=<false>)<block_end><block_end><block_end><class_stmt>PayPalPaymentsForm(PayPalPaymentsForm)<block_start>'''Extended django-paypals PayPalPaymentsForm to customize button image and render
'''<line_sep>MONTHLY_IMAGE=settings.MEDIA_URL+'gfx/premium_button%201.png'<line_sep>YEARLY_IMAGE=settings.MEDIA_URL+'gfx/premium_button%202.png'<line_sep>PASTEVID_MONTHLY='pastevid_monthly'<line_sep>PASTEVID_YEARLY='pastevid_yearly'<def_stmt>render self<block_start><if_stmt>settings.SITE_ID<eq>2<block_start><if_stmt>self.button_type<eq>self.PASTEVID_MONTHLY<block_start>link_text="Monthly"<line_sep>tagline="$9/month"<block_end><else_stmt><block_start>link_text="Yearly"<line_sep>tagline="$99/year"<block_end>rendered_form=mark_safe(u"""<form action="%s" method="post" id="%s">
%s
<a href="javascript:{}" style="text-align:center;" class="buy_now" onclick="document.getElementById('%s').submit(); return false;">%s</a><div class="tagline">%s</div><br><br>
</form>"""%(POSTBACK_ENDPOINT self.button_type self.as_p() self.button_type link_text tagline))<block_end><else_stmt><block_start>rendered_form=mark_safe(u"""<form action="%s" method="post" id="%s">
%s
<input type="image" src="%s" border="0" name="submit" alt="Buy it Now" />
</form>"""%(POSTBACK_ENDPOINT self.button_type self.as_p() self.get_image()))<block_end><return>rendered_form<block_end><def_stmt>get_image self<block_start><return>{(<true> self.PASTEVID_MONTHLY):self.MONTHLY_IMAGE (<true> self.PASTEVID_YEARLY):self.YEARLY_IMAGE (<true> self.SUBSCRIBE):SUBSCRIPTION_SANDBOX_IMAGE (<true> self.BUY):SANDBOX_IMAGE (<true> self.DONATE):DONATION_SANDBOX_IMAGE (<false> self.PASTEVID_MONTHLY):self.MONTHLY_IMAGE (<false> self.PASTEVID_YEARLY):self.YEARLY_IMAGE (<false> self.SUBSCRIBE):SUBSCRIPTION_IMAGE (<false> self.BUY):IMAGE (<false> self.DONATE):DONATION_IMAGE }[TEST self.button_type]<block_end><block_end><class_stmt>PaymentInformationForm(forms.Form)<block_start>"""
A form that lets users enter their payment information to be used with
Authorize.net
Note: Authorize.net payment option is currently on backlog
"""<line_sep>card_number=forms.CharField(required=<true> max_length=16)<line_sep>expiry_date=forms.DateField(required=<true> widget=forms.widgets.DateInput(format="%m/%d/%Y"))<line_sep>card_code=forms.CharField(required=<true> max_length=10)<line_sep>first_name=forms.CharField(required=<false> max_length=30)<line_sep>last_name=forms.CharField(required=<false> max_length=30)<line_sep>company=forms.CharField(required=<false> max_length=150)<line_sep>address=forms.CharField(required=<false> max_length=150)<line_sep>city=forms.CharField(required=<false> max_length=150)<line_sep>state=forms.CharField(required=<false> max_length=150)<line_sep>province=forms.CharField(required=<false> max_length=150)<line_sep>country=forms.CharField(required=<false> max_length=150)<line_sep>zip_code=forms.CharField(required=<false> max_length=150)<line_sep>email=forms.EmailField(required=<false>)<line_sep>phone=forms.CharField(required=<false> max_length=15)<block_end> |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
<import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<line_sep>__all__=['LinkedServiceArgs' 'LinkedService']<line_sep>@pulumi.input_type<class_stmt>LinkedServiceArgs<block_start><def_stmt>__init__ __self__ * resource_group_name:pulumi.Input[str] linked_service_name:Optional[pulumi.Input[str]]=<none> read_access_id:Optional[pulumi.Input[str]]=<none> resource_id:Optional[pulumi.Input[str]]=<none> tags:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none> workspace_id:Optional[pulumi.Input[str]]=<none> workspace_name:Optional[pulumi.Input[str]]=<none> write_access_id:Optional[pulumi.Input[str]]=<none><block_start>"""
The set of arguments for constructing a LinkedService resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] linked_service_name: Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
:param pulumi.Input[str] read_access_id: The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[str] resource_id: The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] workspace_id: The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] workspace_name: The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] write_access_id: The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""<line_sep>pulumi.set(__self__ "resource_group_name" resource_group_name)<if_stmt>linked_service_name<is><not><none><block_start>warnings.warn("""This field has been deprecated and will be removed in a future version of the provider""" DeprecationWarning)<line_sep>pulumi.log.warn("""linked_service_name is deprecated: This field has been deprecated and will be removed in a future version of the provider""")<block_end><if_stmt>linked_service_name<is><not><none><block_start>pulumi.set(__self__ "linked_service_name" linked_service_name)<block_end><if_stmt>read_access_id<is><not><none><block_start>pulumi.set(__self__ "read_access_id" read_access_id)<block_end><if_stmt>resource_id<is><not><none><block_start>warnings.warn("""This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider""" DeprecationWarning)<line_sep>pulumi.log.warn("""resource_id is deprecated: This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider""")<block_end><if_stmt>resource_id<is><not><none><block_start>pulumi.set(__self__ "resource_id" resource_id)<block_end><if_stmt>tags<is><not><none><block_start>pulumi.set(__self__ "tags" tags)<block_end><if_stmt>workspace_id<is><not><none><block_start>pulumi.set(__self__ "workspace_id" workspace_id)<block_end><if_stmt>workspace_name<is><not><none><block_start>warnings.warn("""This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider""" DeprecationWarning)<line_sep>pulumi.log.warn("""workspace_name is deprecated: This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider""")<block_end><if_stmt>workspace_name<is><not><none><block_start>pulumi.set(__self__ "workspace_name" workspace_name)<block_end><if_stmt>write_access_id<is><not><none><block_start>pulumi.set(__self__ "write_access_id" write_access_id)<block_end><block_end>@[email protected](name="resourceGroupName")<def_stmt>resource_group_name self<arrow>pulumi.Input[str]<block_start>"""
The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "resource_group_name")<block_end>@resource_group_name.setter<def_stmt>resource_group_name self value:pulumi.Input[str]<block_start>pulumi.set(self "resource_group_name" value)<block_end>@[email protected](name="linkedServiceName")<def_stmt>linked_service_name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "linked_service_name")<block_end>@linked_service_name.setter<def_stmt>linked_service_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "linked_service_name" value)<block_end>@[email protected](name="readAccessId")<def_stmt>read_access_id self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
"""<line_sep><return>pulumi.get(self "read_access_id")<block_end>@read_access_id.setter<def_stmt>read_access_id self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "read_access_id" value)<block_end>@[email protected](name="resourceId")<def_stmt>resource_id self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
"""<line_sep><return>pulumi.get(self "resource_id")<block_end>@resource_id.setter<def_stmt>resource_id self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "resource_id" value)<block_end>@[email protected]<def_stmt>tags self<arrow>Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>"""
A mapping of tags to assign to the resource.
"""<line_sep><return>pulumi.get(self "tags")<block_end>@tags.setter<def_stmt>tags self value:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>pulumi.set(self "tags" value)<block_end>@[email protected](name="workspaceId")<def_stmt>workspace_id self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "workspace_id")<block_end>@workspace_id.setter<def_stmt>workspace_id self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "workspace_id" value)<block_end>@[email protected](name="workspaceName")<def_stmt>workspace_name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "workspace_name")<block_end>@workspace_name.setter<def_stmt>workspace_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "workspace_name" value)<block_end>@[email protected](name="writeAccessId")<def_stmt>write_access_id self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""<line_sep><return>pulumi.get(self "write_access_id")<block_end>@write_access_id.setter<def_stmt>write_access_id self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "write_access_id" value)<block_end><block_end>@pulumi.input_type<class_stmt>_LinkedServiceState<block_start><def_stmt>__init__ __self__ * linked_service_name:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> read_access_id:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> resource_id:Optional[pulumi.Input[str]]=<none> tags:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none> workspace_id:Optional[pulumi.Input[str]]=<none> workspace_name:Optional[pulumi.Input[str]]=<none> write_access_id:Optional[pulumi.Input[str]]=<none><block_start>"""
Input properties used for looking up and filtering LinkedService resources.
:param pulumi.Input[str] linked_service_name: Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The generated name of the Linked Service. The format for this attribute is always `<workspace name>/<linked service type>`(e.g. `workspace1/Automation` or `workspace1/Cluster`)
:param pulumi.Input[str] read_access_id: The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_id: The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] workspace_id: The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] workspace_name: The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] write_access_id: The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""<if_stmt>linked_service_name<is><not><none><block_start>warnings.warn("""This field has been deprecated and will be removed in a future version of the provider""" DeprecationWarning)<line_sep>pulumi.log.warn("""linked_service_name is deprecated: This field has been deprecated and will be removed in a future version of the provider""")<block_end><if_stmt>linked_service_name<is><not><none><block_start>pulumi.set(__self__ "linked_service_name" linked_service_name)<block_end><if_stmt>name<is><not><none><block_start>pulumi.set(__self__ "name" name)<block_end><if_stmt>read_access_id<is><not><none><block_start>pulumi.set(__self__ "read_access_id" read_access_id)<block_end><if_stmt>resource_group_name<is><not><none><block_start>pulumi.set(__self__ "resource_group_name" resource_group_name)<block_end><if_stmt>resource_id<is><not><none><block_start>warnings.warn("""This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider""" DeprecationWarning)<line_sep>pulumi.log.warn("""resource_id is deprecated: This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider""")<block_end><if_stmt>resource_id<is><not><none><block_start>pulumi.set(__self__ "resource_id" resource_id)<block_end><if_stmt>tags<is><not><none><block_start>pulumi.set(__self__ "tags" tags)<block_end><if_stmt>workspace_id<is><not><none><block_start>pulumi.set(__self__ "workspace_id" workspace_id)<block_end><if_stmt>workspace_name<is><not><none><block_start>warnings.warn("""This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider""" DeprecationWarning)<line_sep>pulumi.log.warn("""workspace_name is deprecated: This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider""")<block_end><if_stmt>workspace_name<is><not><none><block_start>pulumi.set(__self__ "workspace_name" workspace_name)<block_end><if_stmt>write_access_id<is><not><none><block_start>pulumi.set(__self__ "write_access_id" write_access_id)<block_end><block_end>@[email protected](name="linkedServiceName")<def_stmt>linked_service_name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "linked_service_name")<block_end>@linked_service_name.setter<def_stmt>linked_service_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "linked_service_name" value)<block_end>@[email protected]<def_stmt>name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The generated name of the Linked Service. The format for this attribute is always `<workspace name>/<linked service type>`(e.g. `workspace1/Automation` or `workspace1/Cluster`)
"""<line_sep><return>pulumi.get(self "name")<block_end>@name.setter<def_stmt>name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "name" value)<block_end>@[email protected](name="readAccessId")<def_stmt>read_access_id self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
"""<line_sep><return>pulumi.get(self "read_access_id")<block_end>@read_access_id.setter<def_stmt>read_access_id self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "read_access_id" value)<block_end>@[email protected](name="resourceGroupName")<def_stmt>resource_group_name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "resource_group_name")<block_end>@resource_group_name.setter<def_stmt>resource_group_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "resource_group_name" value)<block_end>@[email protected](name="resourceId")<def_stmt>resource_id self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
"""<line_sep><return>pulumi.get(self "resource_id")<block_end>@resource_id.setter<def_stmt>resource_id self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "resource_id" value)<block_end>@[email protected]<def_stmt>tags self<arrow>Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>"""
A mapping of tags to assign to the resource.
"""<line_sep><return>pulumi.get(self "tags")<block_end>@tags.setter<def_stmt>tags self value:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>pulumi.set(self "tags" value)<block_end>@[email protected](name="workspaceId")<def_stmt>workspace_id self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "workspace_id")<block_end>@workspace_id.setter<def_stmt>workspace_id self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "workspace_id" value)<block_end>@[email protected](name="workspaceName")<def_stmt>workspace_name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "workspace_name")<block_end>@workspace_name.setter<def_stmt>workspace_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "workspace_name" value)<block_end>@[email protected](name="writeAccessId")<def_stmt>write_access_id self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""<line_sep><return>pulumi.get(self "write_access_id")<block_end>@write_access_id.setter<def_stmt>write_access_id self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "write_access_id" value)<block_end><block_end><class_stmt>LinkedService(pulumi.CustomResource)<block_start>@overload<def_stmt>__init__ __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> linked_service_name:Optional[pulumi.Input[str]]=<none> read_access_id:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> resource_id:Optional[pulumi.Input[str]]=<none> tags:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none> workspace_id:Optional[pulumi.Input[str]]=<none> workspace_name:Optional[pulumi.Input[str]]=<none> write_access_id:Optional[pulumi.Input[str]]=<none> __props__=<none><block_start>"""
Manages a Log Analytics Linked Service.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.automation.Account("exampleAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku_name="Basic",
tags={
"environment": "development",
})
example_analytics_workspace = azure.operationalinsights.AnalyticsWorkspace("exampleAnalyticsWorkspace",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="PerGB2018",
retention_in_days=30)
example_linked_service = azure.loganalytics.LinkedService("exampleLinkedService",
resource_group_name=example_resource_group.name,
workspace_id=example_analytics_workspace.id,
read_access_id=example_account.id)
```
## Import
Log Analytics Workspaces can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:loganalytics/linkedService:LinkedService example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1/linkedServices/Automation
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] linked_service_name: Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
:param pulumi.Input[str] read_access_id: The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_id: The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] workspace_id: The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] workspace_name: The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] write_access_id: The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""<line_sep><ellipsis><block_end>@overload<def_stmt>__init__ __self__ resource_name:str args:LinkedServiceArgs opts:Optional[pulumi.ResourceOptions]=<none><block_start>"""
Manages a Log Analytics Linked Service.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.automation.Account("exampleAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku_name="Basic",
tags={
"environment": "development",
})
example_analytics_workspace = azure.operationalinsights.AnalyticsWorkspace("exampleAnalyticsWorkspace",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="PerGB2018",
retention_in_days=30)
example_linked_service = azure.loganalytics.LinkedService("exampleLinkedService",
resource_group_name=example_resource_group.name,
workspace_id=example_analytics_workspace.id,
read_access_id=example_account.id)
```
## Import
Log Analytics Workspaces can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:loganalytics/linkedService:LinkedService example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.OperationalInsights/workspaces/workspace1/linkedServices/Automation
```
:param str resource_name: The name of the resource.
:param LinkedServiceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""<line_sep><ellipsis><block_end><def_stmt>__init__ __self__ resource_name:str *args **kwargs<block_start>resource_args,opts=_utilities.get_resource_args_opts(LinkedServiceArgs pulumi.ResourceOptions *args **kwargs)<if_stmt>resource_args<is><not><none><block_start>__self__._internal_init(resource_name opts **resource_args.__dict__)<block_end><else_stmt><block_start>__self__._internal_init(resource_name *args **kwargs)<block_end><block_end><def_stmt>_internal_init __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> linked_service_name:Optional[pulumi.Input[str]]=<none> read_access_id:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> resource_id:Optional[pulumi.Input[str]]=<none> tags:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none> workspace_id:Optional[pulumi.Input[str]]=<none> workspace_name:Optional[pulumi.Input[str]]=<none> write_access_id:Optional[pulumi.Input[str]]=<none> __props__=<none><block_start><if_stmt>opts<is><none><block_start>opts=pulumi.ResourceOptions()<block_end><if_stmt><not>isinstance(opts pulumi.ResourceOptions)<block_start><raise>TypeError('Expected resource options to be a ResourceOptions instance')<block_end><if_stmt>opts.version<is><none><block_start>opts.version=_utilities.get_version()<block_end><if_stmt>opts.id<is><none><block_start><if_stmt>__props__<is><not><none><block_start><raise>TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')<block_end>__props__=LinkedServiceArgs.__new__(LinkedServiceArgs)<if_stmt>linked_service_name<is><not><none><and><not>opts.urn<block_start>warnings.warn("""This field has been deprecated and will be removed in a future version of the provider""" DeprecationWarning)<line_sep>pulumi.log.warn("""linked_service_name is deprecated: This field has been deprecated and will be removed in a future version of the provider""")<block_end>__props__.__dict__["linked_service_name"]=linked_service_name<line_sep>__props__.__dict__["read_access_id"]=read_access_id<if_stmt>resource_group_name<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'resource_group_name'")<block_end>__props__.__dict__["resource_group_name"]=resource_group_name<if_stmt>resource_id<is><not><none><and><not>opts.urn<block_start>warnings.warn("""This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider""" DeprecationWarning)<line_sep>pulumi.log.warn("""resource_id is deprecated: This field has been deprecated in favour of `read_access_id` and will be removed in a future version of the provider""")<block_end>__props__.__dict__["resource_id"]=resource_id<line_sep>__props__.__dict__["tags"]=tags<line_sep>__props__.__dict__["workspace_id"]=workspace_id<if_stmt>workspace_name<is><not><none><and><not>opts.urn<block_start>warnings.warn("""This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider""" DeprecationWarning)<line_sep>pulumi.log.warn("""workspace_name is deprecated: This field has been deprecated in favour of `workspace_id` and will be removed in a future version of the provider""")<block_end>__props__.__dict__["workspace_name"]=workspace_name<line_sep>__props__.__dict__["write_access_id"]=write_access_id<line_sep>__props__.__dict__["name"]=<none><block_end>super(LinkedService __self__).__init__('azure:loganalytics/linkedService:LinkedService' resource_name __props__ 
opts)<block_end>@staticmethod<def_stmt>get resource_name:str id:pulumi.Input[str] opts:Optional[pulumi.ResourceOptions]=<none> linked_service_name:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> read_access_id:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> resource_id:Optional[pulumi.Input[str]]=<none> tags:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none> workspace_id:Optional[pulumi.Input[str]]=<none> workspace_name:Optional[pulumi.Input[str]]=<none> write_access_id:Optional[pulumi.Input[str]]=<none><arrow>'LinkedService'<block_start>"""
Get an existing LinkedService resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] linked_service_name: Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The generated name of the Linked Service. The format for this attribute is always `<workspace name>/<linked service type>`(e.g. `workspace1/Automation` or `workspace1/Cluster`)
:param pulumi.Input[str] read_access_id: The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_id: The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] workspace_id: The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] workspace_name: The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
:param pulumi.Input[str] write_access_id: The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""<line_sep>opts=pulumi.ResourceOptions.merge(opts pulumi.ResourceOptions(id=id))<line_sep>__props__=_LinkedServiceState.__new__(_LinkedServiceState)<line_sep>__props__.__dict__["linked_service_name"]=linked_service_name<line_sep>__props__.__dict__["name"]=name<line_sep>__props__.__dict__["read_access_id"]=read_access_id<line_sep>__props__.__dict__["resource_group_name"]=resource_group_name<line_sep>__props__.__dict__["resource_id"]=resource_id<line_sep>__props__.__dict__["tags"]=tags<line_sep>__props__.__dict__["workspace_id"]=workspace_id<line_sep>__props__.__dict__["workspace_name"]=workspace_name<line_sep>__props__.__dict__["write_access_id"]=write_access_id<line_sep><return>LinkedService(resource_name opts=opts __props__=__props__)<block_end>@[email protected](name="linkedServiceName")<def_stmt>linked_service_name self<arrow>pulumi.Output[str]<block_start>"""
Name of the type of linkedServices resource to connect to the Log Analytics Workspace specified in workspace_name. Accepted values are `automation` and `cluster`. Defaults to `automation`. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "linked_service_name")<block_end>@[email protected]<def_stmt>name self<arrow>pulumi.Output[str]<block_start>"""
The generated name of the Linked Service. The format for this attribute is always `<workspace name>/<linked service type>`(e.g. `workspace1/Automation` or `workspace1/Cluster`)
"""<line_sep><return>pulumi.get(self "name")<block_end>@[email protected](name="readAccessId")<def_stmt>read_access_id self<arrow>pulumi.Output[str]<block_start>"""
The ID of the readable Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
"""<line_sep><return>pulumi.get(self "read_access_id")<block_end>@[email protected](name="resourceGroupName")<def_stmt>resource_group_name self<arrow>pulumi.Output[str]<block_start>"""
The name of the resource group in which the Log Analytics Linked Service is created. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "resource_group_name")<block_end>@[email protected](name="resourceId")<def_stmt>resource_id self<arrow>pulumi.Output[str]<block_start>"""
The ID of the Resource that will be linked to the workspace. This should be used for linking to an Automation Account resource.
"""<line_sep><return>pulumi.get(self "resource_id")<block_end>@[email protected]<def_stmt>tags self<arrow>pulumi.Output[Optional[Mapping[str str]]]<block_start>"""
A mapping of tags to assign to the resource.
"""<line_sep><return>pulumi.get(self "tags")<block_end>@[email protected](name="workspaceId")<def_stmt>workspace_id self<arrow>pulumi.Output[str]<block_start>"""
The ID of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "workspace_id")<block_end>@[email protected](name="workspaceName")<def_stmt>workspace_name self<arrow>pulumi.Output[str]<block_start>"""
The name of the Log Analytics Workspace that will contain the Log Analytics Linked Service resource. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "workspace_name")<block_end>@[email protected](name="writeAccessId")<def_stmt>write_access_id self<arrow>pulumi.Output[Optional[str]]<block_start>"""
The ID of the writable Resource that will be linked to the workspace. This should be used for linking to a Log Analytics Cluster resource.
"""<line_sep><return>pulumi.get(self "write_access_id")<block_end><block_end> |
<import_stmt>pytest<import_from_stmt>nengo.builder.processes SimProcess<import_from_stmt>nengo.builder.tests.test_operator _test_operator_arg_attributes<def_stmt>test_simprocess <block_start>argnames=["process" "input" "output" "t"]<line_sep>non_signals=["process"]<line_sep>_,sim=_test_operator_arg_attributes(SimProcess argnames non_signals=non_signals)<assert_stmt>str(sim)<eq>"SimProcess{process, input -> output}"<with_stmt>pytest.raises(ValueError match="Unrecognized mode")<block_start>_test_operator_arg_attributes(SimProcess argnames args={"mode":"badval"})<block_end><block_end> |
"""
>>> getg()
5
>>> setg(42)
>>> getg()
42
"""<line_sep>g=5<def_stmt>setg a<block_start><global>g<line_sep>g=a<block_end><def_stmt>getg <block_start><return>g<block_end><class_stmt>Test(object)<block_start>"""
>>> global_in_class
9
>>> Test.global_in_class
Traceback (most recent call last):
AttributeError: type object 'Test' has no attribute 'global_in_class'
>>> Test().global_in_class
Traceback (most recent call last):
AttributeError: 'Test' object has no attribute 'global_in_class'
"""<line_sep><global>global_in_class<line_sep>global_in_class=9<block_end> |
""" A universal module with functions / classes without dependencies. """<import_stmt>functools<import_stmt>re<import_stmt>os<line_sep>_sep=os.path.sep<if_stmt>os.path.altsep<is><not><none><block_start>_sep<augadd>os.path.altsep<block_end>_path_re=re.compile(r'(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))<del_stmt>_sep<def_stmt>to_list func<block_start><def_stmt>wrapper *args **kwargs<block_start><return>list(func(*args **kwargs))<block_end><return>wrapper<block_end><def_stmt>to_tuple func<block_start><def_stmt>wrapper *args **kwargs<block_start><return>tuple(func(*args **kwargs))<block_end><return>wrapper<block_end><def_stmt>unite iterable<block_start>"""Turns a two dimensional array into a one dimensional."""<line_sep><return>set(typ<for>types iterable<for>typ types)<block_end><class_stmt>UncaughtAttributeError(Exception)<block_start>"""
Important, because `__getattr__` and `hasattr` catch AttributeErrors
implicitly. This is really evil (mainly because of `__getattr__`).
Therefore this class originally had to be derived from `BaseException`
instead of `Exception`. But because I removed relevant `hasattr` from
the code base, we can now switch back to `Exception`.
:param base: return values of sys.exc_info().
"""<block_end><def_stmt>safe_property func<block_start><return>property(reraise_uncaught(func))<block_end><def_stmt>reraise_uncaught func<block_start>"""
Re-throw uncaught `AttributeError`.
Usage: Put ``@reraise_uncaught`` in front of a function
which is **not** supposed to raise `AttributeError`.
AttributeError easily gets caught by `hasattr` and another
``except AttributeError`` clause. This becomes a problem when you use
a lot of "dynamic" attributes (e.g., using ``@property``) because you
can't distinguish whether the property really does not exist or whether
some code inside the "dynamic" attribute raised that error. In well
written code such an error should not exist, but getting there is very
difficult. This decorator helps us get there by changing
`AttributeError` to `UncaughtAttributeError` to avoid an unexpected catch.
This helps us notice bugs earlier and facilitates debugging.
"""<line_sep>@functools.wraps(func)<def_stmt>wrapper *args **kwds<block_start><try_stmt><block_start><return>func(*args **kwds)<block_end><except_stmt>AttributeError<as>e<block_start><raise>UncaughtAttributeError(e)<from>e<block_end><block_end><return>wrapper<block_end><class_stmt>PushBackIterator<block_start><def_stmt>__init__ self iterator<block_start>self.pushes=[]<line_sep>self.iterator=iterator<line_sep>self.current=<none><block_end><def_stmt>push_back self value<block_start>self.pushes.append(value)<block_end><def_stmt>__iter__ self<block_start><return>self<block_end><def_stmt>__next__ self<block_start><if_stmt>self.pushes<block_start>self.current=self.pushes.pop()<block_end><else_stmt><block_start>self.current=next(self.iterator)<block_end><return>self.current<block_end><block_end> |
<import_stmt>re sys os<line_sep>file=open("output" "r")<line_sep>lines=file.readlines()<line_sep>file.close()<line_sep>variable=[]<line_sep>eta1=[]<line_sep>eta2=[]<line_sep>mean=[]<line_sep>error=[]<line_sep>effS=[]<for_stmt>line lines<block_start>elements=re.split("\t" line)<line_sep>variable<augadd>[elements[1] ]<line_sep>eta1<augadd>[re.split(">" re.split("&&" elements[2])[0])[1] ]<line_sep>eta2<augadd>[re.split("<" elements[2])[1] ]<line_sep>mean<augadd>[elements[3] ]<line_sep>error<augadd>[elements[4] ]<line_sep>effS<augadd>[elements[5][:-1] ]<block_end>header="""void plot_MeanVsET(){
TCanvas *c1 = new TCanvas("c1","Mean vs ET", 800, 600);
TH1F* h_emCorr_et = new TH1F("h_emCorr_et","",300,0,300);
TH1F* h_em_et = new TH1F("h_em_et","",300,0,300);
c1->cd();
"""<line_sep>file=open("plot_MeanVsET.C" "w")<line_sep>file.write(header)<for_stmt>i ("emCorr_et" "em_et")<block_start><for_stmt>j range(0 len(eta1))<block_start><if_stmt>variable[j]<ne>i<block_start><continue><block_end>bin=str(int((float(eta1[j])+float(eta2[j]))/2))<line_sep>file.write(" h_"+i+"->SetBinContent("+bin+", "+mean[j]+");\n")<line_sep>file.write(" h_"+i+"->SetBinError ("+bin+", "+error[j]+");\n")<block_end><block_end>file.write(" h_emCorr_et->SetMarkerStyle(23);\n")<line_sep>file.write(" h_em_et ->SetMarkerStyle(20);\n")<line_sep>file.write(" h_emCorr_et->SetMarkerColor(4);\n")<line_sep>file.write(" h_em_et ->SetMarkerColor(1);\n")<line_sep>file.write(" gStyle->SetOptStat(0);\n")<line_sep>file.write(" h_em_et ->Draw();\n")<line_sep>file.write(" h_emCorr_et->Draw(\"SAME\");\n")<line_sep>file.write(" TLine* line = new TLine(0,1,300,1);\n")<line_sep>file.write(" line->Draw();\n")<line_sep>header="""
TAxis* ax = h_em_et->GetXaxis();
ax->SetTitle("Et (GeV)");
TAxis* ay = h_em_et->GetYaxis();
ay->SetTitle("E_{T}^{RECO}/E_{T}^{MC}");
ay->SetRangeUser(0.9,1.05);
TLegend *leg = new TLegend(0.2, 0.2, 0.4, 0.4);
leg->AddEntry(h_em_et, "Before correction");
leg->AddEntry(h_emCorr_et, "After correction ");
leg->Draw();
TLine* line = new TLine(0,1,1.5,1);
line->SetLineWidth(2);
line->SetLineColor(2);
line->Draw();
c1->Print("MeanVsET.ps");
gROOT->ProcessLine(".q");
"""<line_sep>file.write(header)<line_sep>file.write("}\n")<line_sep> |
"""
Dependency source interfaces and implementations for `pip-audit`.
"""<import_from_stmt>.interface DependencyFixError DependencyResolver DependencyResolverError DependencySource DependencySourceError <import_from_stmt>.pip PipSource PipSourceError<import_from_stmt>.requirement RequirementSource<import_from_stmt>.resolvelib ResolveLibResolver<line_sep>__all__=["DependencyFixError" "DependencyResolver" "DependencyResolverError" "DependencySource" "DependencySourceError" "PipSource" "PipSourceError" "RequirementSource" "ResolveLibResolver" ]<line_sep> |
<import_from_stmt>typing Optional Callable Iterable Iterator<import_from_stmt>pathlib Path<import_stmt>random<import_stmt>itertools<import_stmt>spacy<import_stmt>warnings<import_from_stmt>spacy.training Corpus Example<import_from_stmt>spacy.language Language<import_from_stmt>scispacy.custom_tokenizer combined_rule_tokenizer<import_from_stmt>scispacy.data_util read_full_med_mentions read_ner_from_tsv<def_stmt>iter_sample iterable:Iterable sample_percent:float<arrow>Iterator<block_start><for_stmt>item iterable<block_start><if_stmt>len(item.reference)<eq>0<block_start><continue><block_end>coin_flip=random.uniform(0 1)<if_stmt>coin_flip<l>sample_percent<block_start><yield>item<block_end><block_end><block_end>@spacy.registry.callbacks("replace_tokenizer")<def_stmt>replace_tokenizer_callback <arrow>Callable[[Language] Language]<block_start><def_stmt>replace_tokenizer nlp:Language<arrow>Language<block_start>nlp.tokenizer=combined_rule_tokenizer(nlp)<line_sep><return>nlp<block_end><return>replace_tokenizer<block_end>@spacy.registry.readers("parser_tagger_data")<def_stmt>parser_tagger_data path:Path mixin_data_path:Optional[Path] mixin_data_percent:float gold_preproc:bool max_length:int=0 limit:int=0 augmenter:Optional[Callable]=<none> seed:int=0 <arrow>Callable[[Language] Iterator[Example]]<block_start>random.seed(seed)<line_sep>main_corpus=Corpus(path gold_preproc=gold_preproc max_length=max_length limit=limit augmenter=augmenter )<if_stmt>mixin_data_path<is><not><none><block_start>mixin_corpus=Corpus(mixin_data_path gold_preproc=gold_preproc max_length=max_length limit=limit augmenter=augmenter )<block_end><def_stmt>mixed_corpus nlp:Language<arrow>Iterator[Example]<block_start><if_stmt>mixin_data_path<is><not><none><block_start>main_examples=main_corpus(nlp)<line_sep>mixin_examples=iter_sample(mixin_corpus(nlp) mixin_data_percent)<line_sep><return>itertools.chain(main_examples mixin_examples)<block_end><else_stmt><block_start><return>main_corpus(nlp)<block_end><block_end><return>mixed_corpus<block_end>@spacy.registry.readers("med_mentions_reader")<def_stmt>med_mentions_reader directory_path:str split:str<arrow>Callable[[Language] Iterator[Example]]<block_start>train,dev,test=read_full_med_mentions(directory_path label_mapping=<none> span_only=<true> spacy_format=<true>)<def_stmt>corpus nlp:Language<arrow>Iterator[Example]<block_start><if_stmt>split<eq>"train"<block_start>original_examples=train<block_end><elif_stmt>split<eq>"dev"<block_start>original_examples=dev<block_end><elif_stmt>split<eq>"test"<block_start>original_examples=test<block_end><else_stmt><block_start><raise>Exception(f"Unexpected split {split}")<block_end><for_stmt>original_example original_examples<block_start>doc=nlp.make_doc(original_example[0])<with_stmt>warnings.catch_warnings()<block_start>warnings.simplefilter("ignore" category=UserWarning)<line_sep>spacy_example=Example.from_dict(doc original_example[1])<block_end><yield>spacy_example<block_end><block_end><return>corpus<block_end>@spacy.registry.readers("specialized_ner_reader")<def_stmt>specialized_ner_reader file_path:str<block_start>original_examples=read_ner_from_tsv(file_path)<def_stmt>corpus nlp:Language<block_start><for_stmt>original_example original_examples<block_start>doc=nlp.make_doc(original_example[0])<with_stmt>warnings.catch_warnings()<block_start>warnings.simplefilter("ignore" category=UserWarning)<line_sep>spacy_example=Example.from_dict(doc original_example[1])<block_end><yield>spacy_example<block_end><block_end><return>corpus<block_end> |
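##
## Hedged sketch of how one of the readers registered above might be referenced
## from a spaCy v3 training config (the [corpora.train] section name and the
## paths are assumptions, not taken from this module):
##
## [corpora.train]
## @readers = "med_mentions_reader"
## directory_path = "path/to/med_mentions"
## split = "train"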
<import_stmt>argparse<import_stmt>importlib.util<import_stmt>pathlib<import_stmt>matplotlib.pyplot<as>plt<import_stmt>tikzplotlib<as>tpl<def_stmt>_main <block_start>parser=argparse.ArgumentParser(description="Refresh all reference TeX files.")<line_sep>parser.parse_args()<line_sep>this_dir=pathlib.Path(__file__).resolve().parent<line_sep>test_files=[f<for>f this_dir.iterdir()<if>(this_dir/f).is_file()<and>f.name[:5]<eq>"test_"<and>f.name[-3:]<eq>".py"]<line_sep>test_modules=[f.name[:-3]<for>f test_files]<line_sep># remove some edge cases
test_modules.remove("test_rotated_labels")<line_sep>test_modules.remove("test_deterministic_output")<line_sep>test_modules.remove("test_cleanfigure")<line_sep>test_modules.remove("test_context")<for_stmt>mod test_modules<block_start>module=importlib.import_module(mod)<line_sep>module.plot()<line_sep>code=tpl.get_tikz_code(include_disclaimer=<false> float_format=".8g")<line_sep>plt.close("all")<line_sep>tex_filename=mod+"_reference.tex"<with_stmt>open(this_dir/tex_filename "w" encoding="utf8")<as>f<block_start>f.write(code)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>_main()<block_end> |
# -*- coding: utf-8 -*-
<import_from_future_stmt> absolute_import<import_from_stmt>.parser AdblockRules AdblockRule AdblockParsingError<line_sep> |
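##
## Hedged usage sketch for the re-exported API (the rule text below is a made-up
## example, not taken from this package):
##
## >>> rules = AdblockRules(["||ads.example.com^"])
## >>> rules.should_block("http://ads.example.com/banner.png")
## True
## >>> rules.should_block("http://example.com/index.html")
## False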
<import_stmt>os<import_stmt>unittest<import_stmt>numpy<as>np<line_sep>#from scipy.integrate import quadrature as quadrature
<import_from_stmt>scipy.integrate quad<as>quadrature<import_from_stmt>statsmodels.nonparametric kernels<as>sm_kernels<import_from_stmt>hpbandster.optimizers.kde kernels<as>hp_kernels<import_stmt>ConfigSpace<as>CS<import_from_stmt>pdb set_trace<line_sep>rapid_development=<true><line_sep>rapid_development=<false><class_stmt>TestGaussian(unittest.TestCase)<block_start>n_train=256<line_sep>n_test=1024<def_stmt>setUp self<block_start>self.x_train=np.random.rand(self.n_train)<line_sep>self.x_test=np.random.rand(self.n_test)<block_end><def_stmt>tearDown self<block_start>self.x_train=<none><line_sep>self.x_test=<none><block_end>@unittest.skipIf(rapid_development "test skipped to accelerate developing new tests")<def_stmt>test_values self<block_start><for_stmt>bw [1e-3 1e-2 1e-1 1]<block_start>sm_values=sm_kernels.gaussian(bw self.x_train[: <none>] self.x_test[<none> :])<line_sep>hp_kernel=hp_kernels.Gaussian(data=self.x_train bandwidth=bw fix_boundary=<false>)<line_sep>hp_values=hp_kernel(self.x_test)<line_sep>self.assertTrue(np.allclose(hp_values sm_values/bw 1e-4))<block_end><block_end>@unittest.skipIf(rapid_development "test skipped to accelerate developing new tests")<def_stmt>test_pdf_boundary_simple self<block_start>self.x_train=np.array([0])<for_stmt>bw [1e-3 1e-2 1e-1]# note: for larger bandwidths, the pdf also needs to be truncated as +1,
# which leads to something different than twice the pdf
<block_start>hp_kernel1=hp_kernels.Gaussian(data=self.x_train bandwidth=bw fix_boundary=<false>)<line_sep>hp_kernel2=hp_kernels.Gaussian(data=self.x_train bandwidth=bw fix_boundary=<true>)<line_sep>hp_values1=hp_kernel1(self.x_test)<line_sep>hp_values2=hp_kernel2(self.x_test)<line_sep>self.assertTrue(np.allclose(2<times>hp_values1 hp_values2 1e-4))<block_end>self.x_train=np.array([1])<for_stmt>bw [1e-3 1e-2 1e-1]# note: for larger bandwidths, the pdf also needs to be truncated as +1,
# which leads to something different than twice the pdf
<block_start>hp_kernel1=hp_kernels.Gaussian(data=self.x_train bandwidth=bw fix_boundary=<false>)<line_sep>hp_kernel2=hp_kernels.Gaussian(data=self.x_train bandwidth=bw fix_boundary=<true>)<line_sep>hp_values1=hp_kernel1(self.x_test)<line_sep>hp_values2=hp_kernel2(self.x_test)<line_sep>self.assertTrue(np.allclose(2<times>hp_values1 hp_values2 1e-4))<block_end># simple test based on 68, 95, 99% rule
self.x_train=np.array([0.5])<for_stmt>bw,w ([0.5 0.6827] [0.25 0.9545] [1/6 0.9973])<block_start>hp_kernel=hp_kernels.Gaussian(data=self.x_train bandwidth=bw fix_boundary=<true>)<line_sep>self.assertAlmostEqual(hp_kernel.weights[0] 1/w delta=1e-4)<block_end><block_end>@unittest.skipIf(rapid_development "test skipped to accelerate developing new tests")<def_stmt>test_pdf_boundary_quadrature self<block_start><for_stmt>bw [1e-2 1e-1 1]<block_start>hp_kernel=hp_kernels.Gaussian(data=self.x_train bandwidth=bw fix_boundary=<true>)<def_stmt>quad_me x<block_start>x_test=np.array([x])<line_sep>pdfs=hp_kernel(x_test)<line_sep><return>(pdfs.mean())<block_end>self.assertAlmostEqual(quadrature(quad_me 0 1)[0] 1 delta=1e-4)<block_end><block_end>@unittest.skipIf(rapid_development "test skipped to accelerate developing new tests")<def_stmt>test_sample self<block_start>num_samples=2<power>20<for_stmt>bw [1e-1 5e-1 1]<block_start>hp_kernel=hp_kernels.Gaussian(data=self.x_train bandwidth=bw fix_boundary=<true>)<line_sep>samples=hp_kernel.sample(num_samples=num_samples)<line_sep>phat1,x=np.histogram(samples normed=<true>)<line_sep>phat2=hp_kernel((x[1:]+x[:-1])/2).mean(axis=0)<for_stmt>p1,p2 zip(phat1 phat2)<block_start>self.assertAlmostEqual(p1 p2 delta=5e-2)<block_end><block_end><block_end><block_end><class_stmt>Test1dCategorical(unittest.TestCase)<block_start>n_train=256<line_sep>n_test=1024<def_stmt>setUp self<block_start>self.configspace=CS.ConfigurationSpace(43)<line_sep>HPs=[]<line_sep>HPs.append(CS.CategoricalHyperparameter('cat1' choices=['foo' 'bar' 'baz']))<line_sep>self.configspace.add_hyperparameters(HPs)<line_sep>x_train_confs=[self.configspace.sample_configuration()<for>i range(self.n_train)]<line_sep>self.x_train=np.array([c.get_array()<for>c x_train_confs]).squeeze()<line_sep>x_test_confs=[self.configspace.sample_configuration()<for>i range(self.n_test)]<line_sep>self.x_test=np.array([c.get_array()<for>c x_train_confs]).squeeze()<block_end><def_stmt>tearDown self<block_start>self.configspace=<none><line_sep>self.x_train=<none><line_sep>self.x_test=<none><block_end>@unittest.skipIf(rapid_development "test skipped to accelerate developing new tests")<def_stmt>test_values self<block_start><for_stmt>bw [1e-3 1e-2 1e-1 1]<block_start>sm_values=[]<for_stmt>x self.x_test<block_start>sm_values.append(sm_kernels.aitchison_aitken(bw self.x_train x))<block_end>sm_values=np.array(sm_values)<line_sep>hp_kernel=hp_kernels.AitchisonAitken(data=self.x_train bandwidth=bw num_values=len(self.configspace.get_hyperparameters()[0].choices))<line_sep>hp_values=hp_kernel(self.x_test)<line_sep>self.assertTrue(np.allclose(hp_values.T sm_values.squeeze() 1e-4))<block_end><block_end>@unittest.skipIf(rapid_development "test skipped to accelerate developing new tests")<def_stmt>test_sample self<block_start>num_samples=2<power>20<for_stmt>bw [1e-1 5e-1 1]<block_start>hp_kernel=hp_kernels.AitchisonAitken(data=self.x_train bandwidth=bw num_values=len(self.configspace.get_hyperparameters()[0].choices))<line_sep>samples=hp_kernel.sample(num_samples=num_samples)<line_sep>phat1,phat2=[] []<for_stmt>value [0 1 2]<block_start>phat1.append(np.sum(samples<eq>value)/num_samples)<line_sep>phat2.append(hp_kernel(np.array([value])).mean(axis=0)[0])<block_end><for_stmt>p1,p2 zip(phat1 phat2)<block_start>self.assertAlmostEqual(p1 p2 delta=5e-3)<block_end>self.assertAlmostEqual(np.sum(phat2) 1 
delta=1e-5)<block_end><block_end><block_end><class_stmt>Test1dInteger(unittest.TestCase)<block_start>n_train=128<line_sep>n_test=1024<def_stmt>setUp self<block_start>self.configspace=CS.ConfigurationSpace(43)<line_sep>HPs=[]<line_sep>HPs.append(CS.UniformIntegerHyperparameter('int1' lower=-2 upper=2))<line_sep>self.configspace.add_hyperparameters(HPs)<line_sep>x_train_confs=[self.configspace.sample_configuration()<for>i range(self.n_train)]<line_sep>self.x_train=np.array([c.get_array()<for>c x_train_confs]).squeeze()<line_sep>x_test_confs=[self.configspace.sample_configuration()<for>i range(self.n_test)]<line_sep>self.x_test=np.array([c.get_array()<for>c x_test_confs]).squeeze()<block_end><def_stmt>tearDown self<block_start>self.configspace=<none><line_sep>self.x_train=<none><line_sep>self.x_test=<none><block_end>@unittest.skipIf(rapid_development "test skipped to accelerate developing new tests")<def_stmt>test_values self<block_start>n=self.configspace.get_hyperparameters()[0].upper-self.configspace.get_hyperparameters()[0].lower+1<for_stmt>bw [1e-3 1e-2 1e-1 0.99]<block_start>sm_x_train=np.rint(self.x_train<times>n-.5).astype(np.int)<line_sep>sm_x_test=np.rint(self.x_test<times>n-.5).astype(np.int).squeeze()<line_sep>sm_values=np.array([sm_kernels.wang_ryzin(bw sm_x_train[: <none>] x)<for>x sm_x_test]).squeeze()<line_sep>hp_kernel=hp_kernels.WangRyzinInteger(data=self.x_train bandwidth=bw num_values=n fix_boundary=<false>)<line_sep>hp_values=hp_kernel(self.x_test).squeeze()<line_sep>self.assertTrue(np.allclose(hp_values.T sm_values 1e-4))<block_end><block_end>@unittest.skipIf(rapid_development "test skipped to accelerate developing new tests")<def_stmt>test_pdf_boundary_quadrature self<block_start>self.x_test=np.array([0 1 2 3 4])/5+(1/10)<for_stmt>bw [1e-2 1e-1 0.99]<block_start>hp_kernel=hp_kernels.WangRyzinInteger(data=self.x_train bandwidth=bw num_values=5 fix_boundary=<true>)<line_sep>hp_values=hp_kernel(self.x_test).mean(axis=0)<line_sep>self.assertAlmostEqual(hp_values.sum() 1 delta=1e-4)<block_end><block_end>@unittest.skipIf(rapid_development "test skipped to accelerate developing new tests")<def_stmt>test_sample self<block_start>num_samples=2<power>20<for_stmt>bw [1e-1 5e-1 0.99]<block_start>hp_kernel=hp_kernels.WangRyzinInteger(data=self.x_train bandwidth=bw num_values=5 fix_boundary=<true>)<line_sep>samples=hp_kernel.sample(num_samples=num_samples)<line_sep>phat1,x=np.histogram(samples normed=<true> bins=[0 0.2 .4 .6 .8 1.])<line_sep>phat1<augdiv>5# account for bin width
phat2=hp_kernel((x[1:]+x[:-1])/2).mean(axis=0)<for_stmt>p1,p2 zip(phat1 phat2)<block_start>self.assertAlmostEqual(p1 p2 delta=5e-2)<block_end><block_end><block_end><block_end><class_stmt>Test1dOrdinal(unittest.TestCase)<block_start>n_train=128<line_sep>n_test=5<def_stmt>setUp self<block_start>self.configspace=CS.ConfigurationSpace(43)<line_sep>HPs=[]<line_sep>HPs.append(CS.OrdinalHyperparameter('ord1' ['cold' 'mild' 'warm' 'hot']))<line_sep>self.configspace.add_hyperparameters(HPs)<line_sep>x_train_confs=[self.configspace.sample_configuration()<for>i range(self.n_train)]<line_sep>self.x_train=np.array([c.get_array()<for>c x_train_confs]).squeeze()<line_sep>x_test_confs=[self.configspace.sample_configuration()<for>i range(self.n_test)]<line_sep>self.x_test=np.array([c.get_array()<for>c x_test_confs]).squeeze()<block_end><def_stmt>tearDown self<block_start>self.configspace=<none><line_sep>self.x_train=<none><line_sep>self.x_test=<none><block_end>@unittest.skipIf(rapid_development "test skipped to accelerate developing new tests")<def_stmt>test_values self<block_start><for_stmt>bw [1e-3 1e-2 1e-1 1]<block_start>sm_values=np.array([sm_kernels.wang_ryzin(bw self.x_train[: <none>] x)<for>x self.x_test])<line_sep>hp_kernel=hp_kernels.WangRyzinOrdinal(data=self.x_train bandwidth=bw fix_boundary=<false>)<line_sep>hp_values=hp_kernel(self.x_test)<line_sep>self.assertTrue(np.allclose(hp_values.T sm_values 1e-4))<block_end><block_end>@unittest.skipIf(rapid_development "test skipped to accelerate developing new tests")<def_stmt>test_pdf_boundary_simple self<block_start>self.x_train=np.array([0])<line_sep>self.x_test=np.array([0 1 2 3])<for_stmt>bw [1e-3 1e-2]# note: for larger bandwidths, the pdf also needs to be truncated as +1,
# which leads to something different than the scaling computed here
<block_start>hp_kernel1=hp_kernels.WangRyzinOrdinal(data=self.x_train bandwidth=bw num_values=4 fix_boundary=<false>)<line_sep>hp_kernel2=hp_kernels.WangRyzinOrdinal(data=self.x_train bandwidth=bw num_values=4 fix_boundary=<true>)<line_sep>hp_values1=hp_kernel1(self.x_test).squeeze()<line_sep>hp_values2=hp_kernel2(self.x_test).squeeze()<line_sep>weight=1-hp_values1[1:].sum()<line_sep>self.assertTrue(np.allclose(hp_values1/weight hp_values2 1e-4))<block_end>self.x_train=np.array([3])<line_sep>self.x_test=np.array([0 1 2 3])<for_stmt>bw [1e-3 1e-2]# note: for larger bandwidths, the pdf also needs to be truncated as +1,
# which leads to something different than the scaling computed here
<block_start>hp_kernel1=hp_kernels.WangRyzinOrdinal(data=self.x_train bandwidth=bw num_values=4 fix_boundary=<false>)<line_sep>hp_kernel2=hp_kernels.WangRyzinOrdinal(data=self.x_train bandwidth=bw num_values=4 fix_boundary=<true>)<line_sep>hp_values1=hp_kernel1(self.x_test).squeeze()<line_sep>hp_values2=hp_kernel2(self.x_test).squeeze()<line_sep>weight=1-hp_values1[:-1].sum()<line_sep>self.assertTrue(np.allclose(hp_values1/weight hp_values2 1e-4))<block_end># simple test based on 68, 95, 99% rule
self.x_train=np.array([0.5])<for_stmt>bw,w ([0.5 0.6827] [0.25 0.9545] [1/6 0.9973])<block_start>hp_kernel=hp_kernels.Gaussian(data=self.x_train bandwidth=bw fix_boundary=<true>)<line_sep>self.assertAlmostEqual(hp_kernel.weights[0] 1/w delta=1e-4)<block_end><block_end>@unittest.skipIf(rapid_development "test skipped to accelerate developing new tests")<def_stmt>test_pdf_boundary_quadrature self<block_start>self.x_test=np.array([0 1 2 3])<for_stmt>bw [1e-2 1e-1 0.99]<block_start>hp_kernel=hp_kernels.WangRyzinOrdinal(data=self.x_train bandwidth=bw num_values=4 fix_boundary=<true>)<line_sep>hp_values=hp_kernel(self.x_test).mean(axis=0)<line_sep>self.assertAlmostEqual(hp_values.sum() 1 delta=1e-4)<block_end><block_end>@unittest.skipIf(rapid_development "test skipped to accelerate developing new tests")<def_stmt>test_sample self<block_start>num_samples=2<power>20<for_stmt>bw [1e-1 5e-1 0.99]<block_start>hp_kernel=hp_kernels.WangRyzinOrdinal(data=self.x_train bandwidth=bw num_values=4 fix_boundary=<true>)<line_sep>samples=hp_kernel.sample(num_samples=num_samples)<line_sep>phat1,x=np.histogram(samples normed=<true> bins=[-0.5 0.5 1.5 2.5 3.5])<line_sep>phat2=hp_kernel((x[1:]+x[:-1])/2).mean(axis=0)<for_stmt>p1,p2 zip(phat1 phat2)<block_start>self.assertAlmostEqual(p1 p2 delta=5e-2)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
"""
Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""<import_stmt>boto3<def_stmt>handler event context<block_start>print(event)<line_sep>weights=event['weights']<line_sep>func_name=event['function-name']<line_sep>version=event['new-version']<line_sep>alias_name=event['alias-name']<if_stmt>'current-weight'<in>event<block_start>current_weight=event['current-weight']<line_sep>next_weight=get_next_weight(weights current_weight)<block_end><else_stmt><block_start>next_weight=weights[0]<block_end>update_weight(func_name alias_name version next_weight)<line_sep><return>next_weight<block_end><def_stmt>get_next_weight weights current_weight<block_start>index=weights.index(current_weight)<line_sep><return>weights[index+1]<block_end><def_stmt>update_weight func_name alias_name version next_weight<block_start>print("next weight: {0}".format(next_weight))<line_sep>client=boto3.client('lambda')<line_sep>weights={version:next_weight}<line_sep>routing_config={'AdditionalVersionWeights':weights}<line_sep>res=client.update_alias(FunctionName=func_name Name=alias_name RoutingConfig=routing_config)<line_sep>print(res)<line_sep><return><block_end> |
# keybindings.py
#
# MIT License
#
# Copyright (c) 2020 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
<import_stmt>logging<import_from_stmt>typing Dict Callable List Tuple<import_from_stmt>.config ApplicationSettings<def_stmt>parse_keystroke shortcut:str<arrow>List[str]<block_start>"""
Translates a keystroke description like "<Ctrl><Alt>P" into a list ["control", "alt", "p"]
"""<line_sep>res=[]<for_stmt>sub ["<Ctrl>" "<Ctr>" "<Control>" "Control" "<ctrl>" "<ctr>" "<control>" "control"]<block_start><if_stmt>sub<in>shortcut<block_start>shortcut=shortcut.replace(sub "")<line_sep>res<augadd>["control"]<line_sep><break><block_end><block_end><for_stmt>sub ["<Alt>" "<alt>" "Alt" "alt"]<block_start><if_stmt>sub<in>shortcut<block_start>shortcut=shortcut.replace(sub "")<line_sep>res<augadd>["alt"]<line_sep><break><block_end><block_end><for_stmt>sub ["<Shift>" "<shift>" "Shift" "shift"]<block_start><if_stmt>sub<in>shortcut<block_start>shortcut=shortcut.replace(sub "")<line_sep>res<augadd>["shift"]<line_sep><break><block_end><block_end><for_stmt>sub ["<Meta>" "<meta>" "Meta" "meta" "<Super>" "<super>" "Super" "super"]<block_start><if_stmt>sub<in>shortcut<block_start>shortcut=shortcut.replace(sub "")<line_sep>res<augadd>["super"]<line_sep><break><block_end><block_end><if_stmt>len(shortcut)<g>0<block_start>res<augadd>[shortcut.lower()]<block_end><return>res<block_end><class_stmt>Keybindings(object)<block_start><def_stmt>__init__ self settings:ApplicationSettings mappings:Dict[str Dict[str Tuple[str Callable]]]<block_start>"""
Creates keybindings for shortcuts stored in GSettings.
The list of settings cannot be changed after creation.
Pass a map of (setting_id -> callback)
"""<line_sep>super().__init__()<line_sep>self._mappings=mappings<line_sep>self._settings=settings<line_sep>self._active_shortcuts=dict()<line_sep># see https://github.com/timeyyy/system_hotkey
<import_from_stmt>system_hotkey SystemHotkey<line_sep>self._keybinder=SystemHotkey(check_queue_interval=0.01 use_xlib=<true>)<line_sep>self.rebind_all()<block_end><def_stmt>rebind_all self<block_start><for_stmt>category,shortcuts self._mappings.items()<block_start><if_stmt><not>shortcuts<block_start><continue><block_end><for_stmt>title,info shortcuts.items()<block_start>shortcut_id,callback=info<line_sep>shortcut=self._settings.get_keybinding(shortcut_id)<line_sep>parsed=parse_keystroke(shortcut)<if_stmt><not>callback<block_start>logging.warning(f"Empty callback for shortcut '{shortcut_id}': ignored")<line_sep><continue><block_end><if_stmt><not>shortcut<block_start>logging.warning(f"Empty shortcut for settings '{shortcut_id}': ignored")<line_sep><continue><block_end>logging.info(f"Binding '{shortcut_id}' -> '{callback.__name__}'")<if_stmt>shortcut<and>shortcut<in>self._active_shortcuts<and>self._active_shortcuts[shortcut]<ne>callback<block_start>logging.debug(f"Removing current binding '{shortcut}'")<try_stmt><block_start>self._keybinder.unregister(parsed)<del_stmt>self._active_shortcuts[shortcut]<block_end><except_stmt>Exception<as>e<block_start>logging.error(f"Could not unbind '{shortcut}': {e}")<line_sep><continue><block_end><block_end><if_stmt>shortcut<and>shortcut<not><in>self._active_shortcuts<block_start>logging.info(f"Binding '{shortcut}' ({parsed}) to '{callback.__name__}'")<try_stmt><block_start>self._keybinder.register(parsed callback=callback)<line_sep>self._active_shortcuts[shortcut]=callback<block_end><except_stmt>Exception<as>e<block_start>logging.error(f"Could not bind {shortcut} to {callback.__name__}: {e}")<line_sep><continue><block_end>self._settings.connect(f"changed::{shortcut_id}" <lambda>k s:self.rebind_all())<block_end><block_end><block_end><block_end><block_end> |
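A quick illustrative check of parse_keystroke against the behaviour its docstring describes. This is a hypothetical standalone call: the module/import name "keybindings" is an assumption, and inside the plugin the function is only invoked from Keybindings.rebind_all.
# Hypothetical usage sketch; the import path is assumed.
from keybindings import parse_keystroke

assert parse_keystroke("<Ctrl><Alt>P") == ["control", "alt", "p"]
assert parse_keystroke("<Shift>F5") == ["shift", "f5"]
assert parse_keystroke("<Super>space") == ["super", "space"]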
<import_stmt>spacy<line_sep>nlp=spacy.load("zh_core_web_sm")<line_sep>text=("在300多年的风雨历程中,历代同仁堂人始终恪守“炮制虽繁必不敢省人工,品味虽贵必不敢减物力”的古训,"<concat>"树立“修合无人见,存心有天知”的自律意识,造就了制药过程中兢兢小心、精益求精的严细精神。")<line_sep># Disable the tagger and parser
<with_stmt>nlp.disable_pipes("tagger" "parser")# Process the text
<block_start>doc=nlp(text)<line_sep># Print the entities in the doc
print(doc.ents)<block_end> |
# -*- coding: utf-8 -*-
<import_from_stmt>collections OrderedDict<import_stmt>os<import_stmt>sys<import_from_stmt>base *<import_stmt>requests<line_sep>ASSERT_RESPONSE=b"Hello world!"<line_sep>RESPONSE=[b"Hello " b"world!"]<class_stmt>App(BaseApp)<block_start>environ=<none><def_stmt>__call__ self environ start_response<block_start>status='200 OK'<line_sep>response_headers=[('Content-type' 'text/plain')]<line_sep>start_response(status response_headers)<line_sep>self.environ=environ.copy()<line_sep>print(environ)<line_sep><return>RESPONSE<block_end><block_end><class_stmt>ErrApp(BaseApp)<block_start><def_stmt>__call__ self environ start_response<block_start>status='200 OK'<line_sep>response_headers=[('Content-type' 'text/plain')]<line_sep>start_response(status response_headers)<line_sep>self.environ=environ.copy()<line_sep>print(environ)<line_sep>environ["XXXX"]<line_sep><return>SIMPLE_RESPONSE<block_end><block_end><class_stmt>ErrAppEx(BaseApp)<block_start><def_stmt>__call__ self environ start_response<block_start>status='500 InternalServerError'<line_sep>response_headers=[('Content-type' 'text/plain')]<line_sep>start_response(status response_headers ZeroDivisionError)<line_sep>self.environ=environ.copy()<line_sep><return>RESPONSE<block_end><block_end><class_stmt>IterErrApp(BaseApp)<block_start><def_stmt>__call__ self environ start_response<block_start>status='200 OK'<line_sep>response_headers=[('Content-type' 'text/plain')]<line_sep>start_response(status response_headers)<line_sep>self.environ=environ.copy()<line_sep>print(environ)<line_sep><return>[1]<block_end><block_end><class_stmt>UpgradeApp(BaseApp)<block_start><def_stmt>__call__ self environ start_response<block_start>status='101 Switching Protocols'<line_sep>response_headers=[('Upgrade' 'websocket')]<line_sep>start_response(status response_headers)<line_sep>self.environ=environ.copy()<line_sep>print(environ)<line_sep><return>[]<block_end><block_end><def_stmt>test_check_key <block_start><def_stmt>client <block_start><return>requests.get("http://localhost:8000/foo/bar")<block_end>env,res=run_client(client App)<assert_stmt>(res.content<eq>ASSERT_RESPONSE)<assert_stmt>(env.get("REQUEST_METHOD")<eq>"GET")<assert_stmt>(env.get("SCRIPT_NAME")<eq>"")<assert_stmt>(env.get("PATH_INFO")<eq>"/foo/bar")<assert_stmt>(env.get("QUERY_STRING")<eq><none>)<assert_stmt>(env.get("CONTENT_TYPE")<eq><none>)<line_sep># assert(env.get("CONTENT_LENGTH") == "0")
<assert_stmt>(env.get("SERVER_NAME")<eq>"0.0.0.0")<assert_stmt>(env.get("SERVER_PORT")<eq>"8000")<assert_stmt>(env.get("SERVER_PROTOCOL")<eq>"HTTP/1.1")<assert_stmt>(env.get("HTTP_USER_AGENT")<ne><none>)<block_end><def_stmt>test_simple <block_start><def_stmt>client <block_start><return>requests.get("http://localhost:8000/")<block_end>env,res=run_client(client App)<line_sep># print(res.content)
<assert_stmt>(res.content<eq>ASSERT_RESPONSE)<assert_stmt>(env.get("PATH_INFO")<eq>"/")<assert_stmt>(env.get("QUERY_STRING")<eq><none>)<block_end><def_stmt>test_encode <block_start><def_stmt>client <block_start><return>requests.get("http://localhost:8000/あいう")<block_end>env,res=run_client(client App)<assert_stmt>(res.content<eq>ASSERT_RESPONSE)<line_sep>path_info=env.get('PATH_INFO')<line_sep>expected="/あいう"# utf-8
<if_stmt>sys.version_info[0]<g>2<block_start>expected=expected.encode('utf-8').decode('latin1')<block_end><assert_stmt>(env.get("PATH_INFO")<eq>expected)<assert_stmt>(env.get("QUERY_STRING")<eq><none>)<block_end><def_stmt>test_query <block_start><def_stmt>client <block_start><return>requests.get("http://localhost:8000/ABCDEF?a=1234&bbbb=ccc")<block_end>env,res=run_client(client App)<assert_stmt>(res.content<eq>ASSERT_RESPONSE)<assert_stmt>(env.get("PATH_INFO")<eq>"/ABCDEF")<assert_stmt>(env.get("QUERY_STRING")<eq>"a=1234&bbbb=ccc")<block_end><def_stmt>test_chunk_response <block_start><def_stmt>client <block_start><return>requests.get("http://localhost:8000/")<block_end>env,res=run_client(client App)<line_sep>headers=res.headers<assert_stmt>(res.content<eq>ASSERT_RESPONSE)<assert_stmt>(headers["transfer-encoding"]<eq>"chunked")<assert_stmt>(headers["connection"]<eq>"close")<block_end><def_stmt>test_err <block_start><def_stmt>client <block_start><return>requests.get("http://localhost:8000/")<block_end>env,res=run_client(client ErrApp)<assert_stmt>(res.status_code<eq>500)<block_end><def_stmt>test_iter_err <block_start><def_stmt>client <block_start><return>requests.get("http://localhost:8000/")<block_end>env,res=run_client(client IterErrApp)<assert_stmt>(res.status_code<eq>500)<block_end><def_stmt>test_headers <block_start><def_stmt>client <block_start>headers={"X-TEST":"123" "DNT":"1"}<line_sep><return>requests.get("http://localhost:8000/" headers=headers)<block_end>env,res=run_client(client App)<assert_stmt>(res.status_code<eq>200)<assert_stmt>(res.content<eq>ASSERT_RESPONSE)<assert_stmt>(env["HTTP_X_TEST"]<eq>"123")<assert_stmt>(env["HTTP_DNT"]<eq>"1")<block_end><def_stmt>test_post <block_start><def_stmt>client <block_start>payload=OrderedDict([('key1' 'value1') ('key2' 'value2')])<line_sep><return>requests.post("http://localhost:8000/" data=payload)<block_end>env,res=run_client(client App)<assert_stmt>(res.status_code<eq>200)<assert_stmt>(res.content<eq>ASSERT_RESPONSE)<assert_stmt>(env.get("wsgi.input").read()<eq>b"key1=value1&key2=value2")<block_end><def_stmt>gen <block_start><yield>b"key1=value1&key2=value2"<block_end><def_stmt>test_post_chunked <block_start><def_stmt>client <block_start><return>requests.post("http://localhost:8000/" data=gen())<block_end>env,res=run_client(client App)<assert_stmt>(res.status_code<eq>200)<assert_stmt>(res.content<eq>ASSERT_RESPONSE)<assert_stmt>(env.get("wsgi.input").read()<eq>b"key1=value1&key2=value2")<block_end><def_stmt>test_upload_file <block_start><def_stmt>client <block_start>filepath=os.path.join(os.path.dirname(__file__) "wallpaper.jpg")<line_sep>files={'wallpaper.jpg':open(filepath 'rb')}<line_sep><return>requests.post("http://localhost:8000/" files=files)<block_end>env,res=run_client(client App)<assert_stmt>(res.status_code<eq>200)<assert_stmt>(res.content<eq>ASSERT_RESPONSE)<line_sep>length=env["CONTENT_LENGTH"]<line_sep>data=env.get("wsgi.input").read()<assert_stmt>(len(data)<eq>int(length))<block_end><def_stmt>test_error <block_start><def_stmt>client <block_start><return>requests.get("http://localhost:8000/foo/bar")<block_end>env,res=run_client(client ErrAppEx)<assert_stmt>(res.status_code<eq>500)<assert_stmt>(res.content<eq>ASSERT_RESPONSE)<assert_stmt>(env.get("REQUEST_METHOD")<eq>"GET")<block_end><def_stmt>test_upgrade <block_start>"""This server will assume the application will correctly set the
"Upgrade" header, and automatically set the "Connection" header to
"upgrade", instead of "keep-alive" or "close", for a response with
status "101 Switching Protocols". That is likely to better conform to
RFC 7230 (HTTP/1.1) and RFC 6455 (WebSocket).
"""<def_stmt>client <block_start><return>requests.get("http://localhost:8000")<block_end>env,res=run_client(client UpgradeApp)<line_sep>headers=res.headers<assert_stmt>(res.status_code<eq>101)<assert_stmt>(headers["upgrade"]<eq>"websocket")<assert_stmt>(headers["connection"]<eq>"upgrade")<block_end><def_stmt>test_no_content <block_start><class_stmt>App(BaseApp)<block_start><def_stmt>__call__ self environ start_response<block_start>status="204 No Content"<line_sep>response_headers=[]<line_sep>start_response(status response_headers)<line_sep>self.environ=environ.copy()<line_sep><return>[]<block_end><block_end><def_stmt>client <block_start><return>requests.get("http://localhost:8000")<block_end>env,res=run_client(client App)<line_sep>headers=res.headers<line_sep># print(env)
# print(res)
<assert_stmt>(res.status_code<eq>204)<assert_stmt>("Content-Length"<not><in>headers)<assert_stmt>("Transfer-Encoding"<not><in>headers)<block_end> |
"""
@author: wangguanan
@contact: <EMAIL>
"""<import_stmt>numpy<as>np<import_stmt>copy<import_from_stmt>PIL Image<import_stmt>torch.utils.data<as>data<import_from_stmt>.samplers PKSampler<class_stmt>ReIDDataset<block_start><def_stmt>__init__ self samples transform<block_start>self.samples=samples<line_sep>self.transform=transform<block_end><def_stmt>__getitem__ self index<block_start>sample=copy.deepcopy(self.samples[index])<line_sep>sample[0]=self._loader(sample[0])<if_stmt>self.transform<is><not><none><block_start>sample[0]=self.transform(sample[0])<block_end>sample[1]=np.array(sample[1])<line_sep><return>sample<block_end><def_stmt>__len__ self<block_start><return>len(self.samples)<block_end><def_stmt>_loader self img_path<block_start><return>Image.open(img_path).convert('RGB')<block_end><block_end><class_stmt>DataManager(object)<block_start>'''
Args:
sources(list): list of torch.data.ReIDDataset, the source datasets to train with
target(torch.data.ReIDDataset): target dataset to evaluate on
transforms_train(torch.torchvision.transforms):
transforms_test(torch.torchvision.transforms):
sampler(str): sampling strategy for the train dataset; supports 'pk' and 'random'.
when 'pk', params 'p' and 'k' must be given.
when 'random', params 'batch_size' must be given.
Example:
datamanager = DataManager(
sources=[lightreid.data.Market1501(data_path='', combineall=False), lightreid.data.DukeMTMCreID(data_path='', combineall=False)],
target=lightreid.data.Market1501(data_path='', combineall=False),
transforms_train=lightreid.data.build_transforms(img_size=[256,128], transform_list=['randomflip', 'padcrop', 'colorjitor', 'rea']),
transforms_test=lightreid.data.build_transforms(img_size=[256,128], transform_list=[]),
sampler='pk', p=16, k=4
)
train_loader = datamanager.train_loader
query_loader = datamanager.query_loader
gallery_loader = datamanager.gallery_loader
'''<line_sep>KWARGS=['batch_size' 'p' 'k']<line_sep>SAMPLERS=['random' 'pk']<def_stmt>__init__ self sources target transforms_train transforms_test sampler **kwargs# check param sample and kwargs is legal
<block_start><assert_stmt>sampler<in>DataManager.SAMPLERS 'sampler expect {}. but got {}'.format(DataManager.SAMPLERS sampler)<line_sep># init train/query/gallery dataset
train=self.combine([source.train<for>source sources])<line_sep>self.class_num=len(set([sample[1]<for>sample train]))<line_sep>self.train_dataset=ReIDDataset(train transforms_train)<line_sep>self.query_gallery_dataset_dict={}<for_stmt>val target<block_start>query_dataset=ReIDDataset(val.query transforms_test)<line_sep>gallery_dataset=ReIDDataset(val.gallery transforms_test)<line_sep>self.query_gallery_dataset_dict[val.__class__.__name__]=(query_dataset gallery_dataset)<block_end># train loader
<if_stmt>sampler<eq>'random'<block_start><assert_stmt>'batch_size'<in>kwargs.keys() 'param batch_size(int) must be given when sample=\'random\''<line_sep>batch_size=kwargs['batch_size']<line_sep>self.train_loader=data.DataLoader(self.train_dataset batch_size=batch_size num_workers=8 drop_last=<true> shuffle=<true>)<block_end><elif_stmt>sampler<eq>'pk'<block_start><assert_stmt>'p'<in>kwargs.keys()<and>'k'<in>kwargs.keys() 'param p(int) and k(int) must be given when sample=\'random\''<line_sep>p,k=kwargs['p'] kwargs['k']<line_sep>self.train_loader=data.DataLoader(self.train_dataset batch_size=p<times>k num_workers=8 drop_last=<true> sampler=PKSampler(self.train_dataset k=k))<block_end><else_stmt><block_start><assert_stmt>0 'expect {}. but got {}'.format(DataManager.SAMPLERS sampler)<block_end># query and gallery loader
self.query_gallery_loader_dict={}<for_stmt>dataset_name,(query_dataset gallery_dataset) self.query_gallery_dataset_dict.items()<block_start>query_loader=data.DataLoader(query_dataset batch_size=64 num_workers=8 drop_last=<false> shuffle=<false>)<line_sep>gallery_loader=data.DataLoader(gallery_dataset batch_size=64 num_workers=8 drop_last=<false> shuffle=<false>)<line_sep>self.query_gallery_loader_dict[dataset_name]=(query_loader gallery_loader)<block_end><block_end><def_stmt>combine self samples_list<block_start>'''combine more than one samples (e.g. market.train and duke.train) as a samples'''<line_sep>all_samples=[]<line_sep>max_pid,max_cid=0 0<for_stmt>samples samples_list<block_start><for_stmt>a_sample samples<block_start>img_path=a_sample[0]<line_sep>pid=max_pid+a_sample[1]<line_sep>cid=max_cid+a_sample[2]<line_sep>all_samples.append([img_path pid cid])<block_end>max_pid=max([sample[1]<for>sample all_samples])<line_sep>max_cid=max([sample[2]<for>sample all_samples])<block_end><return>all_samples<block_end><block_end> |
#MIT License(MIT)
# CertToHex.py Version 1.0.0 #
# Copyright(c) 2018 <NAME> #
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
<import_stmt>binascii<line_sep>filename='howsmysslBase64.cer'<with_stmt>open(filename 'rb')<as>f<block_start>content=f.read()<block_end>print('// '+filename)<line_sep>print('const char* test_root_ca = \ ')<line_sep>outString='"'<line_sep>caCertLen=0<line_sep>x=len(content)<for_stmt>i range(0 x-1)<block_start>first=(chr(content[i]))<line_sep># print(first,content[i])
<if_stmt>content[i]<eq>13<block_start>outString=outString+'\\n" \ '<block_end>outString=outString+first<if_stmt>content[i]<eq>10<block_start>outString=outString+'"'<block_end><block_end>outString=outString[:-2]#remove last comma and space
print(outString[:-1]+';')<line_sep> |
<import_stmt>vim os sys<line_sep># Add the library to the Python path.
<for_stmt>p vim.eval("&runtimepath").split(',')<block_start>plugin_dir=os.path.join(p "autoload")<if_stmt>os.path.exists(os.path.join(plugin_dir "splicelib"))<block_start><if_stmt>plugin_dir<not><in>sys.path<block_start>sys.path.append(plugin_dir)<block_end><break><block_end><block_end><import_stmt>splicelib.init<as>splice<line_sep># Wrapper functions ----------------------------------------------------------------
<def_stmt>SpliceInit <block_start>splice.init()<block_end><def_stmt>SpliceOriginal <block_start>splice.modes.current_mode.key_original()<block_end><def_stmt>SpliceOne <block_start>splice.modes.current_mode.key_one()<block_end><def_stmt>SpliceTwo <block_start>splice.modes.current_mode.key_two()<block_end><def_stmt>SpliceResult <block_start>splice.modes.current_mode.key_result()<block_end><def_stmt>SpliceGrid <block_start>splice.modes.key_grid()<block_end><def_stmt>SpliceLoupe <block_start>splice.modes.key_loupe()<block_end><def_stmt>SpliceCompare <block_start>splice.modes.key_compare()<block_end><def_stmt>SplicePath <block_start>splice.modes.key_path()<block_end><def_stmt>SpliceDiff <block_start>splice.modes.current_mode.key_diff()<block_end><def_stmt>SpliceDiffoff <block_start>splice.modes.current_mode.key_diffoff()<block_end><def_stmt>SpliceScroll <block_start>splice.modes.current_mode.key_scrollbind()<block_end><def_stmt>SpliceLayout <block_start>splice.modes.current_mode.key_layout()<block_end><def_stmt>SpliceNext <block_start>splice.modes.current_mode.key_next()<block_end><def_stmt>SplicePrev <block_start>splice.modes.current_mode.key_prev()<block_end><def_stmt>SpliceUse <block_start>splice.modes.current_mode.key_use()<block_end><def_stmt>SpliceUse1 <block_start>splice.modes.current_mode.key_use1()<block_end><def_stmt>SpliceUse2 <block_start>splice.modes.current_mode.key_use2()<block_end> |
<import_from_stmt>dataclasses dataclass<import_stmt>functions<as>fx<import_stmt>glow.gwas.log_reg<as>lr<import_stmt>glow.gwas.approx_firth<as>af<import_stmt>pandas<as>pd<import_from_stmt>nptyping Float NDArray<import_stmt>numpy<as>np<import_stmt>pytest<import_from_stmt>typing Any<line_sep>@dataclass<class_stmt>TestData<block_start>phenotypes:NDArray[(Any ) Float]<line_sep>covariates:NDArray[(Any Any) Float]<line_sep>offset:NDArray[(Any ) Float]<block_end><def_stmt>_get_test_data use_offset use_intercept<block_start>test_file='test-data/r/sex2withoffset.txt'<line_sep>df=pd.read_table(test_file delimiter='\t').astype('float64')<line_sep>phenotypes=df['case']<line_sep>covariates=df.loc[: 'age':'dia']<if_stmt>use_intercept<block_start>covariates.loc[: 'intercept']=1<block_end>offset=df['offset']<if_stmt><not>use_offset<block_start>offset=offset<times>0<block_end><return>TestData(phenotypes.to_numpy() covariates.to_numpy() offset.to_numpy())<block_end><def_stmt>_compare_full_firth_beta test_data golden_firth_beta<block_start>beta_init=np.zeros(test_data.covariates.shape[1])<line_sep>X=test_data.covariates<line_sep>y=test_data.phenotypes<line_sep>offset=test_data.offset<line_sep>test_firth_fit=af._fit_firth(beta_init=beta_init X=X y=y offset=offset)<line_sep>test_firth_beta=test_firth_fit.beta<assert_stmt>np.allclose(golden_firth_beta test_firth_beta)<block_end><def_stmt>test_full_firth # table = read.table("sex2withoffset.txt", header=True)
# logistf(case ~ age+oc+vic+vicl+vis+dia+offset(offset), data=table)
<block_start>golden_firth_beta=[-1.1715911 # age
0.1568537 # oc
2.4752617 # vic
-2.2125007 # vicl
-0.8604622 # vis
2.7397140 # dia
-0.5679234# intercept
]<line_sep>test_data=_get_test_data(use_offset=<true> use_intercept=<true>)<line_sep>_compare_full_firth_beta(test_data golden_firth_beta)<block_end><def_stmt>test_full_firth_no_offset # logistf(case ~ age+oc+vic+vicl+vis+dia, data=table)
<block_start>golden_firth_beta=[-1.10598130 # age
-0.06881673 # oc
2.26887464 # vic
-2.11140816 # vicl
-0.78831694 # vis
3.09601263 # dia
0.12025404# intercept
]<line_sep>test_data=_get_test_data(use_offset=<false> use_intercept=<true>)<line_sep>_compare_full_firth_beta(test_data golden_firth_beta)<block_end><def_stmt>test_full_firth_no_intercept # logistf(case ~ age+oc+vic+vicl+vis+dia+offset(offset)-1, data=table)
<block_start>golden_firth_beta=[-1.2513849 # age
-0.3141151 # oc
2.2066573 # vic
-2.2988439 # vicl
-0.9922712 # vis
2.7046574# dia
]<line_sep>test_data=_get_test_data(use_offset=<true> use_intercept=<false>)<line_sep>_compare_full_firth_beta(test_data golden_firth_beta)<block_end><def_stmt>test_null_firth_fit_no_offset <block_start>golden_firth_beta=[-1.10598130 # age
-0.06881673 # oc
2.26887464 # vic
-2.11140816 # vicl
-0.78831694 # vis
3.09601263 # dia
0.12025404# intercept
]<line_sep>test_data=_get_test_data(use_offset=<false> use_intercept=<true>)<line_sep>fit=af.perform_null_firth_fit(test_data.phenotypes test_data.covariates ~np.isnan(test_data.phenotypes) <none> includes_intercept=<true>)<assert_stmt>np.allclose(fit test_data.covariates@golden_firth_beta)<block_end><def_stmt>_read_regenie_df file trait num_snps<block_start>df=pd.read_table(file sep=r'\s+')<line_sep>df=df[df['ID']<le>num_snps]<line_sep>df['phenotype']=trait<line_sep><return>df<block_end><def_stmt>compare_corrections_to_regenie spark pvalue_threshold output_prefix compare_all_cols uncorrected corrected missing=[]<block_start>(genotype_df phenotype_df covariate_df offset_df)=fx.get_input_dfs(spark binary=<true> missing=missing)<line_sep>glowgr_df=lr.logistic_regression(genotype_df phenotype_df covariate_df offset_df correction=lr.correction_approx_firth pvalue_threshold=pvalue_threshold values_column='values').toPandas()<line_sep>fx.compare_to_regenie(output_prefix glowgr_df compare_all_cols)<line_sep>correction_counts=glowgr_df.correctionSucceeded.value_counts(dropna=<false>).to_dict()<if_stmt>uncorrected<g>0# null in Spark DataFrame converts to nan in pandas
<block_start><assert_stmt>correction_counts[np.nan]<eq>uncorrected<block_end><if_stmt>corrected<g>0<block_start><assert_stmt>correction_counts[<true>]<eq>corrected<block_end><assert_stmt><false><not><in>correction_counts<line_sep><return>glowgr_df<block_end>@pytest.mark.min_spark('3')<def_stmt>test_correct_all_versus_regenie spark<block_start>compare_corrections_to_regenie(spark 0.9999 'test_bin_out_firth_' compare_all_cols=<true> uncorrected=0 corrected=200)<block_end>@pytest.mark.min_spark('3')<def_stmt>test_correct_half_versus_regenie spark<block_start>compare_corrections_to_regenie(spark 0.5 'test_bin_out_half_firth_' compare_all_cols=<false> uncorrected=103 corrected=97)<block_end>@pytest.mark.min_spark('3')<def_stmt>test_correct_missing_versus_regenie spark<block_start>compare_corrections_to_regenie(spark 0.9999 'test_bin_out_missing_firth_' compare_all_cols=<true> uncorrected=0 corrected=200 missing=['35_35' '136_136' '77_77' '100_100' '204_204' '474_474'])<block_end> |
"""Support for the SmartWeather weather service."""<import_stmt>logging<import_from_stmt>typing Dict List<import_from_stmt>homeassistant.components.weather ATTR_FORECAST_CONDITION ATTR_FORECAST_PRECIPITATION ATTR_FORECAST_PRECIPITATION_PROBABILITY ATTR_FORECAST_TEMP ATTR_FORECAST_TEMP_LOW ATTR_FORECAST_TIME ATTR_FORECAST_WIND_BEARING ATTR_FORECAST_WIND_SPEED ATTR_WEATHER_HUMIDITY ATTR_WEATHER_PRESSURE ATTR_WEATHER_TEMPERATURE ATTR_WEATHER_WIND_BEARING ATTR_WEATHER_WIND_SPEED WeatherEntity <import_from_stmt>homeassistant.config_entries ConfigEntry<import_from_stmt>homeassistant.const CONF_ID TEMP_CELSIUS <import_from_stmt>homeassistant.core HomeAssistant<import_from_stmt>homeassistant.util.dt utc_from_timestamp<import_from_stmt>homeassistant.util.temperature celsius_to_fahrenheit<import_from_stmt>pysmartweatherio FORECAST_TYPE_DAILY<import_from_stmt>.const DOMAIN ATTR_CURRENT_ICON ATTR_FCST_UV ATTR_TEMP_HIGH_TODAY ATTR_TEMP_LOW_TODAY DEFAULT_ATTRIBUTION DEVICE_TYPE_WEATHER CONDITION_CLASSES <import_from_stmt>.entity SmartWeatherEntity<line_sep>_LOGGER=logging.getLogger(__name__)<async_keyword><def_stmt>async_setup_entry hass:HomeAssistant entry:ConfigEntry async_add_entities<arrow><none><block_start>"""Add a weather entity from station_id."""<line_sep>unit_system="metric"<if>hass.config.units.is_metric<else>"imperial"<line_sep>fcst_coordinator=hass.data[DOMAIN][entry.entry_id]["fcst_coordinator"]<if_stmt><not>fcst_coordinator.data<block_start><return><block_end>coordinator=hass.data[DOMAIN][entry.entry_id]["coordinator"]<if_stmt><not>coordinator.data<block_start><return><block_end>station_info=hass.data[DOMAIN][entry.entry_id]["station"]<if_stmt><not>station_info<block_start><return><block_end>fcst_type=hass.data[DOMAIN][entry.entry_id]["fcst_type"]<if_stmt><not>fcst_type<block_start><return><block_end>weather_entity=SmartWeatherWeather(coordinator entry.data DEVICE_TYPE_WEATHER station_info fcst_coordinator unit_system fcst_type )<line_sep>async_add_entities([weather_entity] <true>)<line_sep><return><true><block_end><class_stmt>SmartWeatherWeather(SmartWeatherEntity WeatherEntity)<block_start>"""Representation of a weather entity."""<def_stmt>__init__ self coordinator entries device_type server fcst_coordinator unit_system fcst_type <arrow><none><block_start>"""Initialize the SmartWeather weather entity."""<line_sep>super().__init__(coordinator entries device_type server fcst_coordinator <none>)<line_sep>self._name=f"{DOMAIN.capitalize()} {entries[CONF_ID]}"<line_sep>self._unit_system=unit_system<line_sep>self._forecast_type=fcst_type<block_end>@property<def_stmt>name self<arrow>str<block_start>"""Return the name of the sensor."""<line_sep><return>self._name<block_end>@property<def_stmt>temperature self<arrow>int<block_start>"""Return the temperature."""<if_stmt>self._current<is><not><none><block_start><return>self._current.air_temperature<block_end><return><none><block_end>@property<def_stmt>temperature_unit self<arrow>str<block_start>"""Return the unit of measurement."""<line_sep><return>TEMP_CELSIUS<block_end>@property<def_stmt>humidity self<arrow>int<block_start>"""Return the humidity."""<if_stmt>self._current<is><not><none><block_start><return>self._current.relative_humidity<block_end><return><none><block_end>@property<def_stmt>wind_speed self<arrow>float<block_start>"""Return the wind speed."""<if_stmt>self._current<is><not><none><block_start><return>self._current.wind_avg<block_end><return><none><block_end>@property<def_stmt>wind_gust self<arrow>float<block_start>"""Return the wind 
Gust."""<if_stmt>self._current<is><not><none><block_start><return>self._current.wind_gust<block_end><return><none><block_end>@property<def_stmt>wind_bearing self<arrow>int<block_start>"""Return the wind bearing."""<if_stmt>self._current<is><not><none><block_start><return>self._current.wind_bearing<block_end><return><none><block_end>@property<def_stmt>precipitation self<arrow>float<block_start>"""Return the precipitation."""<if_stmt>self._current<is><not><none><block_start><return>round(self._current.precip_accum_local_day 1)<block_end><return><none><block_end>@property<def_stmt>pressure self<arrow>int<block_start>"""Return the pressure."""<if_stmt>self._current<is><not><none><block_start><if_stmt>self._unit_system<eq>"imperial"<block_start><return>round(self._current.sea_level_pressure 3)<block_end><return>round(self._current.sea_level_pressure 2)<block_end><return><none><block_end>@property<def_stmt>uv self<arrow>int<block_start>"""Return the UV Index."""<if_stmt>self._current<is><not><none><block_start><return>round(self._current.uv 1)<block_end><return><none><block_end>@property<def_stmt>current_condition self<arrow>int<block_start>"""Return Current Condition Icon."""<if_stmt>self._forecast<is><not><none><block_start><return>self._forecast.current_icon<block_end><return><none><block_end>@property<def_stmt>condition self<arrow>str<block_start>"""Return the weather condition."""<line_sep><return>next((k<for>k,v CONDITION_CLASSES.items()<if>self.current_condition<in>v) <none> )<block_end>@property<def_stmt>temp_high_today self<arrow>float<block_start>"""Return Todays High Temp Forecast."""<if_stmt>self._forecast<is><not><none><block_start><if_stmt>self._unit_system<eq>"imperial"<block_start><return>celsius_to_fahrenheit(self._forecast.temp_high_today)<block_end><return>self._forecast.temp_high_today<block_end><return><none><block_end>@property<def_stmt>temp_low_today self<arrow>float<block_start>"""Return Todays Low Temp Forecast."""<if_stmt>self._forecast<is><not><none><block_start><if_stmt>self._unit_system<eq>"imperial"<block_start><return>celsius_to_fahrenheit(self._forecast.temp_low_today)<block_end><return>self._forecast.temp_low_today<block_end><return><none><block_end>@property<def_stmt>attribution self<arrow>str<block_start>"""Return the attribution."""<line_sep><return>DEFAULT_ATTRIBUTION<block_end>@property<def_stmt>device_state_attributes self<arrow>Dict<block_start>"""Return SmartWeather specific attributes."""<line_sep><return>{ATTR_CURRENT_ICON:self.current_condition ATTR_FCST_UV:self.uv ATTR_WEATHER_HUMIDITY:self.humidity ATTR_WEATHER_PRESSURE:self.pressure ATTR_WEATHER_TEMPERATURE:self.temperature ATTR_WEATHER_WIND_BEARING:self.wind_bearing ATTR_WEATHER_WIND_SPEED:self.wind_speed ATTR_TEMP_HIGH_TODAY:self.temp_high_today ATTR_TEMP_LOW_TODAY:self.temp_low_today }<block_end>@property<def_stmt>forecast self<arrow>List<block_start>"""Return the forecast."""<if_stmt>self.fcst_coordinator.data<is><none><or>len(self.fcst_coordinator.data)<l>2<block_start><return><none><block_end>data=[]<for_stmt>forecast self.fcst_coordinator.data<block_start>condition=next((k<for>k,v CONDITION_CLASSES.items()<if>forecast.icon<in>v) <none> )<if_stmt>self._forecast_type<eq>FORECAST_TYPE_DAILY<block_start>data.append({ATTR_FORECAST_TIME:utc_from_timestamp(forecast.epochtime).isoformat() ATTR_FORECAST_TEMP:forecast.temp_high ATTR_FORECAST_TEMP_LOW:forecast.temp_low ATTR_FORECAST_PRECIPITATION:round(forecast.precip 1)<if>forecast.precip<is><not><none><else><none> 
ATTR_FORECAST_PRECIPITATION_PROBABILITY:forecast.precip_probability ATTR_FORECAST_CONDITION:condition ATTR_FORECAST_WIND_SPEED:forecast.wind_avg ATTR_FORECAST_WIND_BEARING:forecast.wind_bearing })<block_end><else_stmt><block_start>data.append({ATTR_FORECAST_TIME:utc_from_timestamp(forecast.epochtime).isoformat() ATTR_FORECAST_TEMP:forecast.temperature ATTR_FORECAST_PRECIPITATION:round(forecast.precip 1)<if>forecast.precip<is><not><none><else><none> ATTR_FORECAST_PRECIPITATION_PROBABILITY:forecast.precip_probability ATTR_FORECAST_CONDITION:condition ATTR_FORECAST_WIND_SPEED:forecast.wind_avg ATTR_FORECAST_WIND_BEARING:forecast.wind_bearing })<block_end><block_end><return>data<block_end><block_end> |
doctests="""
########### Tests mostly copied from test_listcomps.py ############
Test simple loop with conditional
>>> sum({i*i for i in range(100) if i&1 == 1})
166650
Test simple case
>>> {2*y + x + 1 for x in (0,) for y in (1,)}
set([3])
Test simple nesting
>>> list(sorted({(i,j) for i in range(3) for j in range(4)}))
[(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
Test nesting with the inner expression dependent on the outer
>>> list(sorted({(i,j) for i in range(4) for j in range(i)}))
[(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2)]
Make sure the induction variable is not exposed
>>> i = 20
>>> sum({i*i for i in range(100)})
328350
>>> i
20
Verify that syntax errors are raised for setcomps used as lvalues
>>> {y for y in (1,2)} = 10 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
SyntaxError: ...
>>> {y for y in (1,2)} += 10 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
SyntaxError: ...
Make a nested set comprehension that acts like set(range())
>>> def srange(n):
... return {i for i in range(n)}
>>> list(sorted(srange(10)))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Same again, only as a lambda expression instead of a function definition
>>> lrange = lambda n: {i for i in range(n)}
>>> list(sorted(lrange(10)))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Generators can call other generators:
>>> def grange(n):
... for x in {i for i in range(n)}:
... yield x
>>> list(sorted(grange(5)))
[0, 1, 2, 3, 4]
Make sure that None is a valid return value
>>> {None for i in range(10)}
set([None])
########### Tests for various scoping corner cases ############
Return lambdas that use the iteration variable as a default argument
>>> items = {(lambda i=i: i) for i in range(5)}
>>> {x() for x in items} == set(range(5))
True
Same again, only this time as a closure variable
>>> items = {(lambda: i) for i in range(5)}
>>> {x() for x in items}
set([4])
Another way to test that the iteration variable is local to the set comp
>>> items = {(lambda: i) for i in range(5)}
>>> i = 20
>>> {x() for x in items}
set([4])
And confirm that a closure can jump over the set comp scope
>>> items = {(lambda: y) for i in range(5)}
>>> y = 2
>>> {x() for x in items}
set([2])
We also repeat each of the above scoping tests inside a function
>>> def test_func():
... items = {(lambda i=i: i) for i in range(5)}
... return {x() for x in items}
>>> test_func() == set(range(5))
True
>>> def test_func():
... items = {(lambda: i) for i in range(5)}
... return {x() for x in items}
>>> test_func()
set([4])
>>> def test_func():
... items = {(lambda: i) for i in range(5)}
... i = 20
... return {x() for x in items}
>>> test_func()
set([4])
>>> def test_func():
... items = {(lambda: y) for i in range(5)}
... y = 2
... return {x() for x in items}
>>> test_func()
set([2])
"""<line_sep>__test__={'doctests':doctests}<def_stmt>test_main verbose=<none><block_start><import_stmt>sys<import_from_stmt>test test_support<import_from_stmt>test test_setcomps<line_sep>test_support.run_doctest(test_setcomps verbose)<line_sep># verify reference counting
<if_stmt>verbose<and>hasattr(sys "gettotalrefcount")<block_start><import_stmt>gc<line_sep>counts=[<none>]<times>5<for_stmt>i range(len(counts))<block_start>test_support.run_doctest(test_setcomps verbose)<line_sep>gc.collect()<line_sep>counts[i]=sys.gettotalrefcount()<block_end>print(counts)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>test_main(verbose=<true>)<block_end> |
<import_stmt>unittest<import_from_stmt>quickbooks QuickBooks<import_from_stmt>quickbooks.objects.vendor Vendor ContactInfo<class_stmt>VendorTests(unittest.TestCase)<block_start><def_stmt>test_unicode self<block_start>vendor=Vendor()<line_sep>vendor.DisplayName="test"<line_sep>self.assertEquals(str(vendor) "test")<block_end><def_stmt>test_to_ref self<block_start>vendor=Vendor()<line_sep>vendor.DisplayName="test"<line_sep>vendor.Id=100<line_sep>ref=vendor.to_ref()<line_sep>self.assertEquals(ref.name "test")<line_sep>self.assertEquals(ref.type "Vendor")<line_sep>self.assertEquals(ref.value 100)<block_end><def_stmt>test_valid_object_name self<block_start>obj=Vendor()<line_sep>client=QuickBooks()<line_sep>result=client.isvalid_object_name(obj.qbo_object_name)<line_sep>self.assertTrue(result)<block_end><block_end><class_stmt>ContactInfoTests(unittest.TestCase)<block_start><def_stmt>test_init self<block_start>contact_info=ContactInfo()<line_sep>self.assertEquals(contact_info.Type "")<line_sep>self.assertEquals(contact_info.Telephone <none>)<block_end><block_end> |
<import_from_stmt>django.urls path<import_from_stmt>. views<import_from_stmt>. api<line_sep>app_name='hcat'<line_sep>urlpatterns=[path('' views.index name='index') path('project/<int:pk>/' views.ProjectDetailView.as_view() name='project_detail') path('project' views.ProjectListView.as_view() name='project_list') path('project/' views.ProjectListView.as_view() name='project_list') ]<line_sep> |
<import_from_stmt>.base CaptionConverter CaptionNode Caption CaptionList CaptionSet <import_from_stmt>.dfxp DFXPWriter DFXPReader<import_from_stmt>.microdvd MicroDVDReader MicroDVDWriter<import_from_stmt>.sami SAMIReader SAMIWriter<import_from_stmt>.srt SRTReader SRTWriter<import_from_stmt>.scc SCCReader SCCWriter<import_from_stmt>.scc.translator translate_scc<import_from_stmt>.webvtt WebVTTReader WebVTTWriter<import_from_stmt>.exceptions CaptionReadError CaptionReadNoCaptions CaptionReadSyntaxError <line_sep>__all__=['CaptionConverter' 'DFXPReader' 'DFXPWriter' 'MicroDVDReader' 'MicroDVDWriter' 'SAMIReader' 'SAMIWriter' 'SRTReader' 'SRTWriter' 'SCCReader' 'SCCWriter' 'translate_scc' 'WebVTTReader' 'WebVTTWriter' 'CaptionReadError' 'CaptionReadNoCaptions' 'CaptionReadSyntaxError' 'detect_format' 'CaptionNode' 'Caption' 'CaptionList' 'CaptionSet']<line_sep>SUPPORTED_READERS=(DFXPReader MicroDVDReader WebVTTReader SAMIReader SRTReader SCCReader )<def_stmt>detect_format caps<block_start>"""
Detect the format of the provided caption string.
:returns: the reader class for the detected format.
"""<for_stmt>reader SUPPORTED_READERS<block_start><if_stmt>reader().detect(caps)<block_start><return>reader<block_end><block_end><return><none><block_end> |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-10-08 13:01
<import_from_future_stmt> unicode_literals<import_from_stmt>django.db migrations<import_from_stmt>osf features<import_from_stmt>osf.utils.migrations AddWaffleFlags<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('osf' '0135_user_settings_waffles') ]<line_sep>operations=[AddWaffleFlags([features.EMBER_AUTH_REGISTER]) ]<block_end> |
<import_from_stmt>gazette.spiders.base.fecam FecamGazetteSpider<class_stmt>ScCorreiaPintoSpider(FecamGazetteSpider)<block_start>name="sc_correia_pinto"<line_sep>FECAM_QUERY="cod_entidade:77"<line_sep>TERRITORY_ID="4204558"<block_end> |
<import_from_stmt>nnunet.training.loss_functions.boundary_loss DC_and_BD_loss<import_from_stmt>nnunet.training.network_training.nnUNetTrainer nnUNetTrainer<class_stmt>nnUNetTrainer_DiceBD(nnUNetTrainer)<block_start><def_stmt>__init__ self plans_file fold output_folder=<none> dataset_directory=<none> batch_dice=<true> stage=<none> unpack_data=<true> deterministic=<true> fp16=<false><block_start>super().__init__(plans_file fold output_folder dataset_directory batch_dice stage unpack_data deterministic fp16)<line_sep>self.loss=DC_and_BD_loss({'batch_dice':self.batch_dice 'smooth':1e-5 'do_bg':<false> 'square':<false>} {})<block_end><block_end> |
"""
Copyright 2020 Skyscanner Ltd
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""<class_stmt>ARN(object)<block_start><def_stmt>__init__ self full partition=<none> service=<none> region=<none> account_id=<none> resource_type=<none> resource=<none> <block_start>self.full=full<line_sep>self.partition=partition<line_sep>self.service=service<line_sep>self.region=region<line_sep>self.account_id=account_id<line_sep>self.resource_type=resource_type<line_sep>self.resource=resource<block_end><def_stmt>to_dict self<block_start><return>{"full":self.full "partition":self.partition "service":self.service "region":self.region "account_id":self.account_id "resource_type":self.resource_type "resource":self.resource }<block_end><block_end><def_stmt>empty_str_to_none str_<block_start><if_stmt>str_<eq>""<block_start><return><none><block_end><return>str_<block_end><def_stmt>arnparse arn_str# https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
<block_start><if_stmt><not>arn_str.startswith("arn:")<or>len(arn_str.split(":"))<l>4<block_start><raise>ValueError("Invalid ARN format: {}".format(arn_str))<block_end>elements=arn_str.split(":" 5)<line_sep>elements<augadd>[""]<times>(6-len(elements))<line_sep>resource=elements[5].split("/")[-1]<line_sep>resource_type=<none><line_sep>service=elements[2]<if_stmt>service<eq>"execute-api"<block_start>service="apigateway"<block_end><if_stmt>service<eq>"iam"<block_start>resource_type="/".join(elements[5].split("/")[:-1])# role type
<block_end><elif_stmt>service<eq>"sts"<block_start>res=elements[5].split("/")<if_stmt>len(res)<g>1<block_start>resource_type=res[0]# assumed-role
resource=res[1]# group
<block_end><block_end><elif_stmt>service<eq>"dynamodb"<block_start>resource_type=elements[5].split("/")[0]# table
resource=elements[5].split("/")[1]# table name
<block_end><elif_stmt>service<eq>"s3"<block_start><if_stmt>len(elements[5].split("/"))<g>1<block_start>resource_type=elements[5].split("/" 1)[1]# objects
<block_end>resource=elements[5].split("/")[0]# bucket name
<block_end><elif_stmt>service<eq>"kms"<block_start>resource_type=elements[5].split("/")[0]<block_end><elif_stmt>service<eq>"logs"<block_start>resource_type=elements[5].split(":")[0]<line_sep>resource=":".join(elements[5].split(":")[1:])<block_end><elif_stmt>service<eq>"apigateway"<block_start>resource_type,*resource=elements[5].split("/")<line_sep>resource="/".join(resource)<block_end><elif_stmt>"/"<in>resource<block_start>resource_type,resource=resource.split("/" 1)<block_end><elif_stmt>":"<in>resource<block_start>resource_type,resource=resource.split(":" 1)<block_end><return>ARN(full=arn_str partition=elements[1] service=service region=empty_str_to_none(elements[3]) account_id=empty_str_to_none(elements[4]) resource_type=resource_type resource=resource )<block_end> |
"""
Given two strings s and t, determine if they are isomorphic.
Two strings are isomorphic if the characters in s can be replaced to get t.
All occurrences of a character must be replaced with another character while preserving the order of characters. No two
characters may map to the same character but a character may map to itself.
For example,
Given "egg", "add", return true.
Given "foo", "bar", return false.
Given "paper", "title", return true.
Note:
You may assume both s and t have the same length.
"""<line_sep>__author__='Daniel'<class_stmt>Solution<block_start><def_stmt>isIsomorphic self s t<block_start>"""
:param s: first input string
:param t: second input string
:rtype: bool
"""<line_sep>m={}<line_sep>mapped=set()# case "ab", "aa"
<for_stmt>i xrange(len(s))<block_start><if_stmt>s[i]<not><in>m<and>t[i]<not><in>mapped<block_start>m[s[i]]=t[i]<line_sep>mapped.add(t[i])<block_end><elif_stmt>s[i]<in>m<and>m[s[i]]<eq>t[i]<block_start><pass><block_end><else_stmt><block_start><return><false><block_end><block_end><return><true><block_end><block_end> |
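The docstring's own examples double as a quick check of Solution.isIsomorphic (note the snippet targets Python 2, since it uses xrange):
sol = Solution()
assert sol.isIsomorphic("egg", "add")
assert not sol.isIsomorphic("foo", "bar")
assert sol.isIsomorphic("paper", "title")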
#
# Copyright 2016 Metamarkets Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_stmt>json<import_from_stmt>pydruid.client BaseDruidClient<try_stmt><block_start><import_from_stmt>tornado gen<import_from_stmt>tornado.httpclient AsyncHTTPClient HTTPError<block_end><except_stmt>ImportError<block_start>print("Warning: unable to import Tornado. The asynchronous client will not work.")<block_end><class_stmt>AsyncPyDruid(BaseDruidClient)<block_start>"""
Asynchronous PyDruid client that mirrors the functionality of the synchronous
PyDruid client but executes queries
asynchronously (using an asynchronous HTTP client from the Tornado framework).
Returns Query objects that can be used for exporting query results into
TSV files or pandas.DataFrame objects
for subsequent analysis.
:param str url: URL of Broker node in the Druid cluster
:param str endpoint: Endpoint that Broker listens for queries on
:param dict defaults: (optional) Dict of parameters for the Async HTTP Client subclass
:param str http_client: Tornado HTTP client implementation to use.
Default: None (use simple_httpclient)
Example
.. code-block:: python
:linenos:
>>> from pydruid.async_client import *
>>> query = AsyncPyDruid('http://localhost:8083', 'druid/v2/')
>>> top = yield query.topn(
datasource='twitterstream',
granularity='all',
intervals='2013-10-04/pt1h',
aggregations={"count": doublesum("count")},
dimension='user_name',
filter = Dimension('user_lang') == 'en',
metric='count',
threshold=2
)
>>> print json.dumps(top.query_dict, indent=2)
>>> {
"metric": "count",
"aggregations": [
{
"type": "doubleSum",
"fieldName": "count",
"name": "count"
}
],
"dimension": "user_name",
"filter": {
"type": "selector",
"dimension": "user_lang",
"value": "en"
},
"intervals": "2013-10-04/pt1h",
"dataSource": "twitterstream",
"granularity": "all",
"threshold": 2,
"queryType": "topN"
}
>>> print top.result
>>> [{'timestamp': '2013-10-04T00:00:00.000Z',
'result': [{'count': 7.0, 'user_name': 'user_1'},
{'count': 6.0, 'user_name': 'user_2'}]}]
>>> df = top.export_pandas()
>>> print df
>>> count timestamp user_name
0 7 2013-10-04T00:00:00.000Z user_1
1 6 2013-10-04T00:00:00.000Z user_2
"""<def_stmt>__init__ self url endpoint defaults=<none> http_client=<none><block_start>super(AsyncPyDruid self).__init__(url endpoint)<line_sep>self.async_http_defaults=defaults<line_sep>self.http_client=http_client<block_end>@gen.coroutine<def_stmt>_post self query<block_start>AsyncHTTPClient.configure(self.http_client defaults=self.async_http_defaults)<line_sep>http_client=AsyncHTTPClient()<try_stmt><block_start>headers,querystr,url=self._prepare_url_headers_and_body(query)<line_sep>response=<yield>http_client.fetch(url method="POST" headers=headers body=querystr)<block_end><except_stmt>HTTPError<as>e<block_start>self.__handle_http_error(e query)<block_end><else_stmt><block_start>query.parse(response.body.decode("utf-8"))<line_sep><raise>gen.Return(query)<block_end><block_end>@staticmethod<def_stmt>__handle_http_error e query<block_start>err=<none><if_stmt>e.code<eq>500# has Druid returned an error?
<block_start><try_stmt><block_start>err=json.loads(e.response.body.decode("utf-8"))<block_end><except_stmt>ValueError<block_start><pass><block_end><else_stmt><block_start>err=err.get("error" <none>)<block_end><block_end><raise>IOError("{0} \n Druid Error: {1} \n Query is: {2}".format(e err json.dumps(query.query_dict indent=4)))<block_end>@gen.coroutine<def_stmt>topn self **kwargs<block_start>query=self.query_builder.topn(kwargs)<line_sep>result=<yield>self._post(query)<line_sep><raise>gen.Return(result)<block_end>@gen.coroutine<def_stmt>timeseries self **kwargs<block_start>query=self.query_builder.timeseries(kwargs)<line_sep>result=<yield>self._post(query)<line_sep><raise>gen.Return(result)<block_end>@gen.coroutine<def_stmt>groupby self **kwargs<block_start>query=self.query_builder.groupby(kwargs)<line_sep>result=<yield>self._post(query)<line_sep><raise>gen.Return(result)<block_end>@gen.coroutine<def_stmt>segment_metadata self **kwargs<block_start>query=self.query_builder.segment_metadata(kwargs)<line_sep>result=<yield>self._post(query)<line_sep><raise>gen.Return(result)<block_end>@gen.coroutine<def_stmt>time_boundary self **kwargs<block_start>query=self.query_builder.time_boundary(kwargs)<line_sep>result=<yield>self._post(query)<line_sep><raise>gen.Return(result)<block_end>@gen.coroutine<def_stmt>select self **kwargs<block_start>query=self.query_builder.select(kwargs)<line_sep>result=<yield>self._post(query)<line_sep><raise>gen.Return(result)<block_end><block_end> |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>paddle<import_stmt>paddle.fluid<as>fluid<import_stmt>numpy<as>np<import_stmt>six<import_stmt>os<import_stmt>unittest<import_from_stmt>simple_nets simple_fc_net_with_inputs<line_sep>BATCH_SIZE=32<line_sep>BATCH_NUM=10<line_sep>EPOCH_NUM=4<line_sep>IMAGE_SHAPE=[2 3]<line_sep>LABEL_SHAPE=[1]<line_sep>ALL_WRITTEN_FILES=set()<def_stmt>get_place_string p<block_start><if_stmt>isinstance(p (fluid.CPUPlace<or>fluid.CUDAPlace))<block_start>tmp=fluid.core.Place()<line_sep>tmp.set_place(p)<line_sep>p=tmp<block_end><if_stmt>p._type()<eq>fluid.CPUPlace()._type()<block_start><return>'CPUPlace()'<block_end><else_stmt><block_start><return>'CUDAPlace()'<block_end><block_end><def_stmt>remove_all_written_files <block_start><for_stmt>filename ALL_WRITTEN_FILES<block_start>os.remove(filename)<block_end><block_end><def_stmt>write_reader_data_to_file filename reader<block_start>ALL_WRITTEN_FILES.add(filename)<with_stmt>open(filename 'w')<as>fid<block_start><for_stmt>instance_list reader()<block_start><for_stmt>i,instance enumerate(instance_list)<block_start>instance=np.reshape(instance [instance.size ])<line_sep>fid.write(str(instance.size)+' ')<line_sep>fid.write(' '.join(map(str instance)))<line_sep>fid.write(' ')<block_end>fid.write('\n')<block_end><block_end><block_end><def_stmt>fake_reader batch_size=BATCH_SIZE batch_num=BATCH_NUM<block_start><def_stmt>__reader__ <block_start>iteration=BATCH_SIZE<times>BATCH_NUM<line_sep>iteration=int(iteration+BATCH_SIZE/2)<for_stmt>_ six.moves.range(iteration)<block_start>image=np.random.random(size=IMAGE_SHAPE).astype('float32')<line_sep>label=np.random.random_integers(size=LABEL_SHAPE low=0 high=9).astype('int64')<line_sep><yield>image label<block_end><block_end><return>__reader__<block_end><class_stmt>DatasetLoaderTestBase(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.dataset_name="QueueDataset"<line_sep>self.drop_last=<false><block_end><def_stmt>tearDown self<block_start><return><line_sep>remove_all_written_files()<block_end><def_stmt>build_network self<block_start>main_prog=fluid.Program()<line_sep>startup_prog=fluid.Program()<with_stmt>fluid.program_guard(main_prog startup_prog)<block_start>image=fluid.layers.data(name='image' shape=IMAGE_SHAPE dtype='float32')<line_sep>label=fluid.layers.data(name='label' shape=LABEL_SHAPE dtype='int64')<line_sep>simple_fc_net_with_inputs(image label)<block_end><return>main_prog startup_prog [image label]<block_end><def_stmt>check_batch_number self place randomize_batch_num=<false><block_start>main_prog,startup_prog,feeds=self.build_network()<if_stmt>self.dataset_name<eq>"QueueDataset"<block_start>dataset=paddle.distributed.QueueDataset()<block_end><else_stmt><block_start>dataset=paddle.distributed.InMemoryDataset()<block_end>dataset._set_batch_size(BATCH_SIZE)<if_stmt>isinstance(place fluid.CPUPlace)<block_start>file_num=10<line_sep>os.environ['CPU_NUM']=str(file_num)<line_sep>places=fluid.cpu_places()<line_sep>use_cuda=<false><block_end><else_stmt><block_start>file_num=fluid.core.get_cuda_device_count()<line_sep>places=fluid.cuda_places()<line_sep>use_cuda=<true><block_end>filelist=[]<if_stmt>file_num<g>1<and>randomize_batch_num<block_start>random_delta_batch_size=np.random.random_integers(low=-BATCH_NUM/2 high=BATCH_NUM/2 size=[file_num])<line_sep>random_delta_batch_size[-1]=-int(np.sum(random_delta_batch_size[0:-1]))<block_end><else_stmt><block_start>random_delta_batch_size=np.zeros(shape=[file_num])<block_end><for_stmt>i 
six.moves.range(file_num)<block_start>filename='dataset_test_{}.txt'.format(i)<line_sep>filelist.append(filename)<line_sep>write_reader_data_to_file(filename fake_reader(batch_num=BATCH_NUM+random_delta_batch_size[i]))<block_end>dataset.set_filelist(filelist)<line_sep>dataset._set_use_var(feeds)<line_sep>dataset._set_pipe_command("cat")<if_stmt>self.dataset_name<eq>'InMemoryDataset'<block_start>dataset.load_into_memory()<block_end>dataloader=fluid.io.DataLoader.from_dataset(dataset=dataset places=places drop_last=self.drop_last)<line_sep>prog=fluid.CompiledProgram(main_prog).with_data_parallel()<line_sep>exe=fluid.Executor(place)<line_sep>exe.run(startup_prog)<for_stmt>_ six.moves.range(EPOCH_NUM)<block_start>has_complete_batch=<false><for_stmt>batch_id,data enumerate(dataloader)<block_start>self.assertEquals(len(places) len(data))<for_stmt>idx,data_on_each_device enumerate(data)<block_start>image=data_on_each_device["image"]<line_sep>label=data_on_each_device["label"]<if_stmt>self.drop_last<block_start>batch_size=BATCH_SIZE<block_end><else_stmt><block_start><if_stmt>batch_id<eq>BATCH_NUM<block_start>batch_size=BATCH_SIZE/2<block_end><else_stmt><block_start>batch_size=BATCH_SIZE<block_end><block_end>self.assertEquals(image.shape()[1:] IMAGE_SHAPE)<line_sep>self.assertTrue(image._place()._equals(places[idx]) msg=get_place_string(image._place())+' vs '+get_place_string(places[idx]))<if_stmt>self.drop_last<block_start>self.assertEquals(image.shape()[0] BATCH_SIZE)<block_end><else_stmt><block_start>self.assertTrue(image.shape()[0]<eq>BATCH_SIZE<or>image.shape()[0]<eq>BATCH_SIZE/2)<block_end>self.assertEquals(label.shape()[1:] LABEL_SHAPE)<line_sep>self.assertTrue(label._place()._equals(places[idx]))<if_stmt>self.drop_last<block_start>self.assertEquals(label.shape()[0] BATCH_SIZE)<block_end><else_stmt><block_start>self.assertTrue(label.shape()[0]<eq>BATCH_SIZE<or>label.shape()[0]<eq>BATCH_SIZE/2)<block_end>self.assertEquals(image.shape()[0] label.shape()[0])<if_stmt>image.shape()[0]<eq>BATCH_SIZE<block_start>has_complete_batch=<true><block_end><block_end>exe.run(prog feed=data)<block_end>self.assertTrue(has_complete_batch)<block_end><block_end><def_stmt>get_all_places self<block_start>p=[fluid.CPUPlace()]<if_stmt>fluid.is_compiled_with_cuda()<block_start>p.append(fluid.CUDAPlace(0))<block_end><return>p<block_end><def_stmt>test_batch_number_with_same_length_files self<block_start><for_stmt>p self.get_all_places()<block_start><with_stmt>fluid.scope_guard(fluid.Scope())<block_start>self.check_batch_number(place=p randomize_batch_num=<false>)<block_end><block_end><block_end><def_stmt>test_batch_number_with_different_length_files self<block_start><for_stmt>p self.get_all_places()<block_start><with_stmt>fluid.scope_guard(fluid.Scope())<block_start>self.check_batch_number(place=p randomize_batch_num=<true>)<block_end><block_end><block_end><block_end><class_stmt>QueueDatasetTestWithoutDropLast(DatasetLoaderTestBase)<block_start><def_stmt>setUp self<block_start>self.dataset_name="QueueDataset"<line_sep>self.drop_last=<true><block_end><block_end><class_stmt>InMemoryDatasetTestWithoutDropLast(DatasetLoaderTestBase)<block_start><def_stmt>setUp self<block_start>self.dataset_name="InMemoryDataset"<line_sep>self.drop_last=<false><block_end><block_end><class_stmt>InMemoryDatasetTestWithDropLast(DatasetLoaderTestBase)<block_start><def_stmt>setUp 
self<block_start>self.dataset_name="InMemoryDataset"<line_sep>self.drop_last=<true><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
<import_from_stmt>collections OrderedDict<import_from_stmt>artemis.experiments.experiment_record_view show_record compare_experiment_records<import_from_stmt>artemis.experiments.experiments Experiment<import_from_stmt>artemis.general.display sensible_str<import_from_stmt>artemis.general.should_be_builtins uniquify_duplicates izip_equal<def_stmt>experiment_function f<block_start>"""
Use this decorator (@experiment_function) on a function that you want to run. e.g.
.. code-block:: python
@experiment_function
def demo_my_experiment(a=1, b=2, c=3):
...
This turns your function demo_my_experiment into an experiment. It can still be called as a normal function, but
it can now also be called with the methods of an Experiment object (e.g. demo_my_experiment.run()).
"""<line_sep><return>ExperimentFunction()(f)<block_end><def_stmt>experiment_root f<block_start>"""
Use this decorator on a function that you want to build variants off of:
.. code-block:: python
@experiment_root
def demo_my_experiment(a, b=2, c=3):
...
The root experiment is not runnable by itself, and will not appear in the list in the browse experiments UI, but
you can call ``demo_my_experiment.add_variant(...)`` to create runnable variants.
"""<line_sep><return>ExperimentFunction(is_root=<true>)(f)<block_end><class_stmt>ExperimentFunction(object)<block_start>"""
This is the most general decorator. You can use this to add details on the experiment.
"""<def_stmt>__init__ self show=show_record compare=compare_experiment_records display_function=<none> comparison_function=<none> one_liner_function=sensible_str is_root=<false><block_start>"""
:param show: A function that is called when you "show" an experiment record in the UI. It takes an experiment
record as an argument.
:param compare: A function that is called when you "compare" a set of experiment records in the UI.
:param display_function: [Deprecated] A function that takes the results (whatever your experiment returns) and displays them.
:param comparison_function: [Deprecated] A function that takes an OrderedDict<experiment_name, experiment_return_value>.
You can optionally define this function to compare the results of different experiments.
You can use call this via the UI with the compare_experiment_results command.
:param one_liner_function: A function that takes your results and returns a 1 line string summarizing them.
:param is_root: True to make this a root experiment - so that it is not listed to be run itself.
"""<line_sep>self.show=show<line_sep>self.compare=compare<if_stmt>display_function<is><not><none><block_start><assert_stmt>show<is>show_record "You can't set both display function and show. (display_function is deprecated)"<line_sep>show=<lambda>rec:display_function(rec.get_result())<block_end><if_stmt>comparison_function<is><not><none><block_start><assert_stmt>compare<is>compare_experiment_records "You can't set both display function and show. (display_function is deprecated)"<def_stmt>compare records<block_start>record_experiment_ids_uniquified=uniquify_duplicates(rec.get_experiment_id()<for>rec records)<line_sep>comparison_function(OrderedDict((unique_rid rec.get_result())<for>unique_rid,rec izip_equal(record_experiment_ids_uniquified records)))<block_end><block_end>self.show=show<line_sep>self.compare=compare<line_sep>self.is_root=is_root<line_sep>self.one_liner_function=one_liner_function<block_end><def_stmt>__call__ self f<block_start>f.is_base_experiment=<true><line_sep>ex=Experiment(name=f.__name__ function=f show=self.show compare=self.compare one_liner_function=self.one_liner_function is_root=self.is_root)<line_sep><return>ex<block_end><block_end> |
""" /retrieve
"""<import_stmt>json<import_from_stmt>flask current_app<as>app<import_from_stmt>flask request jsonify Blueprint<import_from_stmt>td4a.models.exception_handler ExceptionHandler HandledException<import_from_stmt>td4a.models.td4ayaml Td4aYaml<import_from_stmt>jsonschema validate<import_from_stmt>jsonschema Draft4Validator FormatChecker<import_from_stmt>jsonschema.exceptions UnknownType<line_sep>api_validate=Blueprint('api_validate' __name__)# pylint: disable=invalid-name
@ExceptionHandler<def_stmt>parse_yaml yamul typ<block_start>_=typ<line_sep>yaml=Td4aYaml()<line_sep>obj=yaml.load(yamul)<line_sep><return>obj<block_end><def_stmt>validation payload<block_start>""" Validate data against a schema
"""<try_stmt><block_start>yaml_safe=Td4aYaml(typ='safe')<line_sep>yaml=Td4aYaml()<line_sep>data=yaml_safe.load(payload['p1'])<line_sep>schema=yaml_safe.load(payload['p2'])<line_sep>errors=[]<line_sep>v=Draft4Validator(schema format_checker=FormatChecker())<for_stmt>error sorted(v.iter_errors(data))<block_start>errors.append(error.message)<block_end><if_stmt>errors<block_start><return>{"p3":yaml.dump({"messages":errors})}<block_end><return>{"p3":yaml.dump({"messages":["validation passed"]})}<block_end><except_stmt>UnknownType<as>error<block_start>error_message=str(error)<line_sep>lines=error_message.splitlines()<line_sep>message=[x<for>x lines<if>x.startswith('Unknown type')]<line_sep><return>{"p3":yaml.dump({"messages":message})}<block_end><block_end>@api_validate.route('/validate' methods=['POST'])<def_stmt>rest_validate <block_start>""" Build a schema for data
"""<try_stmt><block_start>payload=request.json<line_sep>data=parse_yaml(yamul=payload['p1'] typ='p1')<line_sep>schema=parse_yaml(yamul=payload['p2'] typ='p2')<line_sep>response=validation(payload=payload)<line_sep><return>jsonify(response)<block_end><except_stmt>HandledException<as>error<block_start><return>jsonify(error.json())<block_end><block_end> |
<import_from_stmt>.ppo PPO<line_sep> |
<import_from_stmt>torch nn<import_from_stmt>drnn DRNN<class_stmt>DRNN_Copy(nn.Module)<block_start><def_stmt>__init__ self input_size hidden_size num_layers dropout output_size<block_start>super(DRNN_Copy self).__init__()<line_sep>self.drnn=DRNN(cell_type='GRU' dropout=dropout n_hidden=hidden_size n_input=input_size n_layers=num_layers batch_first=<true>)<line_sep>self.linear=nn.Linear(hidden_size output_size)<line_sep>self.init_weights()<block_end><def_stmt>init_weights self<block_start>self.linear.weight.data.normal_(0 0.01)<block_end><def_stmt>forward self x# x: (batch, steps, input_size)
<block_start>y1,_=self.drnn(x)# y1: (batch, steps, hidden_size)
<return>self.linear(y1)<block_end><block_end># (batch, steps, output_size)
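# Usage sketch (shapes are illustrative; torch is assumed to be imported):
#
#     model = DRNN_Copy(input_size=8, hidden_size=16, num_layers=3,
#                       dropout=0.0, output_size=8)
#     x = torch.randn(4, 32, 8)   # (batch, steps, input_size)
#     y = model(x)                # (batch, steps, output_size)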
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>os<import_stmt>sys<line_sep>__dir__=os.path.dirname(__file__)<line_sep>sys.path.append(os.path.join(__dir__ ""))<line_sep>sys.path.append(os.path.join(__dir__ "deploy"))<import_from_stmt>typing Union Generator<import_stmt>argparse<import_stmt>shutil<import_stmt>textwrap<import_stmt>tarfile<import_stmt>requests<import_stmt>warnings<import_from_stmt>functools partial<import_from_stmt>difflib SequenceMatcher<import_stmt>cv2<import_stmt>numpy<as>np<import_from_stmt>tqdm tqdm<import_from_stmt>prettytable PrettyTable<import_from_stmt>deploy.python.predict_cls ClsPredictor<import_from_stmt>deploy.utils.get_image_list get_image_list<import_from_stmt>deploy.utils config<import_from_stmt>ppcls.arch.backbone *<line_sep>__all__=["PaddleClas"]<line_sep>BASE_DIR=os.path.expanduser("~/.paddleclas/")<line_sep>BASE_INFERENCE_MODEL_DIR=os.path.join(BASE_DIR "inference_model")<line_sep>BASE_IMAGES_DIR=os.path.join(BASE_DIR "images")<line_sep>BASE_DOWNLOAD_URL="https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/{}_infer.tar"<line_sep>MODEL_SERIES={"AlexNet":["AlexNet"] "DarkNet":["DarkNet53"] "DeiT":["DeiT_base_distilled_patch16_224" "DeiT_base_distilled_patch16_384" "DeiT_base_patch16_224" "DeiT_base_patch16_384" "DeiT_small_distilled_patch16_224" "DeiT_small_patch16_224" "DeiT_tiny_distilled_patch16_224" "DeiT_tiny_patch16_224"] "DenseNet":["DenseNet121" "DenseNet161" "DenseNet169" "DenseNet201" "DenseNet264"] "DPN":["DPN68" "DPN92" "DPN98" "DPN107" "DPN131"] "EfficientNet":["EfficientNetB0" "EfficientNetB0_small" "EfficientNetB1" "EfficientNetB2" "EfficientNetB3" "EfficientNetB4" "EfficientNetB5" "EfficientNetB6" "EfficientNetB7"] "GhostNet":["GhostNet_x0_5" "GhostNet_x1_0" "GhostNet_x1_3" "GhostNet_x1_3_ssld"] "HRNet":["HRNet_W18_C" "HRNet_W30_C" "HRNet_W32_C" "HRNet_W40_C" "HRNet_W44_C" "HRNet_W48_C" "HRNet_W64_C" "HRNet_W18_C_ssld" "HRNet_W48_C_ssld"] "Inception":["GoogLeNet" "InceptionV3" "InceptionV4"] "MobileNetV1":["MobileNetV1_x0_25" "MobileNetV1_x0_5" "MobileNetV1_x0_75" "MobileNetV1" "MobileNetV1_ssld"] "MobileNetV2":["MobileNetV2_x0_25" "MobileNetV2_x0_5" "MobileNetV2_x0_75" "MobileNetV2" "MobileNetV2_x1_5" "MobileNetV2_x2_0" "MobileNetV2_ssld"] "MobileNetV3":["MobileNetV3_small_x0_35" "MobileNetV3_small_x0_5" "MobileNetV3_small_x0_75" "MobileNetV3_small_x1_0" "MobileNetV3_small_x1_25" "MobileNetV3_large_x0_35" "MobileNetV3_large_x0_5" "MobileNetV3_large_x0_75" "MobileNetV3_large_x1_0" "MobileNetV3_large_x1_25" "MobileNetV3_small_x1_0_ssld" "MobileNetV3_large_x1_0_ssld"] "RegNet":["RegNetX_4GF"] "Res2Net":["Res2Net50_14w_8s" "Res2Net50_26w_4s" "Res2Net50_vd_26w_4s" "Res2Net200_vd_26w_4s" "Res2Net101_vd_26w_4s" "Res2Net50_vd_26w_4s_ssld" "Res2Net101_vd_26w_4s_ssld" "Res2Net200_vd_26w_4s_ssld"] "ResNeSt":["ResNeSt50" "ResNeSt50_fast_1s1x64d"] "ResNet":["ResNet18" "ResNet18_vd" "ResNet34" "ResNet34_vd" "ResNet50" "ResNet50_vc" "ResNet50_vd" "ResNet50_vd_v2" "ResNet101" "ResNet101_vd" "ResNet152" "ResNet152_vd" "ResNet200_vd" "ResNet34_vd_ssld" "ResNet50_vd_ssld" "ResNet50_vd_ssld_v2" "ResNet101_vd_ssld" "Fix_ResNet50_vd_ssld_v2" "ResNet50_ACNet_deploy"] "ResNeXt":["ResNeXt50_32x4d" "ResNeXt50_vd_32x4d" "ResNeXt50_64x4d" "ResNeXt50_vd_64x4d" "ResNeXt101_32x4d" "ResNeXt101_vd_32x4d" "ResNeXt101_32x8d_wsl" "ResNeXt101_32x16d_wsl" "ResNeXt101_32x32d_wsl" "ResNeXt101_32x48d_wsl" "Fix_ResNeXt101_32x48d_wsl" "ResNeXt101_64x4d" "ResNeXt101_vd_64x4d" "ResNeXt152_32x4d" "ResNeXt152_vd_32x4d" "ResNeXt152_64x4d" "ResNeXt152_vd_64x4d"] "SENet":["SENet154_vd" "SE_HRNet_W64_C_ssld" 
"SE_ResNet18_vd" "SE_ResNet34_vd" "SE_ResNet50_vd" "SE_ResNeXt50_32x4d" "SE_ResNeXt50_vd_32x4d" "SE_ResNeXt101_32x4d"] "ShuffleNetV2":["ShuffleNetV2_swish" "ShuffleNetV2_x0_25" "ShuffleNetV2_x0_33" "ShuffleNetV2_x0_5" "ShuffleNetV2_x1_0" "ShuffleNetV2_x1_5" "ShuffleNetV2_x2_0"] "SqueezeNet":["SqueezeNet1_0" "SqueezeNet1_1"] "SwinTransformer":["SwinTransformer_large_patch4_window7_224_22kto1k" "SwinTransformer_large_patch4_window12_384_22kto1k" "SwinTransformer_base_patch4_window7_224_22kto1k" "SwinTransformer_base_patch4_window12_384_22kto1k" "SwinTransformer_base_patch4_window12_384" "SwinTransformer_base_patch4_window7_224" "SwinTransformer_small_patch4_window7_224" "SwinTransformer_tiny_patch4_window7_224"] "VGG":["VGG11" "VGG13" "VGG16" "VGG19"] "VisionTransformer":["ViT_base_patch16_224" "ViT_base_patch16_384" "ViT_base_patch32_384" "ViT_large_patch16_224" "ViT_large_patch16_384" "ViT_large_patch32_384" "ViT_small_patch16_224"] "Xception":["Xception41" "Xception41_deeplab" "Xception65" "Xception65_deeplab" "Xception71"]}<class_stmt>ImageTypeError(Exception)<block_start>"""ImageTypeError.
"""<def_stmt>__init__ self message=""<block_start>super().__init__(message)<block_end><block_end><class_stmt>InputModelError(Exception)<block_start>"""InputModelError.
"""<def_stmt>__init__ self message=""<block_start>super().__init__(message)<block_end><block_end><def_stmt>init_config model_name inference_model_dir use_gpu=<true> batch_size=1 topk=5 **kwargs<block_start>imagenet1k_map_path=os.path.join(os.path.abspath(__dir__) "ppcls/utils/imagenet1k_label_list.txt")<line_sep>cfg={"Global":{"infer_imgs":kwargs["infer_imgs"]<if>"infer_imgs"<in>kwargs<else><false> "model_name":model_name "inference_model_dir":inference_model_dir "batch_size":batch_size "use_gpu":use_gpu "enable_mkldnn":kwargs["enable_mkldnn"]<if>"enable_mkldnn"<in>kwargs<else><false> "cpu_num_threads":kwargs["cpu_num_threads"]<if>"cpu_num_threads"<in>kwargs<else>1 "enable_benchmark":<false> "use_fp16":kwargs["use_fp16"]<if>"use_fp16"<in>kwargs<else><false> "ir_optim":<true> "use_tensorrt":kwargs["use_tensorrt"]<if>"use_tensorrt"<in>kwargs<else><false> "gpu_mem":kwargs["gpu_mem"]<if>"gpu_mem"<in>kwargs<else>8000 "enable_profile":<false>} "PreProcess":{"transform_ops":[{"ResizeImage":{"resize_short":kwargs["resize_short"]<if>"resize_short"<in>kwargs<else>256}} {"CropImage":{"size":kwargs["crop_size"]<if>"crop_size"<in>kwargs<else>224}} {"NormalizeImage":{"scale":0.00392157 "mean":[0.485 0.456 0.406] "std":[0.229 0.224 0.225] "order":''}} {"ToCHWImage":<none>}]} "PostProcess":{"main_indicator":"Topk" "Topk":{"topk":topk "class_id_map_file":imagenet1k_map_path}}}<if_stmt>"save_dir"<in>kwargs<block_start><if_stmt>kwargs["save_dir"]<is><not><none><block_start>cfg["PostProcess"]["SavePreLabel"]={"save_dir":kwargs["save_dir"]}<block_end><block_end><if_stmt>"class_id_map_file"<in>kwargs<block_start><if_stmt>kwargs["class_id_map_file"]<is><not><none><block_start>cfg["PostProcess"]["Topk"]["class_id_map_file"]=kwargs["class_id_map_file"]<block_end><block_end>cfg=config.AttrDict(cfg)<line_sep>config.create_attr_dict(cfg)<line_sep><return>cfg<block_end><def_stmt>args_cfg <block_start><def_stmt>str2bool v<block_start><return>v.lower()<in>("true" "t" "1")<block_end>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--infer_imgs" type=str required=<true> help="The image(s) to be predicted.")<line_sep>parser.add_argument("--model_name" type=str help="The model name to be used.")<line_sep>parser.add_argument("--inference_model_dir" type=str help="The directory of model files. Valid when model_name not specifed.")<line_sep>parser.add_argument("--use_gpu" type=str default=<true> help="Whether use GPU.")<line_sep>parser.add_argument("--gpu_mem" type=int default=8000 help="")<line_sep>parser.add_argument("--enable_mkldnn" type=str2bool default=<false> help="Whether use MKLDNN. Valid when use_gpu is False")<line_sep>parser.add_argument("--cpu_num_threads" type=int default=1 help="")<line_sep>parser.add_argument("--use_tensorrt" type=str2bool default=<false> help="")<line_sep>parser.add_argument("--use_fp16" type=str2bool default=<false> help="")<line_sep>parser.add_argument("--batch_size" type=int default=1 help="Batch size. Default by 1.")<line_sep>parser.add_argument("--topk" type=int default=5 help="Return topk score(s) and corresponding results. 
Default by 5.")<line_sep>parser.add_argument("--class_id_map_file" type=str help="The path of file that map class_id and label.")<line_sep>parser.add_argument("--save_dir" type=str help="The directory to save prediction results as pre-label.")<line_sep>parser.add_argument("--resize_short" type=int default=256 help="Resize according to short size.")<line_sep>parser.add_argument("--crop_size" type=int default=224 help="Centor crop size.")<line_sep>args=parser.parse_args()<line_sep><return>vars(args)<block_end><def_stmt>print_info <block_start>"""Print list of supported models in formatted.
"""<line_sep>table=PrettyTable(["Series" "Name"])<try_stmt><block_start>sz=os.get_terminal_size()<line_sep>width=sz.columns-30<if>sz.columns<g>50<else>10<block_end><except_stmt>OSError<block_start>width=100<block_end><for_stmt>series MODEL_SERIES<block_start>names=textwrap.fill(" ".join(MODEL_SERIES[series]) width=width)<line_sep>table.add_row([series names])<block_end>width=len(str(table).split("\n")[0])<line_sep>print("{}".format("-"<times>width))<line_sep>print("Models supported by PaddleClas".center(width))<line_sep>print(table)<line_sep>print("Powered by PaddlePaddle!".rjust(width))<line_sep>print("{}".format("-"<times>width))<block_end><def_stmt>get_model_names <block_start>"""Get the model names list.
"""<line_sep>model_names=[]<for_stmt>series MODEL_SERIES<block_start>model_names<augadd>(MODEL_SERIES[series])<block_end><return>model_names<block_end><def_stmt>similar_architectures name="" names=[] thresh=0.1 topk=10<block_start>"""Find the most similar topk model names.
"""<line_sep>scores=[]<for_stmt>idx,n enumerate(names)<block_start><if_stmt>n.startswith("__")<block_start><continue><block_end>score=SequenceMatcher(<none> n.lower() name.lower()).quick_ratio()<if_stmt>score<g>thresh<block_start>scores.append((idx score))<block_end><block_end>scores.sort(key=<lambda>x:x[1] reverse=<true>)<line_sep>similar_names=[names[s[0]]<for>s scores[:min(topk len(scores))]]<line_sep><return>similar_names<block_end><def_stmt>download_with_progressbar url save_path<block_start>"""Download from url with progressbar.
"""<if_stmt>os.path.isfile(save_path)<block_start>os.remove(save_path)<block_end>response=requests.get(url stream=<true>)<line_sep>total_size_in_bytes=int(response.headers.get("content-length" 0))<line_sep>block_size=1024# 1 Kibibyte
progress_bar=tqdm(total=total_size_in_bytes unit="iB" unit_scale=<true>)<with_stmt>open(save_path "wb")<as>file<block_start><for_stmt>data response.iter_content(block_size)<block_start>progress_bar.update(len(data))<line_sep>file.write(data)<block_end><block_end>progress_bar.close()<if_stmt>total_size_in_bytes<eq>0<or>progress_bar.n<ne>total_size_in_bytes<or><not>os.path.isfile(save_path)<block_start><raise>Exception(f"Something went wrong while downloading file from {url}")<block_end><block_end><def_stmt>check_model_file model_name<block_start>"""Check the model files exist and download and untar when no exist.
"""<line_sep>storage_directory=partial(os.path.join BASE_INFERENCE_MODEL_DIR model_name)<line_sep>url=BASE_DOWNLOAD_URL.format(model_name)<line_sep>tar_file_name_list=["inference.pdiparams" "inference.pdiparams.info" "inference.pdmodel"]<line_sep>model_file_path=storage_directory("inference.pdmodel")<line_sep>params_file_path=storage_directory("inference.pdiparams")<if_stmt><not>os.path.exists(model_file_path)<or><not>os.path.exists(params_file_path)<block_start>tmp_path=storage_directory(url.split("/")[-1])<line_sep>print(f"download {url} to {tmp_path}")<line_sep>os.makedirs(storage_directory() exist_ok=<true>)<line_sep>download_with_progressbar(url tmp_path)<with_stmt>tarfile.open(tmp_path "r")<as>tarObj<block_start><for_stmt>member tarObj.getmembers()<block_start>filename=<none><for_stmt>tar_file_name tar_file_name_list<block_start><if_stmt>tar_file_name<in>member.name<block_start>filename=tar_file_name<block_end><block_end><if_stmt>filename<is><none><block_start><continue><block_end>file=tarObj.extractfile(member)<with_stmt>open(storage_directory(filename) "wb")<as>f<block_start>f.write(file.read())<block_end><block_end><block_end>os.remove(tmp_path)<block_end><if_stmt><not>os.path.exists(model_file_path)<or><not>os.path.exists(params_file_path)<block_start><raise>Exception(f"Something went wrong while praparing the model[{model_name}] files!")<block_end><return>storage_directory()<block_end><class_stmt>PaddleClas(object)<block_start>"""PaddleClas.
"""<line_sep>print_info()<def_stmt>__init__ self model_name:str=<none> inference_model_dir:str=<none> use_gpu:bool=<true> batch_size:int=1 topk:int=5 **kwargs<block_start>"""Init PaddleClas with config.
Args:
model_name (str, optional): The model name supported by PaddleClas. If specified, override config. Defaults to None.
inference_model_dir (str, optional): The directory that contained model file and params file to be used. If specified, override config. Defaults to None.
use_gpu (bool, optional): Whether use GPU. If specified, override config. Defaults to True.
batch_size (int, optional): The batch size used to predict. If specified, override config. Defaults to 1.
topk (int, optional): Return the top k prediction results with the highest score. Defaults to 5.
"""<line_sep>super().__init__()<line_sep>self._config=init_config(model_name inference_model_dir use_gpu batch_size topk **kwargs)<line_sep>self._check_input_model()<line_sep>self.cls_predictor=ClsPredictor(self._config)<block_end><def_stmt>get_config self<block_start>"""Get the config.
"""<line_sep><return>self._config<block_end><def_stmt>_check_input_model self<block_start>"""Check input model name or model files.
"""<line_sep>candidate_model_names=get_model_names()<line_sep>input_model_name=self._config.Global.get("model_name" <none>)<line_sep>inference_model_dir=self._config.Global.get("inference_model_dir" <none>)<if_stmt>input_model_name<is><not><none><block_start>similar_names=similar_architectures(input_model_name candidate_model_names)<line_sep>similar_names_str=", ".join(similar_names)<if_stmt>input_model_name<not><in>candidate_model_names<block_start>err=f"{input_model_name} is not provided by PaddleClas. \nMaybe you want: [{similar_names_str}]. \nIf you want to use your own model, please specify inference_model_dir!"<line_sep><raise>InputModelError(err)<block_end>self._config.Global.inference_model_dir=check_model_file(input_model_name)<line_sep><return><block_end><elif_stmt>inference_model_dir<is><not><none><block_start>model_file_path=os.path.join(inference_model_dir "inference.pdmodel")<line_sep>params_file_path=os.path.join(inference_model_dir "inference.pdiparams")<if_stmt><not>os.path.isfile(model_file_path)<or><not>os.path.isfile(params_file_path)<block_start>err=f"There is no model file or params file in this directory: {inference_model_dir}"<line_sep><raise>InputModelError(err)<block_end><return><block_end><else_stmt><block_start>err=f"Please specify the model name supported by PaddleClas or directory contained model files(inference.pdmodel, inference.pdiparams)."<line_sep><raise>InputModelError(err)<block_end><return><block_end><def_stmt>predict self input_data:Union[str np.array] print_pred:bool=<false><arrow>Generator[list <none> <none>]<block_start>"""Predict input_data.
Args:
input_data (Union[str, np.array]):
When the type is str, it is the path of image, or the directory containing images, or the URL of image from Internet.
When the type is np.array, it is the image data whose channel order is RGB.
print_pred (bool, optional): Whether to print the prediction result. Defaults to False.
Raises:
ImageTypeError: Illegal input_data.
Yields:
Generator[list, None, None]:
The prediction result(s) of input_data, yielded batch by batch. For every image, the
prediction result is zipped as a dict that includes the topk "class_ids", "scores" and "label_names".
The format is as follows: [{"class_ids": [...], "scores": [...], "label_names": [...]}, ...]
"""<if_stmt>isinstance(input_data np.ndarray)<block_start>outputs=self.cls_predictor.predict(input_data)<line_sep><yield>self.cls_predictor.postprocess(outputs)<block_end><elif_stmt>isinstance(input_data str)<block_start><if_stmt>input_data.startswith("http")<or>input_data.startswith("https")<block_start>image_storage_dir=partial(os.path.join BASE_IMAGES_DIR)<if_stmt><not>os.path.exists(image_storage_dir())<block_start>os.makedirs(image_storage_dir())<block_end>image_save_path=image_storage_dir("tmp.jpg")<line_sep>download_with_progressbar(input_data image_save_path)<line_sep>input_data=image_save_path<line_sep>warnings.warn(f"Image to be predicted from Internet: {input_data}, has been saved to: {image_save_path}")<block_end>image_list=get_image_list(input_data)<line_sep>batch_size=self._config.Global.get("batch_size" 1)<line_sep>topk=self._config.PostProcess.get('topk' 1)<line_sep>img_list=[]<line_sep>img_path_list=[]<line_sep>cnt=0<for_stmt>idx,img_path enumerate(image_list)<block_start>img=cv2.imread(img_path)<if_stmt>img<is><none><block_start>warnings.warn(f"Image file failed to read and has been skipped. The path: {img_path}")<line_sep><continue><block_end>img=img[: : ::-1]<line_sep>img_list.append(img)<line_sep>img_path_list.append(img_path)<line_sep>cnt<augadd>1<if_stmt>cnt%batch_size<eq>0<or>(idx+1)<eq>len(image_list)<block_start>outputs=self.cls_predictor.predict(img_list)<line_sep>preds=self.cls_predictor.postprocess(outputs img_path_list)<if_stmt>print_pred<and>preds<block_start><for_stmt>pred preds<block_start>filename=pred.pop("file_name")<line_sep>pred_str=", ".join([f"{k}: {pred[k]}"<for>k pred])<line_sep>print(f"filename: {filename}, top-{topk}, {pred_str}")<block_end><block_end>img_list=[]<line_sep>img_path_list=[]<line_sep><yield>preds<block_end><block_end><block_end><else_stmt><block_start>err="Please input legal image! The type of image supported by PaddleClas are: NumPy.ndarray and string of local path or Ineternet URL"<line_sep><raise>ImageTypeError(err)<block_end><return><block_end><block_end># for CLI
<def_stmt>main <block_start>"""Function API used for commad line.
"""<line_sep>cfg=args_cfg()<line_sep>clas_engine=PaddleClas(**cfg)<line_sep>res=clas_engine.predict(cfg["infer_imgs"] print_pred=<true>)<for_stmt>_ res<block_start><pass><block_end>print("Predict complete!")<line_sep><return><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
"""Some utility functions for working with headline of Markdown.
Terminologies
- Headline :: The headline entity OR the text of the headline
- Content :: The content under the current headline. It stops after
encountering a headline with the same or higher level OR EOF.
"""<line_sep># Author: <NAME> <<EMAIL>>
<import_stmt>re<import_stmt>sublime<try_stmt><block_start><import_from_stmt>.utilities is_region_void<block_end><except_stmt>ValueError<block_start><import_from_stmt>utilities is_region_void<block_end>MATCH_PARENT=1# Match headlines at the same or higher level
MATCH_CHILD=2# Match headlines at the same or lower level
MATCH_SILBING=3# Only Match headlines at the same level.
MATCH_ANY=4# Any headlines would be matched.
ANY_LEVEL=-1# level used when MATCH_ANY is used as match type
<def_stmt>region_of_content_of_headline_at_point view from_point<block_start>"""Extract the region of the content of under current headline."""<line_sep>_,level=headline_and_level_at_point(view from_point)<if_stmt>level<eq><none><block_start><return><none><block_end><if_stmt>is_content_empty_at_point(view from_point)<block_start><return><none><block_end>line_num,_=view.rowcol(from_point)<line_sep>content_line_start_point=view.text_point(line_num+1 0)<line_sep>next_headline,_=find_headline(view content_line_start_point level <true> MATCH_PARENT)<if_stmt><not>is_region_void(next_headline)<block_start>end_pos=next_headline.a-1<block_end><else_stmt><block_start>end_pos=view.size()<block_end><return>sublime.Region(content_line_start_point end_pos)<block_end><def_stmt>headline_and_level_at_point view from_point search_above_and_down=<false><block_start>"""Return the current headline and level.
If from_point is inside a headline, then return the headline and level.
Otherwise, depending on the argument, it might search above and down.
"""<line_sep>line_region=view.line(from_point)<line_sep>line_content=view.substr(line_region)<line_sep># Update the level in case it's headline.ANY_LEVEL
level=_extract_level_from_headline(line_content)<line_sep># Search above and down
<if_stmt>level<is><none><and>search_above_and_down# Search above
<block_start>headline_region,_=find_headline(view from_point ANY_LEVEL <false> skip_folded=<true>)<if_stmt><not>is_region_void(headline_region)<block_start>line_content,level=headline_and_level_at_point(view headline_region.a)<block_end># Search down
<if_stmt>level<is><none><block_start>headline_region,_=find_headline(view from_point ANY_LEVEL <true> skip_folded=<true>)<if_stmt><not>is_region_void(headline_region)<block_start>line_content,level=headline_and_level_at_point(view headline_region.a)<block_end><block_end><block_end><return>line_content level<block_end><def_stmt>_extract_level_from_headline headline<block_start>"""Extract the level of headline, None if not found.
"""<line_sep>re_string=_get_re_string(ANY_LEVEL MATCH_ANY)<line_sep>match=re.match(re_string headline)<if_stmt>match<block_start><return>len(match.group(1))<block_end><else_stmt><block_start><return><none><block_end><block_end><def_stmt>is_content_empty_at_point view from_point<block_start>"""Check if the content under the current headline is empty.
For implementation, check if next line is a headline a the same
or higher level.
"""<line_sep>_,level=headline_and_level_at_point(view from_point)<if_stmt>level<is><none><block_start><raise>ValueError("from_point must be inside a valid headline.")<block_end>line_num,_=view.rowcol(from_point)<line_sep>next_line_region=view.line(view.text_point(line_num+1 0))<line_sep>next_line_content=view.substr(next_line_region)<line_sep>next_line_level=_extract_level_from_headline(next_line_content)<line_sep># Note that EOF works too in this case.
<if_stmt>next_line_level<and>next_line_level<le>level<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><def_stmt>find_headline view from_point level forward=<true> match_type=MATCH_ANY skip_headline_at_point=<false> skip_folded=<false><block_start>"""Return the region of the next headline or EOF.
Parameters
----------
view: sublime.view
from_point: int
From which to find.
level: int
The headline level to match.
forward: boolean
Search forward or backward
match_type: int
MATCH_SILBING, MATCH_PARENT, MATCH_CHILD or MATCH_ANY.
skip_headline_at_point: boolean
When searching whether skip the headline at point
skip_folded: boolean
Whether to skip the folded region
Returns
-------
match_region: int
Matched region, or None if not found.
match_level: int
The level of matched headline, or None if not found.
"""<if_stmt>skip_headline_at_point# Move the point to the next line if we are
# current in a headline already.
<block_start>from_point=_get_new_point_if_already_in_headline(view from_point forward)<block_end>re_string=_get_re_string(level match_type)<if_stmt>forward<block_start>match_region=view.find(re_string from_point)<block_end><else_stmt><block_start>all_match_regions=view.find_all(re_string)<line_sep>match_region=_nearest_region_among_matches_from_point(view all_match_regions from_point <false> skip_folded)<block_end><if_stmt>skip_folded<block_start><while_stmt>(_is_region_folded(match_region view))<block_start>from_point=match_region.b<line_sep>match_region=view.find(re_string from_point)<block_end><block_end><if_stmt><not>is_region_void(match_region)<block_start><if_stmt><not>is_scope_headline(view match_region.a)<block_start><return>find_headline(view match_region.a level forward match_type <true> skip_folded)<block_end><else_stmt>## Extract the level of matched headlines according to the region
<block_start>headline=view.substr(match_region)<line_sep>match_level=_extract_level_from_headline(headline)<block_end><block_end><else_stmt><block_start>match_level=<none><block_end><return>(match_region match_level)<block_end><def_stmt>_get_re_string level match_type=MATCH_ANY<block_start>"""Get regular expression string according to match type.
Return a regular expression string rather than a compiled pattern, since
sublime's view.find function needs a string.
Parameters
----------
match_type: int
MATCH_SILBING, MATCH_PARENT, MATCH_CHILD or ANY_LEVEL.
"""<if_stmt>match_type<eq>MATCH_ANY<block_start>re_string=r'^(#+)\s.*'<block_end><else_stmt><block_start><try_stmt><block_start><if_stmt>match_type<eq>MATCH_PARENT<block_start>re_string=r'^(#{1,%d})\s.*'%level<block_end><elif_stmt>match_type<eq>MATCH_CHILD<block_start>re_string=r'^(#{%d,})\s.*'%level<block_end><elif_stmt>match_type<eq>MATCH_SILBING<block_start>re_string=r'^(#{%d,%d})\s.*'%(level level)<block_end><block_end><except_stmt>ValueError<block_start>print("match_type has to be specified if level isn't ANY_LEVE")<block_end><block_end><return>re_string<block_end><def_stmt>_get_new_point_if_already_in_headline view from_point forward=<true><block_start>line_content=view.substr(view.line(from_point))<if_stmt>_extract_level_from_headline(line_content)<block_start>line_num,_=view.rowcol(from_point)<if_stmt>forward<block_start><return>view.text_point(line_num+1 0)<block_end><else_stmt><block_start><return>view.text_point(line_num 0)-1<block_end><block_end><else_stmt><block_start><return>from_point<block_end><block_end><def_stmt>is_scope_headline view from_point<block_start><return>view.score_selector(from_point "markup.heading")<g>0<or>view.score_selector(from_point "meta.block-level.markdown")<g>0<block_end><def_stmt>_nearest_region_among_matches_from_point view all_match_regions from_point forward=<false> skip_folded=<true><block_start>"""Find the nearest matched region among all matched regions.
None if not found.
"""<line_sep>nearest_region=<none><for_stmt>r all_match_regions<block_start><if_stmt><not>forward<and>r.b<le>from_point<and>(<not>nearest_region<or>r.a<g>nearest_region.a)<block_start>candidate=r<block_end><elif_stmt>forward<and>r.a<ge>from_point<and>(<not>nearest_region<or>r.b<l>nearest_region.b)<block_start>candidate=r<block_end><else_stmt><block_start><continue><block_end><if_stmt>skip_folded<and><not>_is_region_folded(candidate view)<block_start>nearest_region=candidate<block_end><block_end><return>nearest_region<block_end><def_stmt>_is_region_folded region view<block_start><for_stmt>i view.folded_regions()<block_start><if_stmt>i.contains(region)<block_start><return><true><block_end><block_end><return><false><block_end> |
<import_from_stmt>argparse ArgumentParser<import_stmt>numpy<as>np<import_stmt>requests<import_from_stmt>mmcls.apis inference_model init_model show_result_pyplot<def_stmt>parse_args <block_start>parser=ArgumentParser()<line_sep>parser.add_argument('img' help='Image file')<line_sep>parser.add_argument('config' help='Config file')<line_sep>parser.add_argument('checkpoint' help='Checkpoint file')<line_sep>parser.add_argument('model_name' help='The model name in the server')<line_sep>parser.add_argument('--inference-addr' default='127.0.0.1:8080' help='Address and port of the inference server')<line_sep>parser.add_argument('--device' default='cuda:0' help='Device used for inference')<line_sep>args=parser.parse_args()<line_sep><return>args<block_end><def_stmt>main args# Inference single image by native apis.
<block_start>model=init_model(args.config args.checkpoint device=args.device)<line_sep>model_result=inference_model(model args.img)<line_sep>show_result_pyplot(model args.img model_result title='pytorch_result')<line_sep># Inference single image by torchserve engine.
url='http://'+args.inference_addr+'/predictions/'+args.model_name<with_stmt>open(args.img 'rb')<as>image<block_start>response=requests.post(url image)<block_end>server_result=response.json()<line_sep>show_result_pyplot(model args.img server_result title='server_result')<assert_stmt>np.allclose(model_result['pred_score'] server_result['pred_score'])<line_sep>print('Test complete, the results of PyTorch and TorchServe are the same.')<block_end><if_stmt>__name__<eq>'__main__'<block_start>args=parse_args()<line_sep>main(args)<block_end> |
#
# https://stackoverflow.com/a/47983927/1832058
#
<import_stmt>tkinter<as>tk<line_sep>root=tk.Tk()<line_sep>root.geometry('250x250')<line_sep>root.title('Canvas')<line_sep>canvas=tk.Canvas(root width=250 height=250)<line_sep>canvas.pack()<line_sep>img=tk.PhotoImage(file='hal_9000.gif')<line_sep>canvas.create_image((0 0) image=img anchor='nw')<line_sep>canvas.create_text((10 100) text='Username' anchor='w' fill='white' font=('Arial' 10))<line_sep>canvas.create_text((10 150) text='Password' anchor='w' fill='white' font=('Arial' 10))<line_sep>name_entry=tk.Entry(canvas)<line_sep>password_entry=tk.Entry(canvas show='*')<line_sep>canvas.create_window((240 100) window=name_entry anchor='e')<line_sep>canvas.create_window((240 150) window=password_entry anchor='e')<line_sep>root.mainloop()<line_sep> |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class for generating the feature_statistics proto.
The proto is used as input for the Overview visualization.
"""<import_stmt>warnings<import_from_stmt>.base_feature_statistics_generator BaseFeatureStatisticsGenerator<import_from_stmt>. feature_statistics_pb2<as>fs<class_stmt>FeatureStatisticsGenerator(BaseFeatureStatisticsGenerator)<block_start>"""Generator of stats proto from TF data."""<def_stmt>__init__ self<block_start>BaseFeatureStatisticsGenerator.__init__(self fs.FeatureNameStatistics fs.DatasetFeatureStatisticsList fs.Histogram)<block_end><block_end><def_stmt>ProtoFromTfRecordFiles files max_entries=10000 features=<none> is_sequence=<false> iterator_options=<none><block_start>"""Creates a feature statistics proto from a set of TFRecord files.
Args:
files: A list of dicts describing files for each dataset for the proto. Each
entry contains a 'path' field with the path to the TFRecord file on disk
and a 'name' field to identify the dataset in the proto.
max_entries: The maximum number of examples to load from each dataset
in order to create the proto. Defaults to 10000.
features: A list of strings that is a whitelist of feature names to create
feature statistics for. If set to None then all features in the dataset
are analyzed. Defaults to None.
is_sequence: True if the input data from 'files' are tf.SequenceExamples,
False if tf.Examples. Defaults to False.
iterator_options: Options to pass to the iterator that reads the examples.
Defaults to None.
Returns:
The feature statistics proto for the provided files.
"""<line_sep>warnings.warn('Use GenericFeatureStatisticsGenerator class method instead.' DeprecationWarning)<line_sep><return>FeatureStatisticsGenerator().ProtoFromTfRecordFiles(files max_entries features is_sequence iterator_options)<block_end> |
<import_from_stmt>eventlet greenthread<import_from_stmt>eventlet.zipkin api<line_sep>__original_init__=greenthread.GreenThread.__init__<line_sep>__original_main__=greenthread.GreenThread.main<def_stmt>_patched__init self parent# parent thread saves current TraceData from tls to self
<block_start><if_stmt>api.is_tracing()<block_start>self.trace_data=api.get_trace_data()<block_end>__original_init__(self parent)<block_end><def_stmt>_patched_main self function args kwargs# child thread inherits TraceData
<block_start><if_stmt>hasattr(self 'trace_data')<block_start>api.set_trace_data(self.trace_data)<block_end>__original_main__(self function args kwargs)<block_end><def_stmt>patch <block_start>greenthread.GreenThread.__init__=_patched__init<line_sep>greenthread.GreenThread.main=_patched_main<block_end><def_stmt>unpatch <block_start>greenthread.GreenThread.__init__=__original_init__<line_sep>greenthread.GreenThread.main=__original_main__<block_end> |
<import_from_stmt>webrecorder.basecontroller BaseController<import_from_stmt>webrecorder.models.importer ImportStatusChecker<line_sep># ============================================================================
<class_stmt>AppController(BaseController)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super(AppController self).__init__(*args **kwargs)<line_sep>config=kwargs['config']<line_sep># Auto Import on Init Id
self.init_import_id=config.get('init_import_id')<line_sep>self.init_import_username=config.get('init_import_user')<line_sep>self.init_import_coll_name=config.get('init_import_coll')<block_end><def_stmt>init_routes self<block_start>@self.app.get(['/' '/index.html'])@self.jinja2_view('index.html' refresh_cookie=<false>)<def_stmt>home_page <block_start>self.redir_host()<line_sep>resp={'is_home':'1'}<if_stmt>self.init_import_id<block_start><return>self.handle_player_load(resp)<block_end><if_stmt><not>self.access.session_user.is_anon()<block_start>coll_list=self.access.session_user.get_collections()<line_sep>resp['collections']=[coll.serialize()<for>coll coll_list]<line_sep>resp['coll_title']=''<line_sep>resp['rec_title']=''<block_end><else_stmt><block_start>self.fill_anon_info(resp)<block_end><return>resp<block_end>@self.app.get('/_faq')@self.jinja2_view('faq.html')<def_stmt>faq <block_start><return>{}<block_end>@self.app.get('/_documentation')@self.jinja2_view('howtoguide.html')<def_stmt>documentation <block_start><return>{}<block_end>@self.app.get('/_policies')@self.jinja2_view('policies.html')<def_stmt>policies <block_start><return>{}<block_end># Expiry Message
@self.app.route('/_expire')<def_stmt>expire <block_start>self.flash_message('Sorry, the anonymous collection has expired due to inactivity')<line_sep>self.redirect('/')<block_end><block_end><def_stmt>handle_player_load self resp<block_start>""" Initial warc load for player
"""<line_sep>user=self.user_manager.all_users[self.init_import_username]<line_sep>status_checker=ImportStatusChecker(self.redis)<line_sep>upload_status=status_checker.get_upload_status(user self.init_import_id)<line_sep># if upload already finished, redirect to known coll
<if_stmt><not>upload_status<or>upload_status.get('done')<block_start><if_stmt>user<and>self.init_import_coll_name<block_start>self.redirect('/'+user.name+'/'+self.init_import_coll_name)<block_end><block_end>resp['upload_status']=upload_status<or>{}<line_sep><return>resp<block_end><block_end> |
<import_from_future_stmt> division<import_stmt>sys<import_stmt>pytest<import_stmt>numpy<as>np<import_from_stmt>datashader.glyphs Glyph<import_from_stmt>datashader.glyphs.line _build_draw_segment _build_map_onto_pixel_for_line<import_from_stmt>datashader.utils ngjit<line_sep>py2_skip=pytest.mark.skipif(sys.version_info.major<l>3 reason="py2 not supported")<line_sep>mapper=ngjit(<lambda>x:x)<line_sep>map_onto_pixel=_build_map_onto_pixel_for_line(mapper mapper)<line_sep>sx,tx,sy,ty=1 0 1 0<line_sep>xmin,xmax,ymin,ymax=0 5 0 5<line_sep>@pytest.fixture<def_stmt>draw_line <block_start>@ngjit<def_stmt>append i x y agg<block_start>agg[y x]<augadd>1<block_end>expand_aggs_and_cols=Glyph._expand_aggs_and_cols(append 1)<line_sep><return>_build_draw_segment(append map_onto_pixel expand_aggs_and_cols <false>)<block_end>@[email protected](group="draw_line")<def_stmt>test_draw_line_left_border benchmark draw_line<block_start>n=10<power>4<line_sep>x0,y0=(0 0)<line_sep>x1,y1=(0 n)<line_sep>agg=np.zeros((n+1 n+1) dtype='i4')<line_sep>benchmark(draw_line sx tx sy ty xmin xmax ymin ymax x0 y0 x1 y1 0 <true> agg)<block_end>@[email protected](group="draw_line")<def_stmt>test_draw_line_diagonal benchmark draw_line<block_start>n=10<power>4<line_sep>x0,y0=(0 0)<line_sep>x1,y1=(n n)<line_sep>agg=np.zeros((n+1 n+1) dtype='i4')<line_sep>benchmark(draw_line sx tx sy ty xmin xmax ymin ymax x0 y0 x1 y1 0 <true> agg)<block_end>@[email protected](group="draw_line")<def_stmt>test_draw_line_offset benchmark draw_line<block_start>n=10<power>4<line_sep>x0,y0=(0 n<floordiv>4)<line_sep>x1,y1=(n n<floordiv>4-1)<line_sep>agg=np.zeros((n+1 n+1) dtype='i4')<line_sep>benchmark(draw_line sx tx sy ty xmin xmax ymin ymax x0 y0 x1 y1 0 <true> agg)<block_end> |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>logging<import_from_stmt>itertools zip_longest<import_from_stmt>typing Dict Tuple Any Union Optional List<import_from_stmt>flask_marshmallow Marshmallow# type: ignore
<import_from_stmt>flask_sqlalchemy DefaultMeta# type: ignore
<import_from_stmt>flask_sqlalchemy SQLAlchemy<as>SQLAlchemyBase<import_from_stmt>sqlalchemy Index Column Integer func DateTime inspect<import_from_stmt>sqlalchemy.ext.declarative declared_attr<import_from_stmt>sqlalchemy.orm Mapper RelationshipProperty<import_from_stmt>sqlalchemy.orm.attributes History<import_from_stmt>sqlalchemy.orm.interfaces MapperProperty<import_from_stmt>sqlalchemy.orm.state InstanceState AttributeState<line_sep>log=logging.getLogger(__name__)<line_sep># Adding the "pool_pre_ping" command to avoid mysql server has gone away issues.
# Note: This will slightly degrade performance. It might be better to adjust
# MariaDB server settings.
<class_stmt>SQLAlchemy(SQLAlchemyBase)<block_start><def_stmt>apply_pool_defaults self app options<block_start>options=super().apply_pool_defaults(app options)<line_sep>options["pool_pre_ping"]=<true><line_sep><return>options<block_end><block_end>db=SQLAlchemy()<line_sep>ma=Marshmallow()# pylint: disable=invalid-name
BaseModel:DefaultMeta=db.Model<line_sep>ChangeUnion=Union[Tuple[Any Any] Dict[str Any] List[Any]]<line_sep>Changes=Dict[str ChangeUnion]<class_stmt>MainBase(BaseModel)# N.B. We leave the schema out on purpose as alembic gets confused
# otherwise. The default schema is already main (as specified in the
# connection string). Also see:
# https://github.com/sqlalchemy/alembic/issues/519#issuecomment-442533633
# __table_args__ = {'schema': 'main'}
<block_start>__abstract__=<true><line_sep>id=Column(Integer autoincrement=<true> primary_key=<true>)<line_sep>date_created=Column(DateTime default=func.current_timestamp())<line_sep>date_modified=Column(DateTime default=func.current_timestamp() onupdate=func.current_timestamp() )<def_stmt>model_changes self * already_tested=<none><arrow>Changes<block_start>"""Returns the changed attributes of this instance.
Returns:
a dictionary mapping the attributes to (new, old) tuples or a
recursive version if the attribute is a list or reference.
"""<def_stmt>inner current<arrow>Optional[Union[List[Any] Changes]]<block_start><if_stmt>isinstance(current list)<block_start>res=[inner(item)<for>item current]<if_stmt>any(res)<block_start><return>res<block_end><block_end><elif_stmt>hasattr(current "model_changes")<block_start><return>current.model_changes(already_tested=already_tested)<block_end><return><none><block_end>changes:Changes={}<if_stmt>already_tested<is><none><block_start>already_tested={id(self)}<block_end><elif_stmt>id(self)<in>already_tested<block_start><return>changes<block_end>already_tested.add(id(self))<line_sep>state:InstanceState=inspect(self)<line_sep>attr:AttributeState<for_stmt>name,attr state.attrs.items()<block_start>hist:History=attr.load_history()<if_stmt>hist.has_changes()<block_start>changes[name]=hist[0] hist[2]<block_end><else_stmt><block_start>subchanges=inner(getattr(self name))<if_stmt>subchanges<block_start>changes[name]=subchanges<block_end><block_end><block_end><return>changes<block_end><def_stmt>diff self other:BaseModel * already_tested=<none><arrow>Changes<block_start>"""Returns the difference between this instance and the given one.
Returns:
a dictionary mapping the attributes to (new, old) tuples or a
recursive version if the attribute is a list or reference.
"""<line_sep>changes:Changes={}<if_stmt>already_tested<is><none><block_start>already_tested={id(self) id(other)}<block_end><elif_stmt>id(self)<in>already_tested<and>id(other)<in>already_tested<block_start><return>changes<block_end>already_tested.add(id(self))<line_sep>already_tested.add(id(other))<if_stmt>id(self)<eq>id(other)# identity cache
<block_start>log.warning("Comparing the same instance (%r). Identity cache?" self)<line_sep><return>self.model_changes()<block_end>clz=type(self)<line_sep>oclz=type(other)<if_stmt><not>isinstance(other clz)<block_start><raise>TypeError("Instance of {} expected. Got {}".format(clz.__name__ oclz.__name__))<block_end><def_stmt>innerdiff current other<arrow>Optional[ChangeUnion]<block_start><if_stmt>current<is><none><and>other<is><none><block_start><return><none><block_end><if_stmt>current<is><none><or>other<is><none><block_start><return>(current other)<block_end><if_stmt>hasattr(current "diff")<block_start><return>current.diff(other already_tested=already_tested)<block_end><if_stmt>isinstance(current list)<and>isinstance(other list)<block_start>res=[]<for_stmt>cur,oth zip_longest(current other)<block_start>res.append(innerdiff(cur oth))<block_end><if_stmt>all(res)<block_start><return>res<block_end><block_end><elif_stmt>current<ne>other<block_start><return>(current other)<block_end><return><none><block_end>mapper:Mapper=inspect(clz)<line_sep>name:str<line_sep>attr:MapperProperty<for_stmt>name,attr mapper.attrs.items()# type: ignore
# log.debug('Compare %s of %s <> %s', name, clz, oclz)
<block_start>other_value=getattr(other name)<line_sep>current_value=getattr(self name)<if_stmt>isinstance(attr RelationshipProperty)<and>other_value<is><none><block_start><for_stmt>col attr.local_columns<block_start>cname=col.name<if_stmt>innerdiff(getattr(self cname) getattr(other cname))<block_start><break><block_end><block_end><else_stmt><block_start><continue><block_end><if_stmt>name<in>changes<block_start><continue><block_end><block_end>subchanges=innerdiff(current_value other_value)<if_stmt>subchanges<block_start>changes[name]=subchanges<block_end><block_end><return>changes<block_end><block_end><class_stmt>NvdBase(BaseModel)<block_start>__abstract__=<true><line_sep>@declared_attr<def_stmt>__table_args__ cls# pylint: disable=no-self-argument
<block_start>indices=[]<line_sep>idx_format="idx_{tbl_name}_{col_name}"<for_stmt>key cls.__dict__<block_start>attribute=cls.__dict__[key]<line_sep># pylint: disable=no-member
<if_stmt><not>isinstance(attribute db.Column)<or><not>attribute.index<block_start><continue><block_end># pylint: enable=no-member
# Disable Index
attribute.index=<none><line_sep># Create a custom index here.
indices.append(Index(idx_format.format(tbl_name=cls.__tablename__ col_name=key) key))<block_end>indices.append({"schema":"cve"})<line_sep><return>tuple(indices)<block_end><block_end><class_stmt>CweBase(BaseModel)<block_start>__table_args__={"schema":"cwe"}<line_sep>__abstract__=<true><block_end> |
<import_from_stmt>datetime datetime<import_from_stmt>operator itemgetter<import_from_stmt>os.path dirname join<import_stmt>pytest# noqa
<import_from_stmt>city_scrapers_core.constants BOARD PASSED<import_from_stmt>city_scrapers_core.utils file_response<import_from_stmt>freezegun freeze_time<import_from_stmt>city_scrapers.spiders.chi_land_trust ChiLandTrustSpider<line_sep>test_response=file_response(join(dirname(__file__) "files" "chi_land_trust.html") url="https://www.chicago.gov/city/en/depts/doh/supp_info/chicago_communitylandtrust0.html" # noqa
)<line_sep>spider=ChiLandTrustSpider()<line_sep>freezer=freeze_time("2019-07-11")<line_sep>freezer.start()<line_sep>parsed_items=sorted([item<for>item spider.parse(test_response)] key=itemgetter("start"))<line_sep>freezer.stop()<def_stmt>test_count <block_start><assert_stmt>len(parsed_items)<eq>13<block_end><def_stmt>test_title <block_start><assert_stmt>parsed_items[-6]["title"]<eq>"Board of Directors"<block_end><def_stmt>test_description <block_start><assert_stmt>parsed_items[-6]["description"]<eq>""<block_end><def_stmt>test_start <block_start><assert_stmt>parsed_items[-6]["start"]<eq>datetime(2019 2 7 9 0)<block_end><def_stmt>test_end <block_start><assert_stmt>parsed_items[-6]["end"]<is><none><block_end><def_stmt>test_time_notes <block_start><assert_stmt>parsed_items[-6]["time_notes"]<eq>"See agenda to confirm time"<block_end><def_stmt>test_id <block_start><assert_stmt>parsed_items[-6]["id"]<eq>"chi_land_trust/201902070900/x/board_of_directors"<block_end><def_stmt>test_status <block_start><assert_stmt>parsed_items[-6]["status"]<eq>PASSED<block_end><def_stmt>test_location <block_start><assert_stmt>parsed_items[-6]["location"]<eq>spider.location<block_end><def_stmt>test_source <block_start><assert_stmt>(parsed_items[-6]["source"]<eq>"https://www.chicago.gov/city/en/depts/doh/supp_info/chicago_communitylandtrust0.html"# noqa
)<block_end><def_stmt>test_links <block_start><assert_stmt>parsed_items[-6]["links"]<eq>[{"href":"https://www.chicago.gov/content/dam/city/depts/doh/general/CCLT_February_2019_Agernda.pdf" # noqa
"title":"Agenda" }]<block_end><def_stmt>test_classification <block_start><assert_stmt>parsed_items[-6]["classification"]<eq>BOARD<block_end><def_stmt>test_all_day <block_start><assert_stmt>parsed_items[-6]["all_day"]<is><false><block_end> |
<import_stmt>tensorflow<as>tf<import_stmt>tensorflow.contrib.slim<as>slim<def_stmt>lrelu x leak=0.2 name="lrelu"<block_start><with_stmt>tf.variable_scope(name)<block_start>f1=0.5<times>(1+leak)<line_sep>f2=0.5<times>(1-leak)<line_sep><return>f1<times>x+f2<times>abs(x)<block_end><block_end><def_stmt>selu x<block_start>alpha=1.6732632423543772848170429916717<line_sep>scale=1.0507009873554804934193349852946<line_sep><return>scale<times>tf.where(x<g>0.0 x alpha<times>tf.exp(x)-alpha)<block_end><def_stmt>huber_loss labels predictions delta=1.0<block_start>residual=tf.abs(predictions-labels)<line_sep>condition=tf.less(residual delta)<line_sep>small_res=0.5<times>tf.square(residual)<line_sep>large_res=delta<times>residual-0.5<times>tf.square(delta)<line_sep><return>tf.where(condition small_res large_res)<block_end><def_stmt>conv2d input output_shape is_train activation_fn=tf.nn.relu k_h=5 k_w=5 s_h=2 s_w=2 stddev=0.02 name="conv2d"<block_start><with_stmt>tf.variable_scope(name)<block_start>w=tf.get_variable('w' [k_h k_w input.get_shape()[-1] output_shape] initializer=tf.truncated_normal_initializer(stddev=stddev))<line_sep>conv=tf.nn.conv2d(input w strides=[1 s_h s_w 1] padding='SAME')<line_sep>biases=tf.get_variable('biases' [output_shape] initializer=tf.constant_initializer(0.0))<line_sep>activation=activation_fn(conv+biases)<line_sep>bn=tf.contrib.layers.batch_norm(activation center=<true> scale=<true> decay=0.9 is_training=is_train updates_collections=<none>)<block_end><return>bn<block_end><def_stmt>fc input output_shape activation_fn=tf.nn.relu name="fc"<block_start>output=slim.fully_connected(input int(output_shape) activation_fn=activation_fn)<line_sep><return>output<block_end> |
<import_stmt>time<def_stmt>main request response<block_start>time.sleep(2)<line_sep><return>200 [] b''<block_end> |
# Copyright (c) 2017-2020 <NAME>.
# Author: <NAME>
# Email: <EMAIL>
# Update: 2020 - 2 - 7
# Customized image decoder for YV12([Y][U/4][V/4]), YV21([Y][V/4][U/4])
# NOTE: [Y][U][V] means Y/U/V channel is a planar channel, [U/4] means
# U channel is sub-sampled by a factor of [2, 2]
<import_stmt>numpy<as>np<import_from_stmt>PIL ImageFile<class_stmt>YV12Decoder(ImageFile.PyDecoder)<block_start>"""PIL.Image.DECODERS for YV12 format raw bytes
Registered in `Image.DECODERS`, don't use this class directly!
"""<def_stmt>__init__ self mode *args<block_start>super(YV12Decoder self).__init__(mode *args)<block_end><def_stmt>decode self buffer<block_start><if_stmt>self.mode<eq>'L'# discard UV channel
<block_start>self.set_as_raw(buffer 'L')<block_end><else_stmt><block_start>w,h=self.im.size<line_sep>y=np.frombuffer(buffer 'uint8' count=w<times>h)<line_sep>u=np.frombuffer(buffer 'uint8' count=w<times>h<floordiv>4 offset=w<times>h)<line_sep>v=np.frombuffer(buffer 'uint8' count=w<times>h<floordiv>4 offset=w<times>h+w<times>h<floordiv>4)<line_sep>y=np.reshape(y [h w])<line_sep>u=np.reshape(u [h<floordiv>2 w<floordiv>2])<line_sep>v=np.reshape(v [h<floordiv>2 w<floordiv>2])<line_sep>u=u[np.arange(h)<floordiv>2][: np.arange(w)<floordiv>2]<line_sep>v=v[np.arange(h)<floordiv>2][: np.arange(w)<floordiv>2]<line_sep>yuv=np.stack([y u v] axis=-1)<line_sep>self.set_as_raw(yuv.flatten().tobytes())<block_end><return>-1 0<block_end><block_end><class_stmt>YV21Decoder(ImageFile.PyDecoder)<block_start>"""PIL.Image.DECODERS for YV21 format raw bytes
Registered in `Image.DECODERS`, don't use this class directly!
"""<def_stmt>__init__ self mode *args<block_start>super(YV21Decoder self).__init__(mode *args)<block_end><def_stmt>decode self buffer<block_start><if_stmt>self.mode<eq>'L'# discard UV channel
<block_start>self.set_as_raw(buffer 'L')<block_end><else_stmt><block_start>w,h=self.im.size<line_sep>y=np.frombuffer(buffer 'uint8' count=w<times>h)<line_sep>v=np.frombuffer(buffer 'uint8' count=w<times>h<floordiv>4 offset=w<times>h)<line_sep>u=np.frombuffer(buffer 'uint8' count=w<times>h<floordiv>4 offset=w<times>h+w<times>h<floordiv>4)<line_sep>y=np.reshape(y [h w])<line_sep>u=np.reshape(u [h<floordiv>2 w<floordiv>2])<line_sep>v=np.reshape(v [h<floordiv>2 w<floordiv>2])<line_sep>u=u[np.arange(h)<floordiv>2][: np.arange(w)<floordiv>2]<line_sep>v=v[np.arange(h)<floordiv>2][: np.arange(w)<floordiv>2]<line_sep>yuv=np.stack([y u v] axis=-1)<line_sep>self.set_as_raw(yuv.flatten().tobytes())<block_end><return>-1 0<block_end><block_end> |
#
# This file is part of LiteX.
#
# Copyright (c) 2014-2015 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
<import_from_stmt>migen *<import_from_stmt>litex.soc.interconnect.csr *<line_sep># Xilinx DNA (Device Identifier) -------------------------------------------------------------------
<class_stmt>DNA(Module AutoCSR)<block_start><def_stmt>__init__ self<block_start>n=57<line_sep>self._id=CSRStatus(n)<line_sep># # #
self.do=do=Signal()<line_sep>self.count=count=Signal(max=2<times>n+1)<line_sep>self.clk=clk=Signal()<line_sep>self.comb<augadd>clk.eq(count[0])<line_sep>self.specials<augadd>Instance("DNA_PORT" i_DIN=self._id.status[-1] o_DOUT=do i_CLK=clk i_READ=count<l>2 i_SHIFT=1)<line_sep>self.sync<augadd>[If(count<l>2<times>n count.eq(count+1) If(clk self._id.status.eq(Cat(do self._id.status))))]<block_end><def_stmt>add_timing_constraints self platform sys_clk_freq sys_clk<block_start>platform.add_period_constraint(self.clk 2<times>1e9/sys_clk_freq)<line_sep>platform.add_false_path_constraints(self.clk sys_clk)<block_end><block_end> |
<import_stmt>os<import_stmt>csv<import_stmt>time<import_stmt>math<import_stmt>pandas<as>pd<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torchvision.utils<as>vutils<import_from_stmt>torch.optim.sgd SGD<import_from_stmt>torch.utils.data DataLoader<import_from_stmt>optims OCGD BCGD2<import_from_stmt>train_utils get_data weights_init_d weights_init_g get_diff save_checkpoint lr_scheduler generate_data icrScheduler get_model<import_from_stmt>losses get_loss<line_sep># seed = torch.randint(0, 1000000, (1,))
seed=2020<line_sep>torch.manual_seed(seed=seed)<line_sep>print('random seed : %d'%seed)<def_stmt>train_ocgd epoch_num=10 optim_type='BCGD2' startPoint=<none> logdir='test' update_min=<true> z_dim=128 batchsize=64 loss_name='WGAN' model_name='dc' data_path='None' dataname='cifar10' device='cpu' gpu_num=1 collect_info=<false><block_start>lr_d=0.01<line_sep>lr_g=0.01<line_sep>dataset=get_data(dataname=dataname path='../datas/%s'%data_path)<line_sep>dataloader=DataLoader(dataset=dataset batch_size=batchsize shuffle=<true> num_workers=4)<line_sep>D,G=get_model(model_name=model_name z_dim=z_dim)<line_sep>D.to(device)<line_sep>G.to(device)<if_stmt>startPoint<is><not><none><block_start>chk=torch.load(startPoint)<line_sep>D.load_state_dict(chk['D'])<line_sep>G.load_state_dict(chk['G'])<line_sep>print('Start from %s'%startPoint)<block_end>optimizer=OCGD(max_params=G.parameters() min_params=D.parameters() udpate_min=update_min device=device)<line_sep>loss_list=[]<line_sep>count=0<for_stmt>e range(epoch_num)<block_start><for_stmt>real_x dataloader<block_start>real_x=real_x[0].to(device)<line_sep>d_real=D(real_x)<line_sep>z=torch.randn((real_x.shape[0] z_dim) device=device)<line_sep>fake_x=G(z)<line_sep>d_fake=D(fake_x)<line_sep>D_loss=get_loss(name=loss_name g_loss=<false> d_real=d_real d_fake=d_fake)<line_sep>optimizer.zero_grad()<line_sep>optimizer.step(loss=D_loss)<if_stmt>count%100<eq>0<block_start>print('Iter %d, Loss: %.5f'%(count D_loss.item()))<line_sep>loss_list.append(D_loss.item())<block_end>count<augadd>1<block_end>print('epoch{%d/%d}'%(e epoch_num))<block_end>name='overtrainD.pth'<if>update_min<else>'overtrainG.pth'<line_sep>save_checkpoint(path=logdir name=name D=D G=G)<line_sep>loss_data=pd.DataFrame(loss_list)<line_sep>loss_data.to_csv('logs/train_oneside.csv')<block_end><if_stmt>__name__<eq>'__main__'<block_start>torch.backends.cudnn.benchmark=<true><line_sep>device=torch.device('cuda:0'<if>torch.cuda.is_available()<else>'cpu')<line_sep>print(device)<line_sep>chk='checkpoints/0.00000MNIST-0.0100/SGD-0.01000_9000.pth'<line_sep>train_ocgd(epoch_num=10 startPoint=chk z_dim=96 update_min=<true> data_path='mnist' dataname='MNIST' loss_name='JSD' model_name='mnist' batchsize=128 device=device)<block_end> |
# Copyright (c) 2020 Trail of Bits, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
<import_stmt>os<def_stmt>strip_whole_config filename<block_start><if_stmt><not>filename.endswith(".config")<block_start><return>""<block_end>filename=filename.rstrip(".config")<line_sep>basename,ext=os.path.splitext(filename)<line_sep><return>basename<block_end><def_stmt>get_binaries directory<block_start>result=set()<for_stmt>f os.listdir(directory)<block_start>filename=strip_whole_config(f)<if_stmt>filename<block_start>result.add(filename)<block_end><block_end><return>result<block_end><def_stmt>get_tags config<block_start><with_stmt>open(config 'r')<as>f<block_start>line=f.readline().rstrip('\n')<line_sep>tokens=line.split(' ')<if_stmt>tokens[0]<ne>'TAGS:'<block_start><return>[]<block_end><return>tokens[1:]<block_end><block_end><def_stmt>get_bin2tags directory<block_start>result={}<for_stmt>f os.listdir(directory)<block_start>filename=strip_whole_config(f)<if_stmt><not>filename<block_start><continue><block_end>tags=get_tags(os.path.join(directory f))<if_stmt>filename<not><in>result<block_start>result[filename]=tags<block_end><else_stmt><block_start>result[filename].append(tags)<block_end><block_end><return>result<block_end><def_stmt>get_cfg directory name<block_start><return>os.path.join(directory name+'.cfg')<block_end> |
<import_from_stmt>conans ConanFile tools<import_from_stmt>conans.errors ConanException<import_from_stmt>io StringIO<import_stmt>os<import_stmt>textwrap<class_stmt>TestPackageConan(ConanFile)<block_start>settings="os" "arch" "compiler" "build_type"<line_sep>@property<def_stmt>_m4_input_path self<block_start><return>os.path.join(self.build_folder "input.m4")<block_end><def_stmt>build self<block_start>tools.save(self._m4_input_path textwrap.dedent("""\
m4_define(NAME1, `<NAME>.')
m4_define(NAME2, `Sally')
m4_define(MET, `$1 met $2')
MET(`NAME1', `NAME2')
"""))<block_end><def_stmt>test self<block_start><if_stmt>hasattr(self "settings_build")<block_start>exe_suffix=".exe"<if>self.settings.os<eq>"Windows"<else>""<line_sep>m4_bin=os.path.join(self.deps_cpp_info["m4"].rootpath "bin" "m4"+exe_suffix)<block_end><else_stmt><block_start>m4_bin=tools.get_env("M4")<if_stmt>m4_bin<is><none><or><not>m4_bin.startswith(self.deps_cpp_info["m4"].rootpath)<block_start><raise>ConanException("M4 environment variable not set")<block_end><block_end><if_stmt><not>tools.cross_building(self skip_x64_x86=<true>)<block_start>self.run("{} --version".format(m4_bin) run_environment=<true>)<line_sep>self.run("{} -P {}".format(m4_bin self._m4_input_path))<line_sep>self.run("m4 -R {0}/frozen.m4f {0}/test.m4".format(self.source_folder) run_environment=<true>)<line_sep>output=StringIO()<line_sep>self.run("{} -P {}".format(m4_bin self._m4_input_path) output=output)<assert_stmt>"<NAME>. met Sally"<in>output.getvalue()<block_end><block_end><block_end> |
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
<import_from_future_stmt> absolute_import<import_stmt>numpy<import_stmt>pytest<import_stmt>uproot<line_sep>@pytest.mark.network<def_stmt>test <block_start><with_stmt>uproot.open("https://starterkit.web.cern.ch/starterkit/data/advanced-python-2019/RD_distribution.root:tree")<as>f<block_start>whole_branch=f["vchi2_b"].array(library="np")<assert_stmt>whole_branch[0]<eq>5.234916687011719<assert_stmt>whole_branch[-1]<eq>12.466843605041504<line_sep>whole_branch=f["mu_pt_sum"].array(library="np")<assert_stmt>whole_branch[0]<eq>26.4675350189209<assert_stmt>whole_branch[-1]<eq>39.84319305419922<block_end><block_end> |
""" 测试单(场景)
松山湖AI制造业推理平台性能测试SLI/SLO
1. 通过HTTP接口推送原始数据集和推理脚本(具体数量、频次待定)
2. 平台将数据写入nfs/ceph、数据库的读写性能测试(以及IOPS)
3. 100批量数据标注、图像预览响应测试
4. 数据集、模型的增删改查的接口响应(暂定32x6个模型、数据集)
5. 模型转换测试(暂定32x6个模型、数据集)
6. 数据集转换测试(暂定32x6个模型、数据集)
7. 10x32x6个分布式推理任务调度的稳定性
8. 64mpbs,128Mbps图片流量的负载测试
9. 测试(客户)环境rabbitmq的吞吐量和响应延时
10. 1000次/s的HTTP推理请求失败率
11. 1000次/s的HTTP推理结果请求失败率(上传到平台数据库)
12. 1/1000不良率的告警响应测试
13. master节点在模型转换、数据集转换时IO,CPU,MEM的使用率
14. master、A3010在满载推理业务时的网络负载,IO,CPU,MEM占用率
# ScriptType:performance test
# UpdateDate: 2021.03-4
# Maintainer: thomas
# Env: Win10 64bit, python3.8
"""<import_from_stmt>locust HttpUser TaskSet task between<import_from_stmt>locust.contrib.fasthttp FastHttpUser<import_from_stmt>locust events<import_from_stmt>locust.clients HttpSession<import_stmt>logging<import_stmt>json<import_stmt>os<import_stmt>yaml<import_stmt>pdb<import_stmt>hashlib<import_from_stmt>testhub.testlib fake_users<import_from_stmt>testhub.testlib csv_client<line_sep>TEST_CONF=os.path.join(os.path.abspath(os.path.dirname(os.path.abspath(__file__))+os.path.sep) "datas.yaml")<line_sep>TEST_DATAS={}<line_sep>DATA_PREFIX="songshanhu"<line_sep>USER_CREDENTIALS=[]<def_stmt>read_test_datas conf_file=TEST_CONF<block_start>stream={}<with_stmt>open(conf_file 'r')<as>cf<block_start>stream=cf.read()<block_end>conf=yaml.safe_load(stream)<line_sep><return>conf<block_end>@events.quitting.add_listener<def_stmt>_ environment **kw<block_start><if_stmt>environment.stats.total.fail_ratio<g>0.001<block_start>logging.error("Test failed due to failure ratio > 1%")<line_sep>environment.process_exit_code=1<block_end><elif_stmt>environment.stats.total.avg_response_time<g>5000<block_start>logging.error("Test failed due to average response time ratio > 200 ms")<line_sep>environment.process_exit_code=2<block_end><elif_stmt>environment.stats.total.get_response_time_percentile(0.99)<g>2000<block_start>logging.error("Test failed due to 95th percentile response time > 800 ms")<line_sep>environment.process_exit_code=3<block_end><else_stmt><block_start>environment.process_exit_code=0<block_end><block_end><class_stmt>Datasets(TaskSet)<block_start>""" testsuite
1. Push raw datasets and inference scripts via the HTTP interface (exact volume and frequency TBD)
2. Read/write performance test (and IOPS) of the platform writing data to nfs/ceph and the database
4. API response for create/read/update/delete of datasets and models (tentatively 32x6 models and datasets)
5. Model conversion test (tentatively 32x6 models and datasets)
6. Dataset conversion test (tentatively 32x6 models and datasets)
13. IO, CPU and MEM utilization of the master node during model conversion and dataset conversion
14. Network load and IO, CPU, MEM usage of master and A3010 under full inference load
"""<line_sep><global>TEST_DATAS<line_sep>datasets_session={}<def_stmt>on_start self<block_start>print("======================= A new test is starting, user will login {} ! =======================".format(TEST_DATAS["ENV"]["HOST"]))<line_sep>self.client.request("get" TEST_DATAS["RESTFULAPI"]["homepage"])<line_sep>self.client.header=TEST_DATAS["RESTFULAPI"]["header"]<line_sep>aaccount=USER_CREDENTIALS.pop()<line_sep>response=self.client.request("post" url=TEST_DATAS["RESTFULAPI"]["login"]["path"] data=data)<line_sep>result=response.json()<line_sep># pdb.set_trace()
<try_stmt><block_start><if_stmt>result["success"]<block_start>TEST_DATAS["ACCOUNT"]["token"]=result["token"]<line_sep>TEST_DATAS["ACCOUNT"]["currentRole_id"]=result["currentRole"][0]["id"]<line_sep>TEST_DATAS["RESTFULAPI"]["header"]["Authorization"]="Bearer "+TEST_DATAS["ACCOUNT"]["token"]<line_sep>TEST_DATAS["RESTFULAPI"]["cookie"]=response.cookies<block_end><block_end><except_stmt>KeyError<block_start>response.raise_for_status()<block_end><block_end><def_stmt>on_stop self<block_start>print("======================= A test is ending, user will logout {} ! =======================".format(TEST_DATAS["ENV"]["HOST"]))<line_sep>response=self.client.request("get" url=TEST_DATAS["RESTFULAPI"]["logout"]["path"])<block_end>@task(1)<def_stmt>test_create_dataset self<block_start>""" testcases
1. Register a new user group
"""<line_sep>datasets_info=fake_users.new_datastes_songshanhu()<with_stmt>self.client.request("post" url=TEST_DATAS["RESTFULAPI"]["create_group"]["path"] headers=TEST_DATAS["RESTFULAPI"]["header"] json=datasets_info)<as>resp<block_start>self.datasets_session["datasets_id"]=resp["data"]["id"]<line_sep>self.datasets_session["datasetCode"]=resp["data"]["datasetCode"]<block_end><block_end>@task(0)<def_stmt>test_upload_datas self<block_start>""" testcases
2. Upload a compressed archive
"""<line_sep>self.datasets_session["datasets_id"]=resp["data"]["id"]<line_sep>self.datasets_session["datasetCode"]=resp["data"]["datasetCode"]<with_stmt>self.client.request("post" url=TEST_DATAS["RESTFULAPI"]["create_group"]["path"] headers=TEST_DATAS["RESTFULAPI"]["header"] json=datasets_info)<as>resp<block_start>self.datasets_session["datasets_uploaded_path"]=resp["data"]<block_end><block_end><block_end><class_stmt>BasicalDatas(HttpUser)<block_start><global>TEST_DATAS<line_sep><global>USER_CREDENTIALS<line_sep>sock=<none><line_sep>wait_time=between(0.5 2)<line_sep>TEST_DATAS=read_test_datas(conf_file=TEST_CONF)<line_sep>USER_CREDENTIALS=[{'userName':ic['userName'] 'password':ic['password']}<for>ic csv_client.csv_reader_as_json(csv_path=TEST_DATAS["ACCOUNT"]["CSV_PATH"])<if>"userName"<ne>ic['userName']]<line_sep>host=TEST_DATAS["ENV"]["HOST"]<line_sep>tasks=[Datasets]<block_end><if_stmt>__name__<eq>"__main__"# global DATA_PREFIX
<block_start>DATA_PREFIX="songshanhu"<line_sep><pass><line_sep># locust -f testhub/testsuites/songshanhu/test_datasets.py --conf testhub/testsuites/songshanhu/host.conf
<block_end> |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
<import_from_stmt>typing Callable Optional<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>egg.core.interaction LoggingStrategy<class_stmt>ContinuousLinearSender(nn.Module)<block_start><def_stmt>__init__ self agent:nn.Module encoder_input_size:int encoder_hidden_size:int=64 num_layers:int=1 activation:str="relu" <block_start>super(ContinuousLinearSender self).__init__()<line_sep>self.agent=agent<line_sep>activations={"relu":F.relu "tanh":F.tanh "leaky_relu":F.leaky_relu "identity":nn.Identity() }<line_sep>self.activation=activations[activation.lower()]<line_sep>encoder_hidden_sizes=[encoder_hidden_size]<times>num_layers<line_sep>encoder_layer_dimensions=[(encoder_input_size encoder_hidden_sizes[0])]<for_stmt>i,hidden_size enumerate(encoder_hidden_sizes[1:])<block_start>hidden_shape=(self.encoder_hidden_sizes[i] hidden_size)<line_sep>encoder_layer_dimensions.append(hidden_shape)<block_end>self.encoder_hidden_layers=nn.ModuleList([nn.Linear(*dimensions)<for>dimensions encoder_layer_dimensions])<block_end><def_stmt>forward self x aux_input=<none><block_start>x=self.agent(x aux_input)<for_stmt>hidden_layer self.encoder_hidden_layers[:-1]<block_start>x=self.activation(hidden_layer(x))<block_end>sender_output=self.encoder_hidden_layers[-1](x)<line_sep><return>sender_output<block_end><block_end><class_stmt>ContinuousLinearReceiver(nn.Module)<block_start><def_stmt>__init__ self agent:nn.Module <block_start>super(ContinuousLinearReceiver self).__init__()<line_sep>self.agent=agent<block_end><def_stmt>forward self message input=<none> aux_input=<none><block_start>agent_output=self.agent(message input aux_input)<line_sep><return>agent_output<block_end><block_end><class_stmt>SenderReceiverContinuousCommunication(nn.Module)<block_start><def_stmt>__init__ self sender:nn.Module receiver:nn.Module loss:Callable train_logging_strategy:Optional[LoggingStrategy]=<none> test_logging_strategy:Optional[LoggingStrategy]=<none> <block_start>"""
:param sender: Sender agent. sender.forward() has to output a continuous vector
:param receiver: Receiver agent. receiver.forward() has to accept two parameters:
message and receiver_input.
`message` is shaped as (batch_size, vocab_size).
:param loss: Callable that outputs differentiable loss, takes the following parameters:
* sender_input: input to Sender (comes from dataset)
* message: message sent from Sender
* receiver_input: input to Receiver from dataset
* receiver_output: output of Receiver
* labels: labels that come from dataset
:param train_logging_strategy, test_logging_strategy: specify what parts of interactions to persist for
later analysis in the callbacks.
"""<line_sep>super(SenderReceiverContinuousCommunication self).__init__()<line_sep>self.sender=sender<line_sep>self.receiver=receiver<line_sep>self.loss=loss<line_sep>self.train_logging_strategy=(LoggingStrategy()<if>train_logging_strategy<is><none><else>train_logging_strategy)<line_sep>self.test_logging_strategy=(LoggingStrategy()<if>test_logging_strategy<is><none><else>test_logging_strategy)<block_end><def_stmt>forward self sender_input labels receiver_input=<none> aux_input=<none><block_start>message=self.sender(sender_input aux_input)<line_sep>receiver_output=self.receiver(message receiver_input aux_input)<line_sep>loss,aux_info=self.loss(sender_input message receiver_input receiver_output labels aux_input)<line_sep>logging_strategy=(self.train_logging_strategy<if>self.training<else>self.test_logging_strategy)<line_sep>interaction=logging_strategy.filtered_interaction(sender_input=sender_input receiver_input=receiver_input labels=labels aux_input=aux_input receiver_output=receiver_output message=message.detach() message_length=torch.ones(message[0].size(0)) aux=aux_info )<line_sep><return>loss.mean() interaction<block_end><block_end> |
'''
VideoNull: empty implementation of VideoBase for the no provider case
'''<import_from_stmt>kivy.core.video VideoBase<class_stmt>VideoNull(VideoBase)<block_start>'''VideoBase implementation when there is no provider.
'''<line_sep><pass><block_end> |
# coding: utf-8
<import_from_stmt>jsl StringField ArrayField Var DictField NotField Document DocumentField <import_from_stmt>jsl.fields.compound BaseOfField<line_sep>a=StringField()<line_sep>b=StringField()<line_sep>c=StringField()<line_sep>d=StringField()<line_sep>e=StringField()<line_sep>f=StringField()<line_sep>g=StringField()<line_sep>h=StringField()<line_sep>j=StringField()<def_stmt>test_array_field <block_start>field=ArrayField(Var({'role_1':a 'role_2':b 'role_none':<none> }) additional_items=Var({'role_3':c 'role_4':d 'role_1':e 'role_none':<none> }))<assert_stmt>set(field.iter_fields())<eq>set([a b c d e])<assert_stmt>set(field.resolve_and_iter_fields('role_1'))<eq>set([a e])<assert_stmt>set(field.resolve_and_iter_fields('role_3'))<eq>set([c])<assert_stmt>set(field.resolve_and_iter_fields('role_none'))<eq>set([])<line_sep>field=ArrayField(Var({'role_1':(a b) 'role_2':c}) additional_items=d)<assert_stmt>set(field.iter_fields())<eq>set([a b c d])<line_sep>field=ArrayField((Var({'role_1':a 'role_2':b 'role_none':<none>}) c))<assert_stmt>set(field.iter_fields())<eq>set([a b c])<assert_stmt>set(field.resolve_and_iter_fields('role_1'))<eq>set([a c])<assert_stmt>set(field.resolve_and_iter_fields('role_none'))<eq>set([c])<line_sep>field=ArrayField(a additional_items=b)<assert_stmt>set(field.iter_fields())<eq>set([a b])<assert_stmt>set(field.resolve_and_iter_fields('some_role'))<eq>set([a b])<line_sep>field=ArrayField()<assert_stmt>set(field.iter_fields())<eq>set([])<block_end><def_stmt>test_dict_field <block_start>field=DictField(properties=Var({'role_1':{'a':Var({'role_a':a 'role_none':<none> }) 'b':b 'role_none':<none> } 'role_2':{'c':c} 'role_none':<none> }) pattern_properties=Var({'role_3':{'x*':Var({'role_b':d 'role_none':<none> }) } 'role_4':{'y*':e} 'role_none':<none> }) additional_properties=Var({'role_5':f 'role_6':g 'role_none':<none> }))<assert_stmt>set(field.iter_fields())<eq>set([a b c d e f g])<line_sep>field=DictField(properties={'a':a} pattern_properties={'b':b} additional_properties=c)<assert_stmt>set(field.iter_fields())<eq>set([a b c])<line_sep>field=DictField()<assert_stmt>set(field.iter_fields())<eq>set([])<block_end><def_stmt>test_base_of_field <block_start>field=BaseOfField((a b))<assert_stmt>set(field.iter_fields())<eq>set([a b])<line_sep>field=BaseOfField(Var({'role_1':(a b) 'role_2':c 'role_3':<none> # probably should raise?
}))<assert_stmt>set(field.iter_fields())<eq>set([a b c])<block_end><def_stmt>test_not_field <block_start>field=NotField(a)<assert_stmt>set(field.iter_fields())<eq>set([a])<assert_stmt>set(field.resolve_and_iter_fields('some_role'))<eq>set([a])<line_sep>field=NotField(Var({'role_1':a 'role_2':b 'role_3':<none> # probably should raise?
}))<assert_stmt>set(field.iter_fields())<eq>set([a b])<assert_stmt>set(field.resolve_and_iter_fields('role_1'))<eq>set([a])<assert_stmt>set(field.resolve_and_iter_fields('role_3'))<eq>set([])<block_end><def_stmt>test_document_field <block_start><class_stmt>A(Document)<block_start>a=a<line_sep>b=b<block_end>field=DocumentField(A)<assert_stmt>set(field.iter_fields())<eq>set([a b])<class_stmt>B(Document)<block_start>field=Var({'a':a 'b':b})<line_sep>b=c<block_end>field=DocumentField(B)<assert_stmt>set(field.iter_fields())<eq>set([a b c])<class_stmt>C(Document)<block_start><pass><block_end>field=DocumentField(C)<assert_stmt>set(field.iter_fields())<eq>set([])<block_end> |
<import_stmt>os<import_stmt>sys<import_stmt>yaml<import_stmt>pickle<import_from_stmt>aw_nas germ<import_from_stmt>aw_nas.weights_manager.base BaseWeightsManager<import_from_stmt>aw_nas.common rollout_from_genotype_str<line_sep>ss=germ.GermSearchSpace()<line_sep>wm=BaseWeightsManager.get_class_("germ")(ss "cuda" rollout_type="germ" germ_supernet_type="nds_resnexta" germ_supernet_cfg={"num_classes":10 "stem_type":"res_stem_cifar" "group_search":<true>})<line_sep>arch_file=sys.argv[1]<line_sep>gt_file=sys.argv[2]<line_sep># ---- parse arch file ----
<with_stmt>open(arch_file "r")<as>r_f<block_start>archs=yaml.load(r_f)<block_end>nogroup_archs=[]<for_stmt>arch archs<block_start>rollout=rollout_from_genotype_str(arch ss)<if_stmt>all(rollout['num_groups.{}'.format(i)]<eq>1<for>i range(3))# all `num_groups` == 1
<block_start>[rollout.arch.pop("num_groups.{}".format(i))<for>i range(3)]<line_sep>nogroup_archs.append(rollout.genotype)<block_end><block_end>out_arch_fname=os.path.join(os.path.dirname(arch_file) "nogroup_{}".format(os.path.basename(arch_file)))<line_sep>print("Dumped {} archs to {}".format(len(nogroup_archs) out_arch_fname))<with_stmt>open(out_arch_fname "w")<as>w_f<block_start>yaml.dump(nogroup_archs w_f)<block_end># ---- parse gt pickle file ----
<with_stmt>open(gt_file "rb")<as>r_f<block_start>gt=pickle.load(r_f)<block_end>nogroup_gts=[]<for_stmt>arch,param,flops,acc zip(*gt)<block_start>rollout=rollout_from_genotype_str(arch ss)<if_stmt>all(rollout['num_groups.{}'.format(i)]<eq>1<for>i range(3))# all `num_groups` == 1
<block_start>[rollout.arch.pop("num_groups.{}".format(i))<for>i range(3)]<line_sep>nogroup_gts.append([rollout.genotype param flops acc])<block_end><block_end>nogroup_gts=list(zip(*nogroup_gts))<line_sep>out_gt_fname=os.path.join(os.path.dirname(gt_file) "nogroup_{}".format(os.path.basename(gt_file)))<with_stmt>open(out_gt_fname "wb")<as>w_f<block_start>pickle.dump(nogroup_gts w_f)<block_end>print("Dumped {} gt entries to {}".format(len(nogroup_gts[0]) out_gt_fname))<line_sep> |
<import_stmt>os<import_stmt>pytest<import_stmt>yaml<import_from_stmt>kopf._cogs.structs.credentials LoginError<import_from_stmt>kopf._core.intents.piggybacking has_kubeconfig login_with_kubeconfig<line_sep>MINICONFIG='''
kind: Config
current-context: ctx
contexts:
- name: ctx
context:
cluster: clstr
user: usr
clusters:
- name: clstr
users:
- name: usr
'''<line_sep>@pytest.mark.parametrize('envs' [{} {'KUBECONFIG':''}] ids=['absent' 'empty'])<def_stmt>test_has_no_kubeconfig_when_nothing_is_provided mocker envs<block_start>exists_mock=mocker.patch('os.path.exists' return_value=<false>)<line_sep>mocker.patch.dict(os.environ envs clear=<true>)<line_sep>result=has_kubeconfig()<assert_stmt>result<is><false><assert_stmt>exists_mock.call_count<eq>1<assert_stmt>exists_mock.call_args_list[0][0][0].endswith('/.kube/config')<block_end>@pytest.mark.parametrize('envs' [{'KUBECONFIG':'x'}] ids=['set'])<def_stmt>test_has_kubeconfig_when_envvar_is_set_but_no_homedir mocker envs<block_start>exists_mock=mocker.patch('os.path.exists' return_value=<false>)<line_sep>mocker.patch.dict(os.environ envs clear=<true>)<line_sep>result=has_kubeconfig()<assert_stmt>result<is><true><assert_stmt>exists_mock.call_count<eq>1<assert_stmt>exists_mock.call_args_list[0][0][0].endswith('/.kube/config')<block_end>@pytest.mark.parametrize('envs' [{} {'KUBECONFIG':''}] ids=['absent' 'empty'])<def_stmt>test_has_kubeconfig_when_homedir_exists_but_no_envvar mocker envs<block_start>exists_mock=mocker.patch('os.path.exists' return_value=<true>)<line_sep>mocker.patch.dict(os.environ envs clear=<true>)<line_sep>result=has_kubeconfig()<assert_stmt>result<is><true><assert_stmt>exists_mock.call_count<eq>1<assert_stmt>exists_mock.call_args_list[0][0][0].endswith('/.kube/config')<block_end>@pytest.mark.parametrize('envs' [{} {'KUBECONFIG':''}] ids=['absent' 'empty'])<def_stmt>test_homedir_is_used_if_it_exists tmpdir mocker envs<block_start>exists_mock=mocker.patch('os.path.exists' return_value=<true>)<line_sep>open_mock=mocker.patch('kopf._core.intents.piggybacking.open')<line_sep>open_mock.return_value.__enter__.return_value.read.return_value=MINICONFIG<line_sep>mocker.patch.dict(os.environ envs clear=<true>)<line_sep>credentials=login_with_kubeconfig()<assert_stmt>exists_mock.call_count<eq>1<assert_stmt>exists_mock.call_args_list[0][0][0].endswith('/.kube/config')<assert_stmt>open_mock.call_count<eq>1<assert_stmt>open_mock.call_args_list[0][0][0].endswith('/.kube/config')<assert_stmt>credentials<is><not><none><block_end>@pytest.mark.parametrize('envs' [{} {'KUBECONFIG':''}] ids=['absent' 'empty'])<def_stmt>test_homedir_is_ignored_if_it_is_absent tmpdir mocker envs<block_start>exists_mock=mocker.patch('os.path.exists' return_value=<false>)<line_sep>open_mock=mocker.patch('kopf._core.intents.piggybacking.open')<line_sep>open_mock.return_value.__enter__.return_value.read.return_value=''<line_sep>mocker.patch.dict(os.environ envs clear=<true>)<line_sep>credentials=login_with_kubeconfig()<assert_stmt>exists_mock.call_count<eq>1<assert_stmt>exists_mock.call_args_list[0][0][0].endswith('/.kube/config')<assert_stmt>open_mock.call_count<eq>0<assert_stmt>credentials<is><none><block_end><def_stmt>test_absent_kubeconfig_fails tmpdir mocker<block_start>kubeconfig=tmpdir.join('config')<line_sep>mocker.patch.dict(os.environ clear=<true> KUBECONFIG=str(kubeconfig))<with_stmt>pytest.raises(IOError)<block_start>login_with_kubeconfig()<block_end><block_end><def_stmt>test_corrupted_kubeconfig_fails tmpdir mocker<block_start>kubeconfig=tmpdir.join('config')<line_sep>kubeconfig.write("""!!acb!.-//:""")# invalid yaml
mocker.patch.dict(os.environ clear=<true> KUBECONFIG=str(kubeconfig))<with_stmt>pytest.raises(yaml.YAMLError)<block_start>login_with_kubeconfig()<block_end><block_end><def_stmt>test_empty_kubeconfig_fails tmpdir mocker<block_start>kubeconfig=tmpdir.join('config')<line_sep>kubeconfig.write('')<line_sep>mocker.patch.dict(os.environ clear=<true> KUBECONFIG=str(kubeconfig))<with_stmt>pytest.raises(LoginError)<as>err<block_start>login_with_kubeconfig()<block_end><assert_stmt>"context is not set"<in>str(err.value)<block_end><def_stmt>test_mini_kubeconfig_reading tmpdir mocker<block_start>kubeconfig=tmpdir.join('config')<line_sep>kubeconfig.write(MINICONFIG)<line_sep>mocker.patch.dict(os.environ clear=<true> KUBECONFIG=str(kubeconfig))<line_sep>credentials=login_with_kubeconfig()<assert_stmt>credentials<is><not><none><assert_stmt>credentials.server<is><none><assert_stmt>credentials.insecure<is><none><assert_stmt>credentials.scheme<is><none><assert_stmt>credentials.token<is><none><assert_stmt>credentials.certificate_path<is><none><assert_stmt>credentials.certificate_data<is><none><assert_stmt>credentials.private_key_path<is><none><assert_stmt>credentials.private_key_data<is><none><assert_stmt>credentials.ca_path<is><none><assert_stmt>credentials.ca_data<is><none><assert_stmt>credentials.password<is><none><assert_stmt>credentials.username<is><none><assert_stmt>credentials.default_namespace<is><none><block_end><def_stmt>test_full_kubeconfig_reading tmpdir mocker<block_start>kubeconfig=tmpdir.join('config')<line_sep>kubeconfig.write('''
kind: Config
current-context: ctx
contexts:
- name: ctx
context:
cluster: clstr
user: usr
namespace: ns
- name: def
clusters:
- name: clstr
cluster:
server: https://hostname:1234/
certificate-authority-data: base64dataA
certificate-authority: /pathA
insecure-skip-tls-verify: true
- name: hij
users:
- name: usr
user:
username: uname
password: <PASSWORD>
client-certificate-data: base64dataC
client-certificate: /pathC
client-key-data: base64dataK
client-key: /pathK
token: tkn
- name: klm
''')<line_sep>mocker.patch.dict(os.environ clear=<true> KUBECONFIG=str(kubeconfig))<line_sep>credentials=login_with_kubeconfig()<assert_stmt>credentials<is><not><none><assert_stmt>credentials.server<eq>'https://hostname:1234/'<assert_stmt>credentials.insecure<eq><true><assert_stmt>credentials.scheme<is><none><assert_stmt>credentials.token<eq>'<PASSWORD>'<assert_stmt>credentials.certificate_path<eq>'/pathC'<assert_stmt>credentials.certificate_data<eq>'base64dataC'<assert_stmt>credentials.private_key_path<eq>'/pathK'<assert_stmt>credentials.private_key_data<eq>'base64dataK'<assert_stmt>credentials.ca_path<eq>'/pathA'<assert_stmt>credentials.ca_data<eq>'base64dataA'<assert_stmt>credentials.password<eq>'<PASSWORD>'<assert_stmt>credentials.username<eq>'uname'<assert_stmt>credentials.default_namespace<eq>'ns'<block_end><def_stmt>test_kubeconfig_with_provider_token tmpdir mocker<block_start>kubeconfig=tmpdir.join('config')<line_sep>kubeconfig.write('''
kind: Config
current-context: ctx
contexts:
- name: ctx
context:
cluster: clstr
user: usr
clusters:
- name: clstr
users:
- name: usr
user:
auth-provider:
config:
access-token: <PASSWORD>
''')<line_sep>mocker.patch.dict(os.environ clear=<true> KUBECONFIG=str(kubeconfig))<line_sep>credentials=login_with_kubeconfig()<assert_stmt>credentials<is><not><none><assert_stmt>credentials.token<eq>'<PASSWORD>'<block_end><def_stmt>test_merged_kubeconfigs_across_currentcontext tmpdir mocker<block_start>kubeconfig1=tmpdir.join('config1')<line_sep>kubeconfig1.write('''
kind: Config
current-context: ctx
''')<line_sep>kubeconfig2=tmpdir.join('config2')<line_sep>kubeconfig2.write('''
kind: Config
contexts:
- name: ctx
context:
cluster: clstr
user: usr
namespace: ns
clusters:
- name: clstr
cluster:
server: srv
users:
- name: usr
user:
token: <PASSWORD>
''')<line_sep>mocker.patch.dict(os.environ clear=<true> KUBECONFIG=f'{kubeconfig1}{os.pathsep}{kubeconfig2}')<line_sep>credentials=login_with_kubeconfig()<assert_stmt>credentials<is><not><none><assert_stmt>credentials.default_namespace<eq>'ns'<assert_stmt>credentials.server<eq>'srv'<assert_stmt>credentials.token<eq>'<PASSWORD>'<block_end><def_stmt>test_merged_kubeconfigs_across_contexts tmpdir mocker<block_start>kubeconfig1=tmpdir.join('config1')<line_sep>kubeconfig1.write('''
kind: Config
current-context: ctx
contexts:
- name: ctx
context:
cluster: clstr
user: usr
namespace: ns
''')<line_sep>kubeconfig2=tmpdir.join('config2')<line_sep>kubeconfig2.write('''
kind: Config
clusters:
- name: clstr
cluster:
server: srv
users:
- name: usr
user:
token: tkn
''')<line_sep>mocker.patch.dict(os.environ clear=<true> KUBECONFIG=f'{kubeconfig1}{os.pathsep}{kubeconfig2}')<line_sep>credentials=login_with_kubeconfig()<assert_stmt>credentials<is><not><none><assert_stmt>credentials.default_namespace<eq>'ns'<assert_stmt>credentials.server<eq>'srv'<assert_stmt>credentials.token<eq>'<PASSWORD>'<block_end><def_stmt>test_merged_kubeconfigs_first_wins tmpdir mocker<block_start>kubeconfig1=tmpdir.join('config1')<line_sep>kubeconfig1.write('''
kind: Config
current-context: ctx
contexts:
- name: ctx
context:
cluster: clstr
user: usr
namespace: ns1
clusters:
- name: clstr
cluster:
server: srv1
users:
- name: usr
user:
token: <PASSWORD>
''')<line_sep>kubeconfig2=tmpdir.join('config2')<line_sep>kubeconfig2.write('''
kind: Config
current-context: ctx
contexts:
- name: ctx
context:
cluster: clstr
user: usr
namespace: ns2
clusters:
- name: clstr
cluster:
server: srv2
users:
- name: usr
user:
token: <PASSWORD>
''')<line_sep>mocker.patch.dict(os.environ clear=<true> KUBECONFIG=f'{kubeconfig1}{os.pathsep}{kubeconfig2}')<line_sep>credentials=login_with_kubeconfig()<assert_stmt>credentials<is><not><none><assert_stmt>credentials.default_namespace<eq>'ns1'<assert_stmt>credentials.server<eq>'srv1'<assert_stmt>credentials.token<eq>'<PASSWORD>'<block_end> |
<class_stmt>Solution<block_start><def_stmt>nthUglyNumber self n<block_start>ugly=[1]<line_sep>i2=i3=i5=0<while_stmt>len(ugly)<l>n<block_start><while_stmt>ugly[i2]<times>2<le>ugly[-1]<block_start>i2<augadd>1<block_end><while_stmt>ugly[i3]<times>3<le>ugly[-1]<block_start>i3<augadd>1<block_end><while_stmt>ugly[i5]<times>5<le>ugly[-1]<block_start>i5<augadd>1<block_end>ugly.append(min(ugly[i2]<times>2 ugly[i3]<times>3 ugly[i5]<times>5))<block_end><return>ugly[-1]<block_end><block_end> |
# Local imports
<import_from_stmt>uplink returns<def_stmt>test_returns request_builder<block_start>custom=returns(str)<line_sep>request_builder.get_converter.return_value=str<line_sep>request_builder.return_type=returns.ReturnType.with_decorator(<none> custom)<line_sep>custom.modify_request(request_builder)<assert_stmt>request_builder.return_type(2)<eq>"2"<block_end><def_stmt>test_returns_with_multiple_decorators request_builder mocker<block_start>decorator1=returns(str)<line_sep>decorator2=returns.json()<line_sep>request_builder.get_converter.return_value=str<line_sep>first_type=returns.ReturnType.with_decorator(<none> decorator1)<line_sep>second_type=(request_builder.return_type)=returns.ReturnType.with_decorator(first_type decorator2)<line_sep># Verify that the return type doesn't change after being handled by first decorator
decorator1.modify_request(request_builder)<assert_stmt>request_builder.return_type<is>second_type<line_sep># Verify that the second decorator does handle the return type
mock_response=mocker.Mock()<line_sep>mock_response.json.return_value={"key":"value"}<line_sep>decorator2.modify_request(request_builder)<assert_stmt>request_builder.return_type(mock_response)<eq>str(mock_response.json())<block_end><def_stmt>test_returns_json request_builder mocker<block_start>mock_response=mocker.Mock()<line_sep>mock_response.json.return_value={"key":"value"}<line_sep>request_builder.get_converter.return_value=str<line_sep>returns_json=returns.json(str ())<line_sep>request_builder.return_type=returns.ReturnType.with_decorator(<none> returns_json)<line_sep>returns_json.modify_request(request_builder)<assert_stmt>isinstance(request_builder.return_type returns.ReturnType)<assert_stmt>callable(request_builder.return_type)<assert_stmt>request_builder.return_type(mock_response)<eq>str(mock_response.json())<line_sep># Verify: Idempotent
returns_json.modify_request(request_builder)<assert_stmt>isinstance(request_builder.return_type returns.ReturnType)<assert_stmt>callable(request_builder.return_type)<assert_stmt>request_builder.return_type(mock_response)<eq>str(mock_response.json())<line_sep># Verify: Doesn't apply to unsupported types
request_builder.get_converter.return_value=<none><line_sep>returns_json=returns.json(str ())<line_sep>request_builder.return_type=returns.ReturnType.with_decorator(<none> returns_json)<line_sep>returns_json.modify_request(request_builder)<assert_stmt><not>callable(request_builder.return_type)<block_end><def_stmt>test_returns_JsonStrategy mocker<block_start>response=mocker.Mock(spec=["json"])<line_sep>response.json.return_value={"hello":"world"}<line_sep>converter=returns.JsonStrategy(<lambda>x:x "hello")<assert_stmt>converter(response)<eq>"world"<line_sep>converter=returns.JsonStrategy(<lambda>y:y+"!" "hello")<assert_stmt>converter(response)<eq>"world!"<assert_stmt>returns.JsonStrategy(1).unwrap()<eq>1<block_end> |
<import_from_stmt>.client is_delayed is_function_graft value_graft keyref_graft apply_graft function_graft merge_value_grafts guid isolate_keys parametrize consistent_guid <line_sep>__all__=["is_delayed" "is_function_graft" "value_graft" "keyref_graft" "apply_graft" "function_graft" "merge_value_grafts" "guid" "isolate_keys" "parametrize" "consistent_guid" ]<line_sep> |
# Copyright 2022 The T5X Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for partitioning."""<import_from_stmt>typing Any<import_from_stmt>absl.testing absltest<import_from_stmt>flax core<as>flax_core<import_from_stmt>flax optim<import_from_stmt>flax.linen partitioning<as>flax_partitioning<import_stmt>jax<import_stmt>numpy<as>np<import_from_stmt>t5x train_state<as>train_state_lib<import_from_stmt>t5x.contrib.moe partitioning<as>moe_partitioning<import_from_stmt>t5x.contrib.moe training_utils<line_sep>mock=absltest.mock<line_sep>AxisMetadata=flax_partitioning.AxisMetadata<line_sep>DataLayout=moe_partitioning.DataLayout<line_sep>FlaxOptimTrainState=train_state_lib.FlaxOptimTrainState<line_sep>InferenceState=train_state_lib.InferenceState<line_sep>PartitionSpec=moe_partitioning.PartitionSpec<line_sep>PRNGKey=Any<class_stmt>LogicalAdam(optim.Adam)<block_start>"""Subclass of Adam optimizer with T5X logical axis partitioning support."""<def_stmt>derive_logical_axes self optimizer_state param_logical_axes<block_start>"""Derives optimizer logical partitioning from model logical partitions."""<del_stmt>param_logical_axes# Return fixed axes for test
optimizer_logical_axes={'state':{'param_states':{'logits_dense':{'grad_ema':<none> 'grad_sq_ema':<none>} 'mlp':{'wo':{'kernel':{'grad_ema':PartitionSpec('embed' 'mlp') 'grad_sq_ema':<none>}}}} 'step':<none>} 'target':{'logits_dense':PartitionSpec('vocab' 'embed') 'mlp':{'wo':{'kernel':PartitionSpec('embed' 'mlp') } } }}<line_sep><return>optimizer_state.restore_state(optimizer_logical_axes)<block_end><block_end><def_stmt>create_optimizer <block_start>"""Creates simple Adam optimizer."""<line_sep>target={'logits_dense':np.ones((16 16) np.float32) 'mlp':{'wo':{'kernel':np.ones((32 16) np.float32)}}}<line_sep><return>LogicalAdam(learning_rate=1e-4).create(target)<block_end><class_stmt>PartitioningTest(absltest.TestCase)<block_start><def_stmt>test_default_data_layout self# No expert replication required. Use default data layout.
<block_start>partitioner=moe_partitioning.MoePjitPartitioner(num_experts=8 num_partitions=1)<line_sep>self.assertFalse(partitioner.two_data_axes)<line_sep>self.assertEqual(partitioner.get_data_layout(batch_size=32) DataLayout(batch_size=32 shard_id=0 num_shards=1 is_first_host_in_replica_set=<true>))<block_end><def_stmt>test_two_data_axis_layout_override self<block_start>partitioner=moe_partitioning.MoePjitPartitioner(num_experts=8 num_partitions=1)<line_sep># Force override case to check layout is valid.
partitioner.two_data_axes=<true><line_sep>partitioner._data_axis=('data' 'model')<line_sep>self.assertEqual(partitioner.get_data_layout(batch_size=8) DataLayout(batch_size=8 shard_id=0 num_shards=1 is_first_host_in_replica_set=<true>))<block_end><def_stmt>test_logical_axes_for_moe_partitioner_no_overrides self<block_start>partitioner=moe_partitioning.MoePjitPartitioner(num_experts=8 num_partitions=1 state_filter_fn=training_utils.match_fn(r'no_state_matching'))<line_sep>optimizer=create_optimizer()<line_sep>train_state=FlaxOptimTrainState(optimizer params_axes={'logits_dense_axes':AxisMetadata(names=('vocab' 'embed')) 'mlp':{'wo':{'kernel_axes':AxisMetadata(names=('embed' 'mlp'))}}})<line_sep>logical_axes=partitioner.get_logical_axes(train_state)<line_sep># No updates to state. Should match what derive_logical_axes() returns.
jax.tree_map(self.assertIsNone logical_axes.param_states['logits_dense'])<line_sep>self.assertEqual(logical_axes.param_states['mlp']['wo']['kernel'].grad_ema PartitionSpec('embed' 'mlp'))<line_sep>self.assertIsNone(logical_axes.param_states['mlp']['wo']['kernel'].grad_sq_ema)<line_sep>self.assertEqual(logical_axes.params {'logits_dense':PartitionSpec('vocab' 'embed') 'mlp':{'wo':{'kernel':PartitionSpec('embed' 'mlp')}}})<block_end><def_stmt>test_logical_axes_for_moe_partitioner_with_overrides self<block_start>partitioner=moe_partitioning.MoePjitPartitioner(num_experts=8 num_partitions=1 state_filter_fn=training_utils.match_fn(r'.*mlp.*'))<line_sep>optimizer=create_optimizer()<line_sep>train_state=FlaxOptimTrainState(optimizer params_axes={'logits_dense_axes':AxisMetadata(names=('vocab' 'embed')) 'mlp':{'wo':{'kernel_axes':AxisMetadata(names=('embed' 'mlp'))}}})<line_sep>logical_axes=partitioner.get_logical_axes(train_state)<line_sep>jax.tree_map(self.assertIsNone logical_axes.param_states['logits_dense'])<line_sep># 'mlp' params should be prepended with 'expert' spec because
# state_filter_fn matches '.*mlp.*'.
self.assertEqual(logical_axes.param_states['mlp']['wo']['kernel'].grad_ema PartitionSpec('expert' 'embed' 'mlp'))<line_sep>self.assertEqual(logical_axes.param_states['mlp']['wo']['kernel'].grad_sq_ema PartitionSpec('expert' ))<line_sep>self.assertEqual(logical_axes.params {'logits_dense':PartitionSpec('vocab' 'embed') 'mlp':{'wo':{'kernel':PartitionSpec('embed' 'mlp')}}})<block_end><def_stmt>test_inference_state_logical_axes self<block_start>partitioner=moe_partitioning.MoePjitPartitioner(num_experts=8 num_partitions=1)<line_sep>model_variables=flax_core.freeze({'params':{'dense':{'bias':np.zeros(4) 'kernel':np.zeros((2 4))}} 'params_axes':{'dense':{'bias_axes':AxisMetadata(names=('embed' )) 'kernel_axes':AxisMetadata(names=('vocab' 'embed')) }} })<line_sep>train_state=InferenceState.create(model_variables)<line_sep>logical_axes=partitioner.get_logical_axes(train_state)<line_sep># No expert axis overrides to InferenceState. Partition specs should match
# input axis metadata.
self.assertEqual(logical_axes InferenceState(step=<none> params=flax_core.FrozenDict({'dense':{'bias':PartitionSpec('embed' ) 'kernel':PartitionSpec('vocab' 'embed') } })))<block_end>@mock.patch('jax.device_count')<def_stmt>test_overridden_logical_axis_rules self device_count:int<block_start>device_count.return_value=4<line_sep># Fewer experts than devices --> modified axis rules with two 'batch' axes.
self.assertEqual(moe_partitioning.standard_logical_axis_rules(num_experts=1 num_partitions=1 model_parallel_submesh=<none> additional_rules=[('additional' 'model') ('expert_magic' 'data')]) [('batch' ('data' 'model')) # Shard batch over entire mesh
# No sharding of weights over model axis.
('vocab' <none>) ('embed' <none>) ('mlp' <none>) ('heads' <none>) ('kv' <none>) ('joined_kv' <none>) ('relpos_buckets' <none>) ('abspos_buckets' <none>) ('length' <none>) ('layers' <none>) ('stack' <none>) ('mlp_activations' <none>) ('expert' 'data') # Shard experts over "first" data axis only
('expert_mlp' <none>) ('expert_group' <none>) # Experts replicated along "second" data axis
('expert_replicas' 'model') ('unmodeled' <none>) ('additional' <none>) ('expert_magic' 'data') ])<block_end><def_stmt>test_default_logical_axis self# Model parallelism used --> default logical axis rules.
<block_start>self.assertEqual(moe_partitioning.standard_logical_axis_rules(num_experts=1 num_partitions=2 model_parallel_submesh=<none> additional_rules=[('additional' 'model')]) [('batch' 'data') # Shard batch over single data axis
# Default model annotations used.
('vocab' 'model') ('embed' <none>) ('mlp' 'model') ('heads' 'model') ('kv' <none>) ('joined_kv' 'model') ('relpos_buckets' <none>) ('abspos_buckets' <none>) ('length' <none>) ('layers' <none>) ('stack' <none>) ('mlp_activations' <none>) ('expert' 'data') # Shard experts along data axis
('expert_mlp' 'model') ('expert_group' <none>) ('expert_replicas' <none>) ('unmodeled' <none>) ('additional' 'model') ])<block_end><def_stmt>test_data_partition_spec self<block_start>self.assertEqual(moe_partitioning.data_partition_spec(two_data_axes=<false>) PartitionSpec('data' ))<line_sep>self.assertEqual(moe_partitioning.data_partition_spec(two_data_axes=<true>) PartitionSpec(('data' 'model') ))<block_end>@mock.patch('jax.device_count')<def_stmt>test_when_to_override_model_axis self device_count:int<block_start>device_count.return_value=4<line_sep># More experts than devices.
self.assertFalse(moe_partitioning._override_model_axis(num_experts=8 num_partitions=1 model_parallel_submesh=<none>))<line_sep># Fewer experts than devices.
self.assertTrue(moe_partitioning._override_model_axis(num_experts=1 num_partitions=1 model_parallel_submesh=<none>))<line_sep># Model parallelism used.
self.assertFalse(moe_partitioning._override_model_axis(num_experts=1 num_partitions=2 model_parallel_submesh=<none>))<block_end><def_stmt>test_axis_resource_overrides self<block_start>input_resources=(PartitionSpec('data') PartitionSpec('model') <none> PartitionSpec('unrecognized'))<line_sep>overridden_resources=moe_partitioning._override_partition_specs(input_resources)<line_sep># "data" -> ("data", "model"). "model" -> None.
self.assertEqual(overridden_resources (PartitionSpec(('data' 'model') ) <none> <none> PartitionSpec('unrecognized' )))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end> |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MPEG SA3D box processing classes.
Enables the injection of an SA3D box into an MPEG-4 file. The SA3D box specification
conforms to that outlined in docs/spatial-audio-rfc.md
"""<import_stmt>struct<import_from_stmt>spatialmedia.mpeg box<import_from_stmt>spatialmedia.mpeg constants<def_stmt>load fh position=<none> end=<none><block_start>""" Loads the SA3D box located at position in an mp4 file.
Args:
fh: file handle, input file handle.
position: int or None, current file position.
Returns:
new_box: box, SA3D box loaded from the file location or None.
"""<if_stmt>position<is><none><block_start>position=fh.tell()<block_end>fh.seek(position)<line_sep>new_box=SA3DBox()<line_sep>new_box.position=position<line_sep>size=struct.unpack(">I" fh.read(4))[0]<line_sep>name=fh.read(4)<if_stmt>(name<ne>constants.TAG_SA3D)<block_start>print("Error: box is not an SA3D box.")<line_sep><return><none><block_end><if_stmt>(position+size<g>end)<block_start>print("Error: SA3D box size exceeds bounds.")<line_sep><return><none><block_end>new_box.content_size=size-new_box.header_size<line_sep>new_box.version=struct.unpack(">B" fh.read(1))[0]<line_sep>new_box.ambisonic_type=struct.unpack(">B" fh.read(1))[0]<line_sep>new_box.head_locked_stereo=(new_box.ambisonic_type&int('10000000' 2)<ne>0)<line_sep>new_box.ambisonic_type=new_box.ambisonic_type&int('01111111' 2)<line_sep>new_box.ambisonic_order=struct.unpack(">I" fh.read(4))[0]<line_sep>new_box.ambisonic_channel_ordering=struct.unpack(">B" fh.read(1))[0]<line_sep>new_box.ambisonic_normalization=struct.unpack(">B" fh.read(1))[0]<line_sep>new_box.num_channels=struct.unpack(">I" fh.read(4))[0]<for_stmt>i range(0 new_box.num_channels)<block_start>new_box.channel_map.append(struct.unpack(">I" fh.read(4))[0])<block_end><return>new_box<block_end><class_stmt>SA3DBox(box.Box)<block_start>ambisonic_types={'periphonic':0}<line_sep>ambisonic_orderings={'ACN':0}<line_sep>ambisonic_normalizations={'SN3D':0}<def_stmt>__init__ self<block_start>box.Box.__init__(self)<line_sep>self.name=constants.TAG_SA3D<line_sep>self.header_size=8<line_sep>self.version=0<line_sep>self.ambisonic_type=0<line_sep>self.head_locked_stereo=<false><line_sep>self.ambisonic_order=0<line_sep>self.ambisonic_channel_ordering=0<line_sep>self.ambisonic_normalization=0<line_sep>self.num_channels=0<line_sep>self.channel_map=list()<block_end>@staticmethod<def_stmt>create num_channels audio_metadata<block_start>new_box=SA3DBox()<line_sep>new_box.header_size=8<line_sep>new_box.name=constants.TAG_SA3D<line_sep>new_box.version=0# uint8
new_box.content_size<augadd>1# uint8
new_box.ambisonic_type=SA3DBox.ambisonic_types[audio_metadata["ambisonic_type"]]<line_sep>new_box.head_locked_stereo=audio_metadata["head_locked_stereo"]<line_sep>new_box.content_size<augadd>1# uint8
new_box.ambisonic_order=audio_metadata["ambisonic_order"]<line_sep>new_box.content_size<augadd>4# uint32
new_box.ambisonic_channel_ordering=SA3DBox.ambisonic_orderings[audio_metadata["ambisonic_channel_ordering"]]<line_sep>new_box.content_size<augadd>1# uint8
new_box.ambisonic_normalization=SA3DBox.ambisonic_normalizations[audio_metadata["ambisonic_normalization"]]<line_sep>new_box.content_size<augadd>1# uint8
new_box.num_channels=num_channels<line_sep>new_box.content_size<augadd>4# uint32
channel_map=audio_metadata["channel_map"]<for_stmt>channel_element channel_map<block_start>new_box.channel_map.append(channel_element)<line_sep>new_box.content_size<augadd>4# uint32
<block_end><return>new_box<block_end><def_stmt>ambisonic_type_name self<block_start><return>next((key<for>key,value SA3DBox.ambisonic_types.items()<if>value<eq>self.ambisonic_type))<block_end><def_stmt>ambisonic_channel_ordering_name self<block_start><return>next((key<for>key,value SA3DBox.ambisonic_orderings.items()<if>value<eq>self.ambisonic_channel_ordering))<block_end><def_stmt>ambisonic_normalization_name self<block_start><return>next((key<for>key,value SA3DBox.ambisonic_normalizations.items()<if>value<eq>self.ambisonic_normalization))<block_end><def_stmt>print_box self console<block_start>""" Prints the contents of this spatial audio (SA3D) box to the
console.
"""<line_sep>ambisonic_type=self.ambisonic_type_name()<line_sep>channel_ordering=self.ambisonic_channel_ordering_name()<line_sep>ambisonic_normalization=self.ambisonic_normalization_name()<line_sep>console("\t\tAmbisonic Type: %s"%ambisonic_type)<line_sep>console("\t\tContains Head-Locked Stereo: %r"%self.head_locked_stereo)<line_sep>console("\t\tAmbisonic Order: %d"%self.ambisonic_order)<line_sep>console("\t\tAmbisonic Channel Ordering: %s"%channel_ordering)<line_sep>console("\t\tAmbisonic Normalization: %s"%ambisonic_normalization)<line_sep>console("\t\tNumber of Channels: %d"%self.num_channels)<line_sep>console("\t\tChannel Map: %s"%str(self.channel_map))<block_end><def_stmt>get_metadata_string self<block_start>""" Outputs a concise single line audio metadata string. """<line_sep>metadata="%s, %s, %s, Order %d, %d Channel(s), Channel Map: %s"%(self.ambisonic_normalization_name() self.ambisonic_channel_ordering_name() self.ambisonic_type_name() self.ambisonic_order self.num_channels str(self.channel_map))<line_sep><return>metadata<block_end><def_stmt>save self in_fh out_fh delta<block_start><if_stmt>(self.header_size<eq>16)<block_start>out_fh.write(struct.pack(">I" 1))<line_sep>out_fh.write(struct.pack(">Q" self.size()))<line_sep>out_fh.write(self.name)<block_end><elif_stmt>(self.header_size<eq>8)<block_start>out_fh.write(struct.pack(">I" self.size()))<line_sep>out_fh.write(self.name)<block_end>ambisonic_type=(self.ambisonic_type|int('10000000' 2)<if>self.head_locked_stereo<else>self.ambisonic_type&int('01111111' 2))<line_sep>out_fh.write(struct.pack(">B" self.version))<line_sep>out_fh.write(struct.pack(">B" ambisonic_type))<line_sep>out_fh.write(struct.pack(">I" self.ambisonic_order))<line_sep>out_fh.write(struct.pack(">B" self.ambisonic_channel_ordering))<line_sep>out_fh.write(struct.pack(">B" self.ambisonic_normalization))<line_sep>out_fh.write(struct.pack(">I" self.num_channels))<for_stmt>i self.channel_map<block_start><if_stmt>(i<ne><none>)<block_start>out_fh.write(struct.pack(">I" int(i)))<block_end><block_end><block_end><block_end> |
<import_from_stmt>django.core.management.base BaseCommand<import_from_stmt>django.db connections<import_from_stmt>symposion.reviews.models ProposalResult promote_proposal<class_stmt>Command(BaseCommand)<block_start><def_stmt>handle self *args **options<block_start>accepted_proposals=ProposalResult.objects.filter(status="accepted")<line_sep>accepted_proposals=accepted_proposals.order_by("proposal")<for_stmt>result accepted_proposals<block_start>promote_proposal(result.proposal)<block_end>connections["default"].cursor().execute("SELECT setval('schedule_session_id_seq', (SELECT max(id) FROM schedule_session))")<block_end><block_end> |
# -*- coding: utf-8 -*-
# Copyright 2012 <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY VINCENT DRIESSEN ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL VINCENT DRIESSEN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Vincent Driessen.
#
"""
envelopes.connstack
===================
This module implements SMTP connection stack management.
"""<import_from_stmt>contextlib contextmanager<import_from_stmt>.local LocalStack release_local<class_stmt>NoSMTPConnectionException(Exception)<block_start><pass><block_end>@contextmanager<def_stmt>Connection connection<block_start>push_connection(connection)<try_stmt><block_start><yield><block_end><finally_stmt><block_start>popped=pop_connection()<assert_stmt>popped<eq>connection 'Unexpected SMTP connection was popped off the stack. '<concat>'Check your SMTP connection setup.'<block_end><block_end><def_stmt>push_connection connection<block_start>"""Pushes the given connection on the stack."""<line_sep>_connection_stack.push(connection)<block_end><def_stmt>pop_connection <block_start>"""Pops the topmost connection from the stack."""<line_sep><return>_connection_stack.pop()<block_end><def_stmt>use_connection connection<block_start>"""Clears the stack and uses the given connection. Protects against mixed
use of use_connection() and stacked connection contexts.
"""<assert_stmt>len(_connection_stack)<le>1 'You should not mix Connection contexts with use_connection().'<line_sep>release_local(_connection_stack)<line_sep>push_connection(connection)<block_end><def_stmt>get_current_connection <block_start>"""Returns the current SMTP connection (i.e. the topmost on the
connection stack).
"""<line_sep><return>_connection_stack.top<block_end><def_stmt>resolve_connection connection=<none><block_start>"""Convenience function to resolve the given or the current connection.
Raises an exception if it cannot resolve a connection now.
"""<if_stmt>connection<is><not><none><block_start><return>connection<block_end>connection=get_current_connection()<if_stmt>connection<is><none><block_start><raise>NoSMTPConnectionException('Could not resolve an SMTP connection.')<block_end><return>connection<block_end>_connection_stack=LocalStack()<line_sep>__all__=['Connection' 'get_current_connection' 'push_connection' 'pop_connection' 'use_connection']<line_sep> |
# Problem: https://www.hackerrank.com/challenges/repeated-string/problem
# Score: 20
<def_stmt>repeated_string s n<block_start><return>n<floordiv>len(s)<times>s.count('a')+s[0:n%len(s)].count('a')<block_end>s=input()<line_sep>n=int(input())<line_sep>print(repeated_string(s n))<line_sep> |
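# --- Worked example (added for illustration; not part of the original submission) ---
# For s = "aba" and n = 10 the repeated string is "abaabaabaa", which holds 7 'a's:
# n // len(s) = 3 full copies -> 3 * s.count('a') = 6, plus s[0:n % len(s)].count('a') = 1.
assert repeated_string("aba", 10) == 7
assert repeated_string("a", 1000000000000) == 1000000000000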
# Python Stream Deck Library
# Released under the MIT license
#
# dean [at] fourwalledcubicle [dot] com
# www.fourwalledcubicle.com
#
<import_stmt>io<def_stmt>create_image deck background='black'<block_start>"""
Creates a new PIL Image with the correct image dimensions for the given
StreamDeck device's keys.
.. seealso:: See :func:`~PILHelper.to_native_format` method for converting a
PIL image instance to the native image format of a given
StreamDeck device.
:param StreamDeck deck: StreamDeck device to generate a compatible image for.
:param str background: Background color to use, compatible with `PIL.Image.new()`.
:rtype: PIL.Image
:return: Created PIL image
"""<import_from_stmt>PIL Image<line_sep>image_format=deck.key_image_format()<line_sep><return>Image.new("RGB" image_format['size'] background)<block_end><def_stmt>create_scaled_image deck image margins=[0 0 0 0] background='black'<block_start>"""
Creates a new key image that contains a scaled version of a given image,
resized to best fit the given StreamDeck device's keys with the given
margins around each side.
The scaled image is centered within the new key image, offset by the given
margins. The aspect ratio of the image is preserved.
.. seealso:: See :func:`~PILHelper.to_native_format` method for converting a
PIL image instance to the native image format of a given
StreamDeck device.
:param StreamDeck deck: StreamDeck device to generate a compatible image for.
:param Image image: PIL Image object to scale
:param list(int) margins: Array of margin pixels in (top, right, bottom, left) order.
:param str background: Background color to use, compatible with `PIL.Image.new()`.
:rtype: PIL.Image
:return: Loaded PIL image scaled and centered
"""<import_from_stmt>PIL Image<if_stmt>len(margins)<ne>4<block_start><raise>ValueError("Margins should be given as an array of four integers.")<block_end>final_image=create_image(deck background=background)<line_sep>thumbnail_max_width=final_image.width-(margins[1]+margins[3])<line_sep>thumbnail_max_height=final_image.height-(margins[0]+margins[2])<line_sep>thumbnail=image.convert("RGBA")<line_sep>thumbnail.thumbnail((thumbnail_max_width thumbnail_max_height) Image.LANCZOS)<line_sep>thumbnail_x=(margins[3]+(thumbnail_max_width-thumbnail.width)<floordiv>2)<line_sep>thumbnail_y=(margins[0]+(thumbnail_max_height-thumbnail.height)<floordiv>2)<line_sep>final_image.paste(thumbnail (thumbnail_x thumbnail_y) thumbnail)<line_sep><return>final_image<block_end><def_stmt>to_native_format deck image<block_start>"""
Converts a given PIL image to the native image format for a StreamDeck,
suitable for passing to :func:`~StreamDeck.set_key_image`.
.. seealso:: See :func:`~PILHelper.create_image` method for creating a PIL
image instance for a given StreamDeck device.
:param StreamDeck deck: StreamDeck device to generate a compatible native image for.
:param PIL.Image image: PIL Image to convert to the native StreamDeck image format
:rtype: enumerable()
:return: Image converted to the given StreamDeck's native format
"""<import_from_stmt>PIL Image<line_sep>image_format=deck.key_image_format()<if_stmt>image_format['rotation']<block_start>image=image.rotate(image_format['rotation'])<block_end><if_stmt>image_format['flip'][0]<block_start>image=image.transpose(Image.FLIP_LEFT_RIGHT)<block_end><if_stmt>image_format['flip'][1]<block_start>image=image.transpose(Image.FLIP_TOP_BOTTOM)<block_end><if_stmt>image.size<ne>image_format['size']<block_start>image.thumbnail(image_format['size'])<block_end># We want a compressed image in a given codec, convert.
compressed_image=io.BytesIO()<line_sep>image.save(compressed_image image_format['format'] quality=100)<line_sep><return>compressed_image.getbuffer()<block_end> |
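# --- Illustrative sketch (added; not part of the original helper) ---
# The same thumbnail-and-centre arithmetic as create_scaled_image(), shown standalone
# with plain Pillow. The 72x72 key size is an assumption for illustration only; real
# dimensions come from deck.key_image_format()['size'].
if __name__ == '__main__':
    from PIL import Image

    key_size = (72, 72)
    margins = [5, 5, 5, 5]                        # top, right, bottom, left
    source = Image.new("RGBA", (200, 100), "red")

    canvas = Image.new("RGB", key_size, "black")
    max_w = key_size[0] - (margins[1] + margins[3])
    max_h = key_size[1] - (margins[0] + margins[2])

    thumb = source.copy()
    thumb.thumbnail((max_w, max_h), Image.LANCZOS)

    # Centre the thumbnail inside the margin box, exactly as the helper does.
    x = margins[3] + (max_w - thumb.width) // 2
    y = margins[0] + (max_h - thumb.height) // 2
    canvas.paste(thumb, (x, y), thumb)
    print(canvas.size, thumb.size, (x, y))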
# -- Project information -----------------------------------------------------
project='LUNA'<line_sep>copyright='2020 Great Scott Gadgets'<line_sep>author='<NAME>'<line_sep># -- General configuration ---------------------------------------------------
master_doc='index'<line_sep>extensions=['sphinx.ext.autodoc' 'sphinx.ext.napoleon']<line_sep>templates_path=['_templates']<line_sep>exclude_patterns=['_build' 'Thumbs.db' '.DS_Store']<line_sep># -- Options for HTML output -------------------------------------------------
html_theme='sphinx_rtd_theme'<line_sep>html_static_path=['_static']<line_sep>html_css_files=['status.css']<line_sep># -- Options for automatic documentation -------------------------------------
# Skip documenting Tests.
<def_stmt>autodoc_skip_member_handler app what name obj skip options<block_start><return>name.endswith("Test")<or>name.startswith('_')<or>(name<eq>"elaborate")<block_end><def_stmt>setup app<block_start>app.connect('autodoc-skip-member' autodoc_skip_member_handler)<block_end> |
<import_stmt>datetime<import_stmt>math<import_from_stmt>calendar Calendar<import_from_stmt>collections defaultdict<import_from_stmt>django.core.exceptions ImproperlyConfigured<import_from_stmt>django.db.models Q<import_from_stmt>django.utils.translation gettext_lazy<as>_<import_from_stmt>django.views.generic.dates DateMixin MonthMixin YearMixin _date_from_string <import_from_stmt>django.views.generic.list BaseListView MultipleObjectTemplateResponseMixin<line_sep>DAYS=(_("Monday") _("Tuesday") _("Wednesday") _("Thursday") _("Friday") _("Saturday") _("Sunday") )<def_stmt>daterange start_date end_date<block_start>"""
Yields each date from start_date to end_date, inclusive of both endpoints.
"""<for_stmt>n range(int((end_date-start_date).days+1))<block_start><yield>start_date+datetime.timedelta(n)<block_end><block_end><class_stmt>BaseCalendarMonthView(DateMixin YearMixin MonthMixin BaseListView)<block_start>"""
A base view for displaying a calendar month
"""<line_sep>first_of_week=0# 0 = Monday, 6 = Sunday
paginate_by=<none># We don't want to use this part of MultipleObjectMixin
date_field=<none><line_sep>end_date_field=<none># For supporting events with duration
<def_stmt>get_paginate_by self queryset<block_start><if_stmt>self.paginate_by<is><not><none><block_start><raise>ImproperlyConfigured("'%s' cannot be paginated, it is a calendar view"%self.__class__.__name__)<block_end><return><none><block_end><def_stmt>get_allow_future self<block_start><return><true><block_end><def_stmt>get_end_date_field self<block_start>"""
Returns the model field to use for end dates
"""<line_sep><return>self.end_date_field<block_end><def_stmt>get_start_date self obj<block_start>"""
Returns the start date for a model instance
"""<line_sep>obj_date=getattr(obj self.get_date_field())<try_stmt><block_start>obj_date=obj_date.date()<block_end><except_stmt>AttributeError# It's a date rather than datetime, so we use it as is
<block_start><pass><block_end><return>obj_date<block_end><def_stmt>get_end_date self obj<block_start>"""
Returns the end date for a model instance
"""<line_sep>obj_date=getattr(obj self.get_end_date_field())<try_stmt><block_start>obj_date=obj_date.date()<block_end><except_stmt>AttributeError# It's a date rather than datetime, so we use it as is
<block_start><pass><block_end><return>obj_date<block_end><def_stmt>get_first_of_week self<block_start>"""
Returns an integer representing the first day of the week.
0 represents Monday, 6 represents Sunday.
"""<if_stmt>self.first_of_week<is><none><block_start><raise>ImproperlyConfigured("%s.first_of_week is required."%self.__class__.__name__)<block_end><if_stmt>self.first_of_week<not><in>range(7)<block_start><raise>ImproperlyConfigured("%s.first_of_week must be an integer between 0 and 6."%self.__class__.__name__)<block_end><return>self.first_of_week<block_end><def_stmt>get_queryset self<block_start>"""
Returns a queryset of models for the month requested
"""<line_sep>qs=super().get_queryset()<line_sep>year=self.get_year()<line_sep>month=self.get_month()<line_sep>date_field=self.get_date_field()<line_sep>end_date_field=self.get_end_date_field()<line_sep>date=_date_from_string(year self.get_year_format() month self.get_month_format())<line_sep>since=date<line_sep>until=self.get_next_month(date)<line_sep># Adjust our start and end dates to allow for next and previous
# month edges
<if_stmt>since.weekday()<ne>self.get_first_of_week()<block_start>diff=math.fabs(since.weekday()-self.get_first_of_week())<line_sep>since=since-datetime.timedelta(days=diff)<block_end><if_stmt>until.weekday()<ne>((self.get_first_of_week()+6)%7)<block_start>diff=math.fabs(((self.get_first_of_week()+6)%7)-until.weekday())<line_sep>until=until+datetime.timedelta(days=diff)<block_end><if_stmt>end_date_field# 5 possible conditions for showing an event:
# 1) Single day event, starts after 'since'
# 2) Multi-day event, starts after 'since' and ends before 'until'
# 3) Starts before 'since' and ends after 'since' and before 'until'
# 4) Starts after 'since' but before 'until' and ends after 'until'
# 5) Starts before 'since' and ends after 'until'
<block_start>predicate1=Q(**{"%s__gte"%date_field:since end_date_field:<none>})<line_sep>predicate2=Q(**{"%s__gte"%date_field:since "%s__lt"%end_date_field:until})<line_sep>predicate3=Q(**{"%s__lt"%date_field:since "%s__gte"%end_date_field:since "%s__lt"%end_date_field:until })<line_sep>predicate4=Q(**{"%s__gte"%date_field:since "%s__lt"%date_field:until "%s__gte"%end_date_field:until })<line_sep>predicate5=Q(**{"%s__lt"%date_field:since "%s__gte"%end_date_field:until})<line_sep><return>qs.filter(predicate1|predicate2|predicate3|predicate4|predicate5)<block_end><return>qs.filter(**{"%s__gte"%date_field:since})<block_end><def_stmt>get_context_data self **kwargs<block_start>"""
Injects variables necessary for rendering the calendar into the context.
Variables added are: `calendar`, `weekdays`, `month`, `next_month` and
`previous_month`.
"""<line_sep>data=super().get_context_data(**kwargs)<line_sep>year=self.get_year()<line_sep>month=self.get_month()<line_sep>date=_date_from_string(year self.get_year_format() month self.get_month_format())<line_sep>cal=Calendar(self.get_first_of_week())<line_sep>month_calendar=[]<line_sep>now=datetime.datetime.utcnow()<line_sep>date_lists=defaultdict(list)<line_sep>multidate_objs=[]<for_stmt>obj data["object_list"]<block_start>obj_date=self.get_start_date(obj)<line_sep>end_date_field=self.get_end_date_field()<if_stmt>end_date_field<block_start>end_date=self.get_end_date(obj)<if_stmt>end_date<and>end_date<ne>obj_date<block_start>multidate_objs.append({"obj":obj "range":[x<for>x daterange(obj_date end_date)] })<line_sep><continue># We don't put multi-day events in date_lists
<block_end><block_end>date_lists[obj_date].append(obj)<block_end><for_stmt>week cal.monthdatescalendar(date.year date.month)<block_start>week_range=set(daterange(week[0] week[6]))<line_sep>week_events=[]<for_stmt>val multidate_objs<block_start>intersect_length=len(week_range.intersection(val["range"]))<if_stmt>intersect_length# Event happens during this week
<block_start>slot=1<line_sep>width=intersect_length# How many days is the event during this week?
nowrap_previous=<true># Does the event continue from the previous week?
nowrap_next=<true># Does the event continue to the next week?
<if_stmt>val["range"][0]<ge>week[0]<block_start>slot=1+(val["range"][0]-week[0]).days<block_end><else_stmt><block_start>nowrap_previous=<false><block_end><if_stmt>val["range"][-1]<g>week[6]<block_start>nowrap_next=<false><block_end>week_events.append({"event":val["obj"] "slot":slot "width":width "nowrap_previous":nowrap_previous "nowrap_next":nowrap_next })<block_end><block_end>week_calendar={"events":week_events "date_list":[]}<for_stmt>day week<block_start>week_calendar["date_list"].append({"day":day "events":date_lists[day] "today":day<eq>now.date() "is_current_month":day.month<eq>date.month })<block_end>month_calendar.append(week_calendar)<block_end>data["calendar"]=month_calendar<line_sep>data["weekdays"]=[DAYS[x]<for>x cal.iterweekdays()]<line_sep>data["month"]=date<line_sep>data["next_month"]=self.get_next_month(date)<line_sep>data["previous_month"]=self.get_previous_month(date)<line_sep><return>data<block_end><block_end><class_stmt>CalendarMonthView(MultipleObjectTemplateResponseMixin BaseCalendarMonthView)<block_start>"""
A view for displaying a calendar month, and rendering a template response
"""<line_sep>template_name_suffix="_calendar_month"<block_end> |
<import_from_stmt>flask Flask<import_from_stmt>flask_sqlalchemy SQLAlchemy<line_sep>app=Flask(__name__)<line_sep>app.config.from_pyfile('settings.cfg')<line_sep>db=SQLAlchemy(app)<if_stmt><not>app.debug<block_start><import_stmt>logging<line_sep>fmt="%(levelname)s - %(asctime)s %(filename)s:%(lineno)d %(message)s"<line_sep>formatter=logging.Formatter(fmt=fmt)<line_sep>log_path='/var/log/flask/{}.log'.format(__name__)<line_sep>file_handler=logging.FileHandler(log_path)<line_sep>file_handler.setFormatter(formatter)<line_sep>app.logger.setLevel(logging.INFO)<line_sep>app.logger.addHandler(file_handler)<block_end> |
<def_stmt>on_message_deleted msg server<block_start><return>"Deleted: {}".format(msg["previous_message"]["text"])<block_end><def_stmt>on_message_changed msg server<block_start>text=msg.get("message" {"text":""}).get("text" "")<if_stmt>text.startswith("!echo")<block_start><return>"Changed: {}".format(text)<block_end><block_end><def_stmt>on_message msg server<block_start><if_stmt>msg["text"].startswith("!echo")<block_start><return>msg.get("text" "")<block_end><block_end><def_stmt>on_channel_join msg server<block_start><return>"saw user {} join".format(msg['user'])<block_end> |
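# --- Illustrative checks (added; not part of the original plugin) ---
# on_message and on_message_changed only react to "!echo"-prefixed text; the other
# handlers always respond. The message dicts below are minimal fakes, and None is
# passed for the unused server argument.
if __name__ == '__main__':
    assert on_message({"text": "!echo hi"}, None) == "!echo hi"
    assert on_message({"text": "hello"}, None) is None
    assert on_message_deleted({"previous_message": {"text": "bye"}}, None) == "Deleted: bye"
    assert on_channel_join({"user": "U123"}, None) == "saw user U123 join"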
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>numpy<as>np<import_stmt>cv2<import_from_stmt>io BytesIO<def_stmt>resize_by_max image max_side=512 force=<false><block_start>h,w=image.shape[:2]<if_stmt>max(h w)<l>max_side<and><not>force<block_start><return>image<block_end>ratio=max(h w)/max_side<line_sep>w=int(w/ratio+0.5)<line_sep>h=int(h/ratio+0.5)<line_sep><return>cv2.resize(image (w h))<block_end> |
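# --- Illustrative check (added; not part of the original module) ---
# resize_by_max() shrinks an image so its longest side is at most max_side while
# keeping the aspect ratio. The all-black array below is a made-up stand-in for a
# real image; numpy and cv2 are already imported above.
if __name__ == '__main__':
    demo = np.zeros((1024, 640, 3), dtype=np.uint8)
    resized = resize_by_max(demo, max_side=512)
    print(demo.shape, '->', resized.shape)        # (1024, 640, 3) -> (512, 320, 3)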
"""This is a cog for a discord.py bot.
It adds the Lamp cog, which provides commands to switch a smart lamp on and off.
"""<import_from_stmt>discord.ext commands<class_stmt>Lamp(commands.Cog command_attrs=dict(hidden=<true>))<block_start><def_stmt>__init__ self client<block_start>self.client=client<block_end>@commands.group(name='lamp' hidden=<true> invoke_without_command=<true> )<async_keyword><def_stmt>lamp self ctx<block_start>"""Commands to control the live stream integration"""<line_sep><await>ctx.send_help('lamp')<block_end>@lamp.command(name='off' )<async_keyword><def_stmt>lamp_off self ctx<block_start>url='https://a1.tuyaus.com/api.json?appVersion=3.13.0&appRnVersion=5.18&channel=oem&sign=47e07d9cf53bbab369fc504760c8d3752f0f7c2f8a56fe8c63f28c99d7bb8e1c&platform=ONEPLUS%20A5000&requestId=7c696d1e-8579-4871-b271-71b6a3a093d5&lang=en&a=tuya.m.device.dp.publish&clientId=ekmnwp9f5pnh3trdtpgy&osSystem=9&os=Android&timeZoneId=America%2FChicago&ttid=sdk_tuya%40ekmnwp9f5pnh3trdtpgy&et=0.0.1&v=1.0&sdkVersion=3.13.0&time=1572717891'<line_sep>headers={'User-Agent':'TY-UA=APP/Android/3.13.0/SDK/3.13.0' 'Content-Type':'application/x-www-form-urlencoded' 'Content-Length':'260' 'Host':'a1.tuyaus.com' 'Connection':'Keep-Alive' 'Accept-Encoding':'gzip' }<line_sep>data={'postData':'{"devId":"06200623b4e62d1a196d","dps":"{\\"1\\":false}","gwId":"06200623b4e62d1a196d"}' 'deviceId':'0cbe6a9f082da9d8ad9607677542561f46adb4592222' 'sid':'az152789n0645407g6y4cy235e9cec2811a8b93caefedeea3c2ce5a8' }<async_keyword><with_stmt>self.client.session.post(url headers=headers data=data)<as>response<block_start>res=<await>response.json()<line_sep>print(res)<if_stmt>res['status']<eq>'ok'<block_start><await>ctx.send('Success')<block_end><block_end><block_end>@lamp.command(name='on' )<async_keyword><def_stmt>lamp_on self ctx<block_start>print('on')<line_sep>url='https://a1.tuyaus.com/api.json?appVersion=3.13.0&appRnVersion=5.18&channel=oem&sign=a8a0a9914c77dc5d01f2826a2588bb25151a1d9b46688223b10586a3fc56a4c7&platform=ONEPLUS%20A5000&requestId=3a891769-255a-4a55-971a-551df700252f&lang=en&a=tuya.m.device.dp.publish&clientId=ekmnwp9f5pnh3trdtpgy&osSystem=9&os=Android&timeZoneId=America%2FChicago&ttid=sdk_tuya%40ekmnwp9f5pnh3trdtpgy&et=0.0.1&v=1.0&sdkVersion=3.13.0&time=1572717894'<line_sep>headers={'User-Agent':'TY-UA=APP/Android/3.13.0/SDK/3.13.0' 'Content-Type':'application/x-www-form-urlencoded' 'Content-Length':'259' 'Host':'a1.tuyaus.com' 'Connection':'Keep-Alive' 'Accept-Encoding':'gzip' }<line_sep>data={'postData':'{"devId":"06200623b4e62d1a196d","dps":"{\\"1\\":true}","gwId":"06200623b4e62d1a196d"}' 'deviceId':'0cbe6a9f082da9d8ad9607677542561f46adb4592222' 'sid':'az152789n0645407g6y4cy235e9cec2811a8b93caefedeea3c2ce5a8' }<line_sep>print('sending')<async_keyword><with_stmt>self.client.session.post(url headers=headers data=data)<as>response<block_start>res=<await>response.json()<line_sep>print(res)<if_stmt>res['status']<eq>'ok'<block_start><await>ctx.send('Success')<block_end><block_end><block_end><block_end><def_stmt>setup client<block_start>"""This is called when the cog is loaded via load_extension"""<line_sep>client.add_cog(Lamp(client))<block_end> |
# -*- coding: utf-8 -*-
<import_stmt>unittest<import_stmt>os<import_stmt>numpy<as>npy<import_from_stmt>skrf.media DefinedGammaZ0 Media<import_from_stmt>skrf.network Network<import_from_stmt>skrf.frequency Frequency<import_stmt>skrf<class_stmt>DefinedGammaZ0TestCase(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.files_dir=os.path.join(os.path.dirname(os.path.abspath(__file__)) 'qucs_prj')<line_sep>self.dummy_media=DefinedGammaZ0(frequency=Frequency(1 100 21 'ghz') gamma=1j z0=50 )<block_end><def_stmt>test_impedance_mismatch self<block_start>"""
"""<line_sep>fname=os.path.join(self.files_dir 'impedanceMismatch,50to25.s2p')<line_sep>qucs_ntwk=Network(fname)<line_sep>self.dummy_media.frequency=qucs_ntwk.frequency<line_sep>skrf_ntwk=self.dummy_media.thru(z0=50)<power>self.dummy_media.thru(z0=25)<line_sep>self.assertEqual(qucs_ntwk skrf_ntwk)<block_end><def_stmt>test_resistor self<block_start>"""
"""<line_sep>fname=os.path.join(self.files_dir 'resistor,1ohm.s2p')<line_sep>qucs_ntwk=Network(fname)<line_sep>self.dummy_media.frequency=qucs_ntwk.frequency<line_sep>skrf_ntwk=self.dummy_media.resistor(1)<line_sep>self.assertEqual(qucs_ntwk skrf_ntwk)<block_end><def_stmt>test_capacitor self<block_start>"""
"""<line_sep>fname=os.path.join(self.files_dir 'capacitor,p01pF.s2p')<line_sep>qucs_ntwk=Network(fname)<line_sep>self.dummy_media.frequency=qucs_ntwk.frequency<line_sep>skrf_ntwk=self.dummy_media.capacitor(.01e-12)<line_sep>self.assertEqual(qucs_ntwk skrf_ntwk)<block_end><def_stmt>test_inductor self<block_start>"""
"""<line_sep>fname=os.path.join(self.files_dir 'inductor,p1nH.s2p')<line_sep>qucs_ntwk=Network(fname)<line_sep>self.dummy_media.frequency=qucs_ntwk.frequency<line_sep>skrf_ntwk=self.dummy_media.inductor(.1e-9)<line_sep>self.assertEqual(qucs_ntwk skrf_ntwk)<block_end><def_stmt>test_scalar_gamma_z0_media self<block_start>"""
test ability to create a Media from scalar quantities for gamma/z0
and change frequency resolution
"""<line_sep>a=DefinedGammaZ0(Frequency(1 10 101) gamma=1j z0=50)<line_sep>self.assertEqual(a.line(1) a.line(1))<line_sep># we should be able to re-sample the media
a.npoints=21<line_sep>self.assertEqual(len(a.gamma) len(a))<line_sep>self.assertEqual(len(a.z0) len(a))<line_sep>self.assertEqual(len(a.z0) len(a))<block_end><def_stmt>test_vector_gamma_z0_media self<block_start>"""
test ability to create a Media from vector quantities for gamma/z0
"""<line_sep>freq=Frequency(1 10 101)<line_sep>a=DefinedGammaZ0(freq gamma=1j<times>npy.ones(len(freq)) z0=50<times>npy.ones(len(freq)) )<line_sep>self.assertEqual(a.line(1) a.line(1))<with_stmt>self.assertRaises(NotImplementedError)<block_start>a.npoints=4<block_end><block_end><def_stmt>test_write_csv self<block_start>fname=os.path.join(self.files_dir 'out.csv')<line_sep>self.dummy_media.write_csv(fname)<line_sep>os.remove(fname)<block_end><def_stmt>test_from_csv self<block_start>fname=os.path.join(self.files_dir 'out.csv')<line_sep>self.dummy_media.write_csv(fname)<line_sep>a_media=DefinedGammaZ0.from_csv(fname)<line_sep>self.assertEqual(a_media self.dummy_media)<line_sep>os.remove(fname)<block_end><block_end><class_stmt>STwoPortsNetworkTestCase(unittest.TestCase)<block_start>"""
Check S-parameters of media base elements against theoretical results.
"""<def_stmt>setUp self<block_start>self.dummy_media=DefinedGammaZ0(frequency=Frequency(1 100 21 'GHz') gamma=1j z0=50 )<block_end><def_stmt>test_s_series_element self<block_start>"""
Series elements of impedance Z:
○---[Z]---○
○---------○
have S matrix of the form:
[ Z/Z0 / (Z/Z0 + 2) 2/(Z/Z0 + 2) ]
[ 2/(Z/Z0 + 2) Z/Z0 / (Z/Z0 + 2) ]
"""<line_sep>R=1.0# Ohm
ntw=self.dummy_media.resistor(R)<line_sep>Z0=self.dummy_media.z0<line_sep>S11=(R/Z0)/(R/Z0+2)<line_sep>S21=2/(R/Z0+2)<line_sep>npy.testing.assert_array_almost_equal(ntw.s[: 0 0] S11)<line_sep>npy.testing.assert_array_almost_equal(ntw.s[: 0 1] S21)<line_sep>npy.testing.assert_array_almost_equal(ntw.s[: 1 0] S21)<line_sep>npy.testing.assert_array_almost_equal(ntw.s[: 1 1] S11)<block_end><def_stmt>test_s_shunt_element self<block_start>"""
Shunt elements of admittance Y:
○---------○
|
[Y]
|
○---------○
have S matrix of the form:
[ -Y Z0 / (Y Z0 + 2) 2/(Y Z0 + 2) ]
[ 2/(Y Z0 + 2) -Y Z0 / (Y Z0 + 2) ]
"""<line_sep>R=1.0# Ohm
ntw=self.dummy_media.shunt(self.dummy_media.resistor(R)<power>self.dummy_media.short())<line_sep>Z0=self.dummy_media.z0<line_sep>S11=-(1/R<times>Z0)/(1/R<times>Z0+2)<line_sep>S21=2/(1/R<times>Z0+2)<line_sep>npy.testing.assert_array_almost_equal(ntw.s[: 0 0] S11)<line_sep>npy.testing.assert_array_almost_equal(ntw.s[: 0 1] S21)<line_sep>npy.testing.assert_array_almost_equal(ntw.s[: 1 0] S21)<line_sep>npy.testing.assert_array_almost_equal(ntw.s[: 1 1] S11)<block_end><def_stmt>test_s_lossless_line self<block_start>"""
Lossless transmission line of characteristic impedance z1, length l
and wavenumber beta
_______
○----- -----○
z0 z1 z0
○-----_______-----○
"""<line_sep>l=5.0<line_sep>z1=30.0<line_sep>z0=self.dummy_media.z0<line_sep>ntw=self.dummy_media.line(d=0 unit='m' z0=z0)<power>self.dummy_media.line(d=l unit='m' z0=z1)<power>self.dummy_media.line(d=0 unit='m' z0=z0)<line_sep>beta=self.dummy_media.beta<line_sep>_z1=z1/z0<line_sep>S11=1j<times>(_z1<power>2-1)<times>npy.sin(beta<times>l)/(2<times>_z1<times>npy.cos(beta<times>l)+1j<times>(_z1<power>2+1)<times>npy.sin(beta<times>l))<line_sep>S21=2<times>_z1/(2<times>_z1<times>npy.cos(beta<times>l)+1j<times>(_z1<power>2+1)<times>npy.sin(beta<times>l))<line_sep>npy.testing.assert_array_almost_equal(ntw.s[: 0 0] S11)<line_sep>npy.testing.assert_array_almost_equal(ntw.s[: 0 1] S21)<line_sep>npy.testing.assert_array_almost_equal(ntw.s[: 1 0] S21)<line_sep>npy.testing.assert_array_almost_equal(ntw.s[: 1 1] S11)<block_end><def_stmt>test_s_lossy_line self<block_start>"""
Lossy transmission line of characteristic impedance Z0, length l
and propagation constant gamma = alpha + j beta
○---------○
○---------○
has ABCD matrix of the form:
[ cosh(gamma l) Z0 sinh(gamma l) ]
[ 1/Z0 sinh(gamma l) cosh(gamma l) ]
"""<block_end><block_end><class_stmt>ABCDTwoPortsNetworkTestCase(unittest.TestCase)<block_start>"""
Check ABCD parameters of media base elements (such as lumped elements)
against theoretical results.
"""<def_stmt>setUp self<block_start>self.dummy_media=DefinedGammaZ0(frequency=Frequency(1 100 21 'GHz') gamma=1j z0=50 )<block_end><def_stmt>test_abcd_series_element self<block_start>"""
Series elements of impedance Z:
○---[Z]---○
○---------○
have ABCD matrix of the form:
[ 1 Z ]
[ 0 1 ]
"""<line_sep>R=1.0# Ohm
ntw=self.dummy_media.resistor(R)<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 0 0] 1.0)<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 0 1] R)<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 1 0] 0.0)<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 1 1] 1.0)<block_end><def_stmt>test_abcd_shunt_element self<block_start>"""
Shunt elements of admittance Y:
○---------○
|
[Y]
|
○---------○
have ABCD matrix of the form:
[ 1 0 ]
[ Y 1 ]
"""<line_sep>R=1.0# Ohm
ntw=self.dummy_media.shunt(self.dummy_media.resistor(R)<power>self.dummy_media.short())<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 0 0] 1.0)<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 0 1] 0.0)<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 1 0] 1.0/R)<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 1 1] 1.0)<block_end><def_stmt>test_abcd_series_shunt_elements self<block_start>"""
Series and Shunt elements of impedance Zs and Zp:
○---[Zs]--------○
|
[Zp]
|
○--------------○
have ABCD matrix of the form:
[ 1 + Zs/Zp Zs ]
[ 1/Zp 1 ]
"""<line_sep>Rs=2.0<line_sep>Rp=3.0<line_sep>serie_resistor=self.dummy_media.resistor(Rs)<line_sep>shunt_resistor=self.dummy_media.shunt(self.dummy_media.resistor(Rp)<power>self.dummy_media.short())<line_sep>ntw=serie_resistor<power>shunt_resistor<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 0 0] 1.0+Rs/Rp)<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 0 1] Rs)<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 1 0] 1.0/Rp)<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 1 1] 1.0)<block_end><def_stmt>test_abcd_thru self<block_start>"""
Thru has ABCD matrix of the form:
[ 1 0 ]
[ 0 1 ]
"""<line_sep>ntw=self.dummy_media.thru()<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 0 0] 1.0)<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 0 1] 0.0)<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 1 0] 0.0)<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 1 1] 1.0)<block_end><def_stmt>test_abcd_lossless_line self<block_start>"""
Lossless transmission line of characteristic impedance Z0, length l
and wavenumber beta
○---------○
○---------○
has ABCD matrix of the form:
[ cos(beta l) j Z0 sin(beta l) ]
[ j/Z0 sin(beta l) cos(beta l) ]
"""<line_sep>l=5<line_sep>z0=80<line_sep>ntw=self.dummy_media.line(d=l unit='m' z0=z0)<line_sep>beta=self.dummy_media.beta<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 0 0] npy.cos(beta<times>l))<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 0 1] 1j<times>z0<times>npy.sin(beta<times>l))<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 1 0] 1j/z0<times>npy.sin(beta<times>l))<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 1 1] npy.cos(beta<times>l))<block_end><def_stmt>test_abcd_lossy_line self<block_start>"""
Lossy transmission line of characteristic impedance Z0, length l
and propagation constant gamma = alpha + j beta
○---------○
○---------○
has ABCD matrix of the form:
[ cosh(gamma l) Z0 sinh(gamma l) ]
[ 1/Z0 sinh(gamma l) cosh(gamma l) ]
"""<line_sep>l=5.0<line_sep>z0=30.0<line_sep>alpha=0.5<line_sep>beta=2.0<line_sep>lossy_media=DefinedGammaZ0(frequency=Frequency(1 100 21 'GHz') gamma=alpha+1j<times>beta z0=z0)<line_sep>ntw=lossy_media.line(d=l unit='m' z0=z0)<line_sep>gamma=lossy_media.gamma<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 0 0] npy.cosh(gamma<times>l))<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 0 1] z0<times>npy.sinh(gamma<times>l))<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 1 0] 1.0/z0<times>npy.sinh(gamma<times>l))<line_sep>npy.testing.assert_array_almost_equal(ntw.a[: 1 1] npy.cosh(gamma<times>l))<block_end><block_end> |
<import_stmt>sys<import_stmt>os<def_stmt>run <block_start>base=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))<line_sep>## FIXME: this is kind of crude; if we could create a fake pip
## module, then exec into it and update pip.__path__ properly, we
## wouldn't have to update sys.path:
sys.path.insert(0 base)<import_stmt>pip<line_sep><return>pip.main()<block_end><if_stmt>__name__<eq>'__main__'<block_start>exit=run()<if_stmt>exit<block_start>sys.exit(exit)<block_end><block_end> |
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Downloader tests helper utils"""<import_from_stmt>unittest SkipTest<import_from_stmt>datalad.downloaders.providers Providers<def_stmt>get_test_providers url=<none> reload=<false><block_start>"""Return reusable instance of our global providers + verify credentials for url"""<line_sep>_test_providers=Providers.from_config_files(reload=reload)<if_stmt>url<is><not><none># check if we have credentials for the url
<block_start>provider=_test_providers.get_provider(url only_nondefault=<true>)<if_stmt>provider<is><none><or>provider.credential<is><none># no registered provider, or no credential needed; access should be fine
<block_start><pass><block_end><elif_stmt><not>provider.credential.is_known<block_start><raise>SkipTest("This test requires known credentials for %s"%provider.credential.name)<block_end><block_end><return>_test_providers<block_end>get_test_providers.__test__=<false><line_sep> |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
<import_stmt>flatbuffers<import_from_stmt>flatbuffers.compat import_numpy<line_sep>np=import_numpy()<class_stmt>TensorMap(object)<block_start>__slots__=['_tab']<line_sep>@classmethod<def_stmt>GetRootAsTensorMap cls buf offset<block_start>n=flatbuffers.encode.Get(flatbuffers.packer.uoffset buf offset)<line_sep>x=TensorMap()<line_sep>x.Init(buf n+offset)<line_sep><return>x<block_end>@classmethod<def_stmt>TensorMapBufferHasIdentifier cls buf offset size_prefixed=<false><block_start><return>flatbuffers.util.BufferHasIdentifier(buf offset b"\x54\x46\x4C\x33" size_prefixed=size_prefixed)<block_end># TensorMap
<def_stmt>Init self buf pos<block_start>self._tab=flatbuffers.table.Table(buf pos)<block_end># TensorMap
<def_stmt>Name self<block_start>o=flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))<if_stmt>o<ne>0<block_start><return>self._tab.String(o+self._tab.Pos)<block_end><return><none><block_end># TensorMap
<def_stmt>TensorIndex self<block_start>o=flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))<if_stmt>o<ne>0<block_start><return>self._tab.Get(flatbuffers.number_types.Uint32Flags o+self._tab.Pos)<block_end><return>0<block_end><block_end><def_stmt>TensorMapStart builder<block_start>builder.StartObject(2)<block_end><def_stmt>TensorMapAddName builder name<block_start>builder.PrependUOffsetTRelativeSlot(0 flatbuffers.number_types.UOffsetTFlags.py_type(name) 0)<block_end><def_stmt>TensorMapAddTensorIndex builder tensorIndex<block_start>builder.PrependUint32Slot(1 tensorIndex 0)<block_end><def_stmt>TensorMapEnd builder<block_start><return>builder.EndObject()<block_end><class_stmt>TensorMapT(object)# TensorMapT
<block_start><def_stmt>__init__ self<block_start>self.name=<none># type: str
self.tensorIndex=0<block_end># type: int
@classmethod<def_stmt>InitFromBuf cls buf pos<block_start>tensorMap=TensorMap()<line_sep>tensorMap.Init(buf pos)<line_sep><return>cls.InitFromObj(tensorMap)<block_end>@classmethod<def_stmt>InitFromObj cls tensorMap<block_start>x=TensorMapT()<line_sep>x._UnPack(tensorMap)<line_sep><return>x<block_end># TensorMapT
<def_stmt>_UnPack self tensorMap<block_start><if_stmt>tensorMap<is><none><block_start><return><block_end>self.name=tensorMap.Name()<line_sep>self.tensorIndex=tensorMap.TensorIndex()<block_end># TensorMapT
<def_stmt>Pack self builder<block_start><if_stmt>self.name<is><not><none><block_start>name=builder.CreateString(self.name)<block_end>TensorMapStart(builder)<if_stmt>self.name<is><not><none><block_start>TensorMapAddName(builder name)<block_end>TensorMapAddTensorIndex(builder self.tensorIndex)<line_sep>tensorMap=TensorMapEnd(builder)<line_sep><return>tensorMap<block_end><block_end> |
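# --- Illustrative round-trip sketch (added; not part of the generated code) ---
# Packs a TensorMapT object into a flatbuffer and reads it back through the generated
# accessors. Only the flatbuffers runtime imported above is assumed; the "input_tensor"
# name and index 3 are made-up demo values. Note that the Python flatbuffers runtime
# returns strings as bytes.
if __name__ == '__main__':
    obj = TensorMapT()
    obj.name = "input_tensor"
    obj.tensorIndex = 3

    builder = flatbuffers.Builder(0)
    builder.Finish(obj.Pack(builder))
    buf = builder.Output()

    decoded = TensorMap.GetRootAsTensorMap(buf, 0)
    assert decoded.Name() == b"input_tensor"
    assert decoded.TensorIndex() == 3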
"""Configures pytest (beyond the ini file)."""<import_stmt>matplotlib<as>mpl<import_stmt>numpy<import_stmt>pytest<import_from_stmt>matplotlib pyplot<as>plt<import_from_stmt>dapper.dpr_config rc<line_sep>@pytest.fixture(autouse=<true>)<def_stmt>add_sci doctest_namespace<block_start>"""Add numpy as np for doctests."""<line_sep>doctest_namespace["np"]=numpy<line_sep>doctest_namespace["mpl"]=mpl<line_sep>doctest_namespace["plt"]=plt<line_sep>doctest_namespace["rnd"]=numpy.random<line_sep>doctest_namespace["rc"]=rc<block_end> |
<import_stmt>os<import_stmt>time<import_stmt>numpy<as>np<line_sep># from IPython import embed
print("perform experiments on amazoncat 13K (multilabel)")<line_sep>leaf_example_multiplier=2<line_sep>lr=1<line_sep>bits=30<line_sep>alpha=0.1# 0.3
passes=4<line_sep>learn_at_leaf=<true><line_sep>use_oas=<true><line_sep># num_queries = 1  # not actually used
dream_at_update=1<line_sep># hal_version = 1  # not actually used
loss="squared"<line_sep>dream_repeats=3<line_sep># Precision_at_K = 5
num_examples=1186239<line_sep>max_num_labels=13330<line_sep>tree_node=int(num_examples/(np.log(num_examples)/np.log(2)<times>leaf_example_multiplier))<line_sep>train_data="amazoncat_train.mat.mult_label.vw.txt"<line_sep>test_data="amazoncat_test.mat.mult_label.vw.txt"<if_stmt>os.path.exists(train_data)<is><not><true><block_start>os.system("wget http://kalman.ml.cmu.edu/wen_datasets/{}".format(train_data))<block_end><if_stmt>os.path.exists(test_data)<is><not><true><block_start>os.system("wget http://kalman.ml.cmu.edu/wen_datasets/{}".format(test_data))<block_end>saved_model="{}.vw".format(train_data)<line_sep>print("## Training...")<line_sep>start=time.time()<line_sep># train_data = 'tmp_rcv1x.vw.txt'
command_line=f"../../build/vowpalwabbit/vw -d {train_data} --memory_tree {tree_node} {'--learn_at_leaf'<if>learn_at_leaf<else>''} --dream_at_update {dream_at_update}\
--max_number_of_labels {max_num_labels} --dream_repeats {dream_repeats} {'--oas'<if>use_oas<else>''} \
--leaf_example_multiplier {leaf_example_multiplier} --alpha {alpha} -l {lr} -b {bits} -c --passes {passes} --loss_function {loss} --holdout_off -f {saved_model}"<line_sep>os.system(command_line)<line_sep>train_time=time.time()-start<line_sep>print("## Testing...")<line_sep>start=time.time()<line_sep>os.system("../../build/vowpalwabbit/vw {} --oas {} -i {}".format(test_data use_oas saved_model))<line_sep>test_time=time.time()-start<line_sep>print("## train time {}, and test time {}".format(train_time test_time))<line_sep> |
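# --- Sanity check of the tree-node arithmetic (added for illustration) ---
# tree_node = num_examples / (log2(num_examples) * leaf_example_multiplier)
#           = 1186239 / (log2(1186239) * 2), i.e. roughly 29k internal nodes.
print("## memory tree nodes: {}".format(tree_node))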
<import_stmt>os<import_stmt>math<import_stmt>torch<import_from_stmt>torch nn optim<import_stmt>logging<import_stmt>numpy<as>np<import_stmt>torch.nn.functional<as>F<import_from_stmt>torch.autograd Variable<import_stmt>utils<import_from_stmt>contrastqg T5ForConditionalGeneration <line_sep>logger=logging.getLogger()<class_stmt>QGenerator(object)<block_start><def_stmt>__init__ self args tokenizer<block_start>self.network=T5ForConditionalGeneration.from_pretrained(args.pretrain_generator_type)<line_sep>self.network.resize_token_embeddings(len(tokenizer))<line_sep>self.network.load_state_dict(torch.load(args.generator_load_dir+'/models.pkl'))<line_sep>logger.info("successfully loaded checkpoint from {}!".format(args.generator_load_dir))<line_sep>self.tokenizer=tokenizer<line_sep>self.batchify_inputs=utils.select_gen_input_refactor(args)<block_end><def_stmt>predict self inputs<block_start>self.network.eval()<line_sep>outputs=self.network.generate(**inputs)<line_sep>pred_tokens=self.tokenizer.convert_outputs_to_tokens(outputs)<line_sep><return>pred_tokens<block_end><def_stmt>set_device self device<block_start>self.device=device<line_sep>self.network.to(self.device)<block_end><def_stmt>parallelize self<block_start>"""Use data parallel to copy the model across several gpus.
This will take all gpus visible with CUDA_VISIBLE_DEVICES.
"""<line_sep>self.parallel=<true><line_sep>self.network=torch.nn.DataParallel(self.network)<block_end><block_end> |
"""Bradford distribution."""<import_stmt>numpy<import_from_stmt>..baseclass SimpleDistribution LowerUpperDistribution<class_stmt>bradford(SimpleDistribution)<block_start>"""Standard Bradford distribution."""<def_stmt>__init__ self c=1<block_start>super(bradford self).__init__(dict(c=c))<block_end><def_stmt>_pdf self x c<block_start><return>c/(c<times>x+1.)/numpy.log(1.+c)<block_end><def_stmt>_cdf self x c<block_start><return>numpy.log(1.+c<times>x)/numpy.log(c+1.)<block_end><def_stmt>_ppf self q c<block_start><return>((1.+c)<power>q-1)/c<block_end><def_stmt>_lower self c<block_start><return>0.<block_end><def_stmt>_upper self c<block_start><return>1.<block_end><block_end><class_stmt>Bradford(LowerUpperDistribution)<block_start>"""
Bradford distribution.
Args:
shape (float, Distribution):
Shape parameter
lower (float, Distribution):
Location of lower threshold
upper (float, Distribution):
Location of upper threshold
Examples:
>>> distribution = chaospy.Bradford(0.8, 4, 6)
>>> distribution
Bradford(0.8, lower=4, upper=6)
>>> uloc = numpy.linspace(0, 1, 6)
>>> uloc
array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
>>> xloc = distribution.inv(uloc)
>>> xloc.round(3)
array([4. , 4.312, 4.663, 5.057, 5.501, 6. ])
>>> numpy.allclose(distribution.fwd(xloc), uloc)
True
>>> distribution.pdf(xloc).round(3)
array([0.681, 0.605, 0.538, 0.478, 0.425, 0.378])
>>> distribution.sample(4).round(3)
array([5.171, 4.175, 5.87 , 4.819])
"""<def_stmt>__init__ self shape=1 lower=0 upper=1<block_start>super(Bradford self).__init__(dist=bradford(shape) lower=lower upper=upper repr_args=[shape] )<block_end><block_end> |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
<import_stmt>time<import_from_stmt>.experiment Experiment<import_from_stmt>.nbsvm preds_for_cell_content preds_for_cell_content_max preds_for_cell_content_multi<import_stmt>dataclasses<import_from_stmt>dataclasses dataclass<import_from_stmt>typing Tuple<import_from_stmt>axcell.helpers.training set_seed<import_from_stmt>fastai.text *<import_stmt>numpy<as>np<import_from_stmt>pathlib Path<import_stmt>json<import_stmt>argparse<import_stmt>glob<import_stmt>logging<import_stmt>os<import_stmt>random<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torch.utils.data DataLoader RandomSampler SequentialSampler TensorDataset <import_from_stmt>torch.utils.data.distributed DistributedSampler<import_from_stmt>fastai.text *# for utility functions
<try_stmt><block_start><import_from_stmt>torch.utils.tensorboard SummaryWriter<block_end><except_stmt><block_start><import_from_stmt>tensorboardX SummaryWriter<block_end><import_from_stmt>tqdm tqdm trange<import_stmt>tensorflow_datasets<import_from_stmt>transformers WEIGHTS_NAME BertConfig BertForSequenceClassification BertTokenizer RobertaConfig RobertaForSequenceClassification RobertaTokenizer XLMConfig XLMForSequenceClassification XLMTokenizer XLNetConfig XLNetForSequenceClassification XLNetTokenizer DistilBertConfig DistilBertForSequenceClassification DistilBertTokenizer DataProcessor InputExample AutoConfig <import_from_stmt>transformers AdamW WarmupLinearSchedule<import_from_stmt>transformers glue_compute_metrics<as>compute_metrics<import_from_stmt>transformers glue_output_modes<as>output_modes<import_from_stmt>transformers glue_processors<as>processors<import_from_stmt>transformers glue_convert_examples_to_features<as>convert_examples_to_features<import_from_stmt>transformers AutoTokenizer AutoModelForSequenceClassification glue_convert_examples_to_features<import_from_stmt>transformers.data.processors.glue glue_processors<line_sep>logger=logging.getLogger(__name__)<def_stmt>train args train_dataset valid_dataset model tokenizer<block_start>""" Train the model """<if_stmt>args.local_rank<in>[-1 0]<block_start>tb_writer=args.get_summary_writer()<block_end>train_sampler=RandomSampler(train_dataset)<if>args.local_rank<eq>-1<else>DistributedSampler(train_dataset)<line_sep>train_dataloader=DataLoader(train_dataset sampler=train_sampler batch_size=args.train_batch_size)<if_stmt>args.max_steps<g>0<block_start>t_total=args.max_steps<line_sep>args.num_train_epochs=args.max_steps<floordiv>(len(train_dataloader)<floordiv>args.gradient_accumulation_steps)+1<block_end><else_stmt><block_start>t_total=len(train_dataloader)<floordiv>args.gradient_accumulation_steps<times>args.num_train_epochs<block_end># Prepare optimizer and schedule (linear warmup and decay)
no_decay=['bias' 'LayerNorm.weight']<line_sep>optimizer_grouped_parameters=[{'params':[p<for>n,p model.named_parameters()<if><not>any(nd<in>n<for>nd no_decay)] 'weight_decay':args.weight_decay} {'params':[p<for>n,p model.named_parameters()<if>any(nd<in>n<for>nd no_decay)] 'weight_decay':0.0}]<line_sep>optimizer=AdamW(optimizer_grouped_parameters lr=args.learning_rate eps=args.adam_epsilon)<line_sep>scheduler=WarmupLinearSchedule(optimizer warmup_steps=args.warmup_steps t_total=t_total)<if_stmt>args.fp16<block_start><try_stmt><block_start><import_from_stmt>apex amp<block_end><except_stmt>ImportError<block_start><raise>ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")<block_end>model,optimizer=amp.initialize(model optimizer opt_level=args.fp16_opt_level)<block_end># multi-gpu training (should be after apex fp16 initialization)
<if_stmt>args.n_gpu<g>1<block_start>model=torch.nn.DataParallel(model)<block_end># Distributed training (should be after apex fp16 initialization)
<if_stmt>args.local_rank<ne>-1<block_start>model=torch.nn.parallel.DistributedDataParallel(model device_ids=[args.local_rank] output_device=args.local_rank find_unused_parameters=<true>)<block_end># Train!
logger.info("***** Running training *****")<line_sep>logger.info(" Num examples = %d" len(train_dataset))<line_sep>logger.info(" Num Epochs = %d" args.num_train_epochs)<line_sep>logger.info(" Instantaneous batch size per GPU = %d" args.per_gpu_train_batch_size)<line_sep>logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d" args.train_batch_size<times>args.gradient_accumulation_steps<times>(torch.distributed.get_world_size()<if>args.local_rank<ne>-1<else>1))<line_sep>logger.info(" Gradient Accumulation steps = %d" args.gradient_accumulation_steps)<line_sep>logger.info(" Total optimization steps = %d" t_total)<line_sep>global_step=0<line_sep>tr_loss,logging_loss=0.0 0.0<line_sep>model.zero_grad()<line_sep>train_iterator=range(int(args.num_train_epochs))<line_sep>set_seed(args.seed "Training" all_gpus=(args.n_gpu<g>1))# Added here for reproductibility (even between python 2 and 3)
mb=master_bar(train_iterator)<line_sep>mb.first_bar.comment=f'Epochs'<line_sep>results={}<for_stmt>epoch mb<block_start>epoch_iterator=progress_bar(train_dataloader display=args.local_rank<not><in>[-1 0] parent=mb)<for_stmt>step,batch enumerate(epoch_iterator)<block_start>model.train()<line_sep>batch=tuple(t.to(args.device)<for>t batch)<line_sep>inputs={'input_ids':batch[0] 'attention_mask':batch[1] 'labels':batch[3]}<if_stmt>args.model_type<ne>'distilbert'<block_start>inputs['token_type_ids']=batch[2]<if>args.model_type<in>['bert' 'xlnet']<else><none># XLM, DistilBERT and RoBERTa don't use segment_ids
<block_end>outputs=model(**inputs)<line_sep>loss=outputs[0]# model outputs are always tuple in transformers (see doc)
<if_stmt>args.n_gpu<g>1<block_start>loss=loss.mean()# mean() to average on multi-gpu parallel training
<block_end><if_stmt>args.gradient_accumulation_steps<g>1<block_start>loss=loss/args.gradient_accumulation_steps<block_end><if_stmt>args.fp16<block_start><with_stmt>amp.scale_loss(loss optimizer)<as>scaled_loss<block_start>scaled_loss.backward()<block_end><block_end><else_stmt><block_start>loss.backward()<block_end>tr_loss<augadd>loss.item()<if_stmt>(step+1)%args.gradient_accumulation_steps<eq>0<and><not>args.tpu<block_start><if_stmt>args.fp16<block_start>torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer) args.max_grad_norm)<block_end><else_stmt><block_start>torch.nn.utils.clip_grad_norm_(model.parameters() args.max_grad_norm)<block_end>optimizer.step()<line_sep>scheduler.step()# Update learning rate schedule
model.zero_grad()<line_sep>global_step<augadd>1<if_stmt>args.local_rank<in>[-1 0]<and>args.logging_steps<g>0<and>global_step%args.logging_steps<eq>0# Log metrics
<block_start>mb.child.comment=f"loss: {loss}"<line_sep>tb_writer.add_scalar('train/lr' scheduler.get_lr()[0] global_step)<line_sep>tb_writer.add_scalar('train/loss' (tr_loss-logging_loss)/args.logging_steps global_step)<line_sep>logging_loss=tr_loss<block_end><if_stmt>args.local_rank<in>[-1 0]<and>args.save_steps<g>0<and>global_step%args.save_steps<eq>0# Save model checkpoint
<block_start>output_dir=os.path.join(args.output_dir 'checkpoint-{}'.format(global_step))<if_stmt><not>os.path.exists(output_dir)<block_start>os.makedirs(output_dir)<block_end>model_to_save=model.module<if>hasattr(model 'module')<else>model# Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)<line_sep>torch.save(args os.path.join(output_dir 'training_args.bin'))<line_sep>logger.info("Saving model checkpoint to %s" output_dir)<block_end>#mb.first_bar.comment = f'first bar stat'
#mb.write(f'Finished loop {i}.')
<block_end><if_stmt>args.tpu<block_start>args.xla_model.optimizer_step(optimizer barrier=<true>)<line_sep>model.zero_grad()<line_sep>global_step<augadd>1<block_end><if_stmt>args.max_steps<g>0<and>global_step<g>args.max_steps<block_start>epoch_iterator.close()<line_sep><break><block_end><block_end><if_stmt>args.local_rank<eq>-1<and>args.evaluate_during_training# Only evaluate when single GPU otherwise metrics may not average well
<block_start>results=evaluate(args model valid_dataset)<for_stmt>key,value results.items()<block_start>tb_writer.add_scalar('eval/{}'.format(key) value global_step)<block_end>mb.first_bar.comment=str(results['acc'])<block_end>mb.write(f"Epoch: {epoch} {loss} Accuracy: {results.get('acc' 0)}")<if_stmt>args.max_steps<g>0<and>global_step<g>args.max_steps<block_start>train_iterator.close()<line_sep><break><block_end><block_end>hparams_dict={k:v<for>k,v dataclasses.asdict(args).items()<if>isinstance(v (int float str bool ))}<line_sep>tb_writer.add_hparams(hparam_dict=hparams_dict metric_dict=results)<if_stmt>args.local_rank<in>[-1 0]<block_start>tb_writer.close()<block_end><return>global_step tr_loss/global_step<block_end><def_stmt>evaluate args model eval_dataset prefix="" eval_output_dir="/tmp/out"# Loop to handle MNLI double evaluation (matched, mis-matched)
<block_start>results={}<line_sep>eval_task=args.task_name<if_stmt><not>os.path.exists(eval_output_dir)<and>args.local_rank<in>[-1 0]<block_start>os.makedirs(eval_output_dir)<block_end>args.eval_batch_size=args.per_gpu_eval_batch_size<times>max(1 args.n_gpu)<line_sep># Note that DistributedSampler samples randomly
eval_sampler=SequentialSampler(eval_dataset)<if>args.local_rank<eq>-1<else>DistributedSampler(eval_dataset)<line_sep>eval_dataloader=DataLoader(eval_dataset sampler=eval_sampler batch_size=args.eval_batch_size)<line_sep># Eval!
logger.info("***** Running evaluation {} *****".format(prefix))<line_sep>logger.info(" Num examples = %d" len(eval_dataset))<line_sep>logger.info(" Batch size = %d" args.eval_batch_size)<line_sep>eval_loss=0.0<line_sep>nb_eval_steps=0<line_sep>preds=<none><line_sep>out_label_ids=<none><line_sep>mb=progress_bar(eval_dataloader)<for_stmt>batch mb<block_start>model.eval()<line_sep>batch=tuple(t.to(args.device)<for>t batch)<with_stmt>torch.no_grad()<block_start>inputs={'input_ids':batch[0] 'attention_mask':batch[1] 'labels':batch[3]}<if_stmt>args.model_type<ne>'distilbert'<block_start>inputs['token_type_ids']=batch[2]<if>args.model_type<in>['bert' 'xlnet']<else><none># XLM, DistilBERT and RoBERTa don't use segment_ids
<block_end>outputs=model(**inputs)<line_sep>tmp_eval_loss,logits=outputs[:2]<line_sep>eval_loss<augadd>tmp_eval_loss.mean().item()<block_end>nb_eval_steps<augadd>1<if_stmt>preds<is><none><block_start>preds=logits.detach().cpu().numpy()<line_sep>out_label_ids=inputs['labels'].detach().cpu().numpy()<block_end><else_stmt><block_start>preds=np.append(preds logits.detach().cpu().numpy() axis=0)<line_sep>out_label_ids=np.append(out_label_ids inputs['labels'].detach().cpu().numpy() axis=0)<block_end><block_end>eval_loss=eval_loss/nb_eval_steps<if_stmt>args.output_mode<eq>"classification"<block_start>preds=np.argmax(preds axis=1)<block_end><elif_stmt>args.output_mode<eq>"regression"<block_start>preds=np.squeeze(preds)<block_end>result=compute_metrics(eval_task preds out_label_ids)<line_sep>results.update(result)<line_sep>results['loss']=eval_loss<line_sep>output_eval_file=os.path.join(eval_output_dir prefix "eval_results.txt")<with_stmt>open(output_eval_file "w")<as>writer<block_start>logger.info("***** Eval results {} *****".format(prefix))<for_stmt>key sorted(result.keys())<block_start>logger.info(" %s = %s" key str(result[key]))<line_sep>writer.write("%s = %s\n"%(key str(result[key])))<block_end><block_end><return>results<block_end><def_stmt>prepare_glue_examples tokenizer task_name='mrpc' split_name='train'<block_start>processor=glue_processors[task_name]()<def_stmt>tf_mrpc_to_pytorch d<block_start><for_stmt>ex d<block_start>ex=processor.get_example_from_tensor_dict(ex)<line_sep># ex = processor.tfds_map(ex)
<yield>ex<block_end><block_end>tf_data=tensorflow_datasets.load(f"glue/{task_name}")[split_name]<line_sep>examples=tf_mrpc_to_pytorch(tf_data)<line_sep>features=glue_convert_examples_to_features(examples tokenizer max_length=128 task='mrpc')<line_sep>all_input_ids=torch.tensor([f.input_ids<for>f features] dtype=torch.long)<line_sep>all_attention_mask=torch.tensor([f.attention_mask<for>f features] dtype=torch.long)<line_sep>all_token_type_ids=torch.tensor([f.token_type_ids<for>f features] dtype=torch.long)<line_sep>all_labels=torch.tensor([f.label<for>f features] dtype=torch.long)<line_sep>dataset=TensorDataset(all_input_ids all_attention_mask all_token_type_ids all_labels)<line_sep><return>dataset<block_end><def_stmt>strip_tensors r<block_start>nr={}<for_stmt>k,v r.items()<block_start>v=v.numpy()<if_stmt>isinstance(v bytes)<block_start>v=v.decode("utf-8")<block_end><else_stmt><block_start>v=v.item()<block_end>nr[k]=v<block_end><return>nr<block_end><def_stmt>glue_dataset_to_df task_name<block_start>data=tensorflow_datasets.load(f"glue/{task_name}")<line_sep>new_dict={}<for_stmt>name,dataset data.items()<block_start>new_dict[name]=pd.DataFrame.from_records([strip_tensors(r)<for>r dataset] columns=dataset.output_shapes.keys() index='idx')<block_end><return>new_dict.get('train' <none>) new_dict.get('validation' <none>) new_dict.get('test' <none>)<block_end><def_stmt>convert_df_to_examples df text_a='sentence1' text_b='sentence2' label='label'<block_start><return>[InputExample(idx row[text_a] row[text_b] str(row[label]))<for>idx,row df.iterrows()]<block_end><def_stmt>convert_df_to_dataset tokenizer df max_length=128 task='mrpc' text_a='sentence1' text_b='sentence2' label='label' return_labels=<false><block_start>label_list=list(sorted(map(str df[label].unique())))<line_sep>examples=convert_df_to_examples(df text_a text_b label)<line_sep>features=glue_convert_examples_to_features(examples tokenizer max_length=max_length label_list=label_list output_mode='classification' task=<none>)<line_sep>all_input_ids=torch.tensor([f.input_ids<for>f features] dtype=torch.long)<line_sep>all_attention_mask=torch.tensor([f.attention_mask<for>f features] dtype=torch.long)<line_sep>all_token_type_ids=torch.tensor([f.token_type_ids<for>f features] dtype=torch.long)<line_sep>all_labels=torch.tensor([f.label<for>f features] dtype=torch.long)<line_sep>dataset=TensorDataset(all_input_ids all_attention_mask all_token_type_ids all_labels)<if_stmt>return_labels<block_start><return>dataset label_list<block_end><return>dataset<block_end>@dataclass<class_stmt>TransfoLearner()<block_start>model:nn.Module<line_sep>tokenizer:Any<line_sep>data:Any<block_end><def_stmt>get_preds args model dataset ordered=<true><block_start>eval_dataset=dataset<line_sep>eval_sampler=SequentialSampler(eval_dataset)<if>args.local_rank<eq>-1<else>DistributedSampler(eval_dataset)<line_sep>eval_dataloader=DataLoader(eval_dataset sampler=eval_sampler batch_size=args.eval_batch_size)<if_stmt>isinstance(eval_sampler DistributedSampler)<and>ordered# Note that DistributedSampler samples randomly
<block_start><raise>ValueError("Unable to run distributed get_preds with ordered == True")<block_end>logger.info("Num examples = %d" len(eval_dataset))<line_sep>logger.info("Batch size = %d" args.eval_batch_size)<line_sep>eval_loss=0.0<line_sep>nb_eval_steps=0<line_sep>mb=progress_bar(eval_dataloader)<line_sep>preds=[]<line_sep>labels=[]<try_stmt><block_start><with_stmt>torch.no_grad()<block_start>model.to(args.device)<line_sep>model.eval()<for_stmt>batch mb<block_start>batch=tuple(t.to(args.device)<for>t batch)<line_sep>inputs={'input_ids':batch[0] 'attention_mask':batch[1] 'labels':batch[3]}<if_stmt>args.model_type<ne>'distilbert'<block_start>inputs['token_type_ids']=batch[2]<if>args.model_type<in>['bert' 'xlnet']<else><none><line_sep># XLM, DistilBERT and RoBERTa don't use segment_ids
<block_end>outputs=model(**inputs)<line_sep>tmp_eval_loss,logits=outputs[:2]<line_sep>eval_loss<augadd>tmp_eval_loss.mean().item()<line_sep>nb_eval_steps<augadd>1<line_sep>preds.append(logits.detach().cpu())<line_sep>labels.append(inputs['labels'].detach().cpu())# add non_blocking=True but somehow it isn't available in our torch version
<block_end><return>torch.cat(preds dim=0) torch.cat(labels dim=0)<block_end><block_end><finally_stmt><block_start>model.to("cpu")<block_end><block_end>@dataclass<class_stmt>TransfoDatabunch()<block_start>num_labels:int<line_sep>train_ds:Any<line_sep>valid_ds:Any<line_sep>test_ds:Any<block_end>@dataclass<class_stmt>TransfoExperiment(Experiment)<block_start>test_split:str=<none><line_sep>valid_split:str=<none><line_sep>text_a:str='text'<line_sep>text_b:str='cell_content'<line_sep>label:str='label'<line_sep>#@help("Model type selected in the list: ...")
model_type:str=<none><line_sep>#@help("Path to pre-trained model or shortcut name selected in the list: ...")
pretrained_name:str=<none><line_sep>#@help("The name of the task to train selected in the list: " + "".join(processors.keys()))
task_name:str=<none><line_sep>#@help("Pretrained config name or path if not the same as model_name")
config_name:str=""<line_sep>#@help("Pretrained tokenizer name or path if not the same as model_name")
tokenizer_name:str=""<line_sep>#@help("Where do you want to store the pre-trained models downloaded from s3")
cache_dir:str=""<line_sep>#@help("The maximum total input sequence length after tokenization. Sequences longer than this will be truncated sequences shorter will be padded.")
max_seq_length:int=128<line_sep>#@help("Whether to run training.")
do_train:bool=<false><line_sep>#@help("Whether to run eval on the dev set.")
do_eval:bool=<false><line_sep>#@help("Rul evaluation during training at each logging step.")
evaluate_during_training:bool=<false><line_sep>#@help("Batch size per GPU/CPU for training.")
per_gpu_train_batch_size:int=8<line_sep>#@help("Batch size per GPU/CPU for evaluation.")
per_gpu_eval_batch_size:int=8<line_sep>#@help("Number of updates steps to accumulate before performing a backward/update pass.")
gradient_accumulation_steps:int=1<line_sep>#@help("The initial learning rate for Adam.")
learning_rate:float=5e-5<line_sep>#@help("Weight deay if we apply some.")
weight_decay:float=0.0<line_sep>#@help("Epsilon for Adam optimizer.")
adam_epsilon:float=1e-8<line_sep>#@help("Max gradient norm.")
max_grad_norm:float=1.0<line_sep>#@help("Total number of training epochs to perform.")
num_train_epochs:float=3.0<line_sep>#@help("If > 0: set total number of training steps to perform. Override num_train_epochs.")
max_steps:int=-1<line_sep>#@help("Linear warmup over warmup_steps.")
warmup_steps:int=0<line_sep>#@help("Log every X updates steps.")
logging_steps:int=10<line_sep>#@help("Save checkpoint every X updates steps.")
save_steps:int=50<line_sep>#@help("Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number")
eval_all_checkpoints:bool=<false><line_sep>#@help("Avoid using CUDA when available")
no_cuda:bool=<false><line_sep>#@help("Overwrite the cached training and evaluation sets")
overwrite_cache:bool=<false><line_sep>#@help("random seed for initialization")
seed:int=42<line_sep>#@help("Whether to run on the TPU defined in the environment variables")
tpu:bool=<false><line_sep>#@help("TPU IP address if none are set in the environment variables")
tpu_ip_address:str=''<line_sep>#@help("TPU name if none are set in the environment variables")
tpu_name:str=''<line_sep>#@help("XRT TPU config if none are set in the environment variables")
xrt_tpu_config:str=''<line_sep>#@help("Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
fp16:bool=<false><line_sep>#@help("For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2' and 'O3']. See details at https://nvidia.github.io/apex/amp.html")
fp16_opt_level:str='O1'<line_sep>#@help("For distributed training: local_rank")
local_rank:int=-1<line_sep>#@help("For distant debugging.")
server_ip:str=''<line_sep>#@help("For distant debugging.")
server_port:str=''<line_sep>seed:int=42<line_sep># Unused
#@help("The input data dir. Should contain the .tsv files (or other data files) for the task.")
data_dir:str="/tmp/data"<line_sep>#@help("The output directory where the model predictions and checkpoints will be written.")
output_dir:str="/tmp/tmp_output_dir"<line_sep>#@help("Overwrite the content of the output directory")
overwrite_output_dir:bool=<true><def_stmt>__post_init__ self<block_start><if_stmt>os.path.exists(self.output_dir)<and>os.listdir(self.output_dir)<and>self.do_train<and><not>self.overwrite_output_dir<block_start><raise>ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(self.output_dir))<block_end># Setup distant debugging if needed
<if_stmt>self.server_ip<and>self.server_port# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
<block_start><import_stmt>ptvsd<line_sep>print("Waiting for debugger attach")<line_sep>ptvsd.enable_attach(address=(self.server_ip self.server_port) redirect_output=<true>)<line_sep>ptvsd.wait_for_attach()<block_end># Setup CUDA, GPU & distributed training
<if_stmt>self.local_rank<eq>-1<or>self.no_cuda<block_start>device=torch.device("cuda"<if>torch.cuda.is_available()<and><not>self.no_cuda<else>"cpu")<line_sep>self.n_gpu=torch.cuda.device_count()<block_end><else_stmt># Initializes the distributed backend which will take care of synchronizing nodes/GPUs
<block_start>torch.cuda.set_device(self.local_rank)<line_sep>device=torch.device("cuda" self.local_rank)<line_sep>torch.distributed.init_process_group(backend='nccl')<line_sep>self.n_gpu=1<block_end>self.device=device<line_sep>self.output_mode="classification"<line_sep>self.train_batch_size=self.per_gpu_train_batch_size<times>max(1 self.n_gpu)<line_sep>self.eval_batch_size=self.per_gpu_eval_batch_size<times>max(1 self.n_gpu)<line_sep>self._tokenizer=<none><line_sep>self._model=<none><line_sep>self._data_cache=<none><line_sep>self.train_started=<none><block_end>@property<def_stmt>tokenizer self<block_start><if_stmt>self._tokenizer<is><none><block_start>self._tokenizer=AutoTokenizer.from_pretrained(self.pretrained_name)<block_end><return>self._tokenizer<block_end>@property<def_stmt>experiment_name self<block_start><import_from_stmt>datetime datetime<import_stmt>socket<if_stmt><not>self.name<block_start>now=datetime.now()<line_sep>d=now.strftime("%y%m%d_%H%M%S")<line_sep>h="_".join(socket.gethostname().split('-'))<def_stmt>short_name name<block_start><return>"".join([p[0]<for>p name.split('_')])<block_end><def_stmt>short_val val<block_start><if_stmt>isinstance(val bool)<block_start><return>int(val)<block_end><return>val<block_end>relevant_params={k:v<for>k,v dataclasses.asdict(self).items()<if><not>k.startswith('_')<and>hasattr(TransfoExperiment k)<and>getattr(TransfoExperiment k)<ne>v}<line_sep>params=[f"{short_name(k)}_{v}"<for>k,v relevant_params.items()<if><not>isinstance(v bool)]<line_sep>bool_flags=[f"{short_name(k)}"<for>k,v relevant_params.items()<if>isinstance(v bool)<and>v]<line_sep>params_str=".".join(params+bool_flags)<line_sep>self.name=f"{d}.{h}.{params_str}"<block_end><return>self.name<block_end><def_stmt>get_summary_writer self<block_start><return>SummaryWriter("runs/"+self.experiment_name)<block_end><def_stmt>_save_predictions self path<block_start>self._dump_pickle([self._preds self._phases] path)<block_end><def_stmt>_load_predictions self path<block_start>self._preds,self._phases=self._load_pickle(path)<line_sep><return>self._preds<block_end><def_stmt>load_predictions self<block_start>path=self._path.parent/f"{self._path.stem}.preds"<line_sep><return>self._load_predictions(path)<block_end># todo: make it compatible with Experiment
<def_stmt>get_trained_model self data:TransfoDatabunch<block_start>self._model=self.train_model(data)<line_sep>self.has_model=<true><line_sep><return>self._model<block_end><def_stmt>get_glue_databunch self<block_start><return>TransfoDatabunch(train_ds=prepare_glue_examples(self.tokenizer self.task_name 'train') valid_ds=prepare_glue_examples(self.tokenizer self.task_name 'validation') test_ds=<none>)<block_end><def_stmt>get_databunch self train_df valid_df test_df<block_start>data_key=(id(train_df) id(valid_df) id(test_df))<if_stmt>self._data_cache<is><not><none><and>self._data_cache.key<ne>data_key<block_start>self._data_cache=<none><block_end>self.tokenizer.max_len=999999<if_stmt>self._data_cache<is><none><block_start>common_args=dict(text_a=self.text_a text_b=self.text_b label=self.label)<line_sep>train_ds,label_list=convert_df_to_dataset(self.tokenizer train_df return_labels=<true> **common_args)<line_sep>data=TransfoDatabunch(num_labels=len(label_list) train_ds=train_ds valid_ds=convert_df_to_dataset(self.tokenizer valid_df **common_args) test_ds=convert_df_to_dataset(self.tokenizer test_df **common_args))<line_sep>data.key=data_key<line_sep>self._data_cache=data<block_end><return>self._data_cache<block_end><def_stmt>new_experiment self **kwargs#kwargs.setdefault("has_predictions", False)
<block_start><return>super().new_experiment(**kwargs)<block_end><def_stmt>_add_phase self state<block_start><del_stmt>state['opt']<del_stmt>state['train_dl']<line_sep>self._phases.append(state)<block_end><def_stmt>set_seed self name<block_start><return>set_seed(self.seed name all_gpus=(self.n_gpu<g>1))<block_end># todo: make it compatible with Experiment
<def_stmt>train_model self data:TransfoDatabunch<block_start>self.set_seed("class")<line_sep>self.train_started=time.time()<line_sep>num_labels=data.num_labels<line_sep>config=AutoConfig.from_pretrained(self.pretrained_name num_labels=num_labels)#, finetuning_task=args.task_name
model=AutoModelForSequenceClassification.from_pretrained(self.pretrained_name config=config)<line_sep>train(self data.train_ds data.valid_ds model.to(self.device) self._tokenizer)<line_sep>model.to("cpu")<line_sep><return>model<block_end><def_stmt>_save_model self path<block_start>model_to_save=self._model.module<if>hasattr(self._model 'module')<else>self._model<line_sep># Take care of distributed/parallel training
model_to_save.save_pretrained(path)<line_sep>logger.info("Saving model checkpoint to %s" path)<block_end># todo: move to Experiment
<def_stmt>save self dir_path<block_start>dir_path=Path(dir_path)<line_sep>dir_path.mkdir(exist_ok=<true> parents=<true>)<line_sep>filename=self._get_next_exp_name(dir_path)<line_sep>j=dataclasses.asdict(self)<with_stmt>open(filename "wt")<as>f<block_start>json.dump(j f)<block_end>self._save_model(dir_path/f"{filename.stem}.model")<if_stmt>hasattr(self "_preds")<block_start>self._save_predictions(dir_path/f"{filename.stem}.preds")<block_end><return>filename.name<block_end><def_stmt>evaluate_transformers self data<block_start><return>evaluate(self self._model.to(self.device) data.valid_ds prefix="")<block_end><def_stmt>evaluate self model train_df valid_df test_df<block_start>data=self.get_databunch(train_df valid_df test_df)<line_sep>valid_probs=get_preds(self model data.valid_ds ordered=<true>)[0].cpu().numpy()<line_sep>test_probs=get_preds(self model data.test_ds ordered=<true>)[0].cpu().numpy()<line_sep>train_probs=get_preds(self model data.train_ds ordered=<true>)[0].cpu().numpy()<line_sep>self._preds=[]<for_stmt>prefix,tdf,probs zip(["train" "valid" "test"] [train_df valid_df test_df] [train_probs valid_probs test_probs])<block_start>preds=np.argmax(probs axis=1)<if_stmt>self.merge_fragments<and>self.merge_type<ne>"concat"<block_start><if_stmt>self.merge_type<eq>"vote_maj"<block_start>vote_results=preds_for_cell_content(tdf probs)<block_end><elif_stmt>self.merge_type<eq>"vote_avg"<block_start>vote_results=preds_for_cell_content_multi(tdf probs)<block_end><elif_stmt>self.merge_type<eq>"vote_max"<block_start>vote_results=preds_for_cell_content_max(tdf probs)<block_end>preds=vote_results["pred"]<line_sep>true_y=vote_results["true"]<block_end><else_stmt><block_start>true_y=tdf["label"]<line_sep>print(true_y.shape)<block_end>self._set_results(prefix preds true_y)<line_sep>self._preds.append(probs)<block_end><block_end><block_end># # schedule: Tuple = (
# # (1, 1e-2), # (a, b) -> fit_one_cycle(a, b)
# # (1, 5e-3/2., 5e-3), # (a, b) -> freeze_to(-2); fit_one_cycle(a, b)
# # (8, 2e-3/100, 2e-3) # (a, b) -> unfreeze(); fit_one_cycle(a, b)
# # )
# # # drop_mult: float = 0.75
# # fp16: bool = False
# pretrained_lm: str = "bert_base_cased"
# # dataset: str = None
# # train_on_easy: bool = True
# # BS: int = 64
# #
# # has_predictions: bool = False # similar to has_model, but to avoid storing pretrained models we only keep predictions
# # # that can be later used by CRF
<class_stmt>MnliProcessor(DataProcessor)<block_start>"""Processor for the MultiNLI data set (GLUE version)."""<def_stmt>get_example_from_tensor_dict self tensor_dict<block_start>"""See base class."""<line_sep><return>InputExample(tensor_dict['idx'].numpy() tensor_dict['premise'].numpy().decode('utf-8') tensor_dict['hypothesis'].numpy().decode('utf-8') str(tensor_dict['label'].numpy()))<block_end><def_stmt>get_train_examples self data_dir<block_start>"""See base class."""<line_sep><return>self._create_examples(self._read_tsv(os.path.join(data_dir "train.tsv")) "train")<block_end><def_stmt>get_dev_examples self data_dir<block_start>"""See base class."""<line_sep><return>self._create_examples(self._read_tsv(os.path.join(data_dir "dev_matched.tsv")) "dev_matched")<block_end><def_stmt>get_labels self<block_start>"""See base class."""<line_sep><return>["contradiction" "entailment" "neutral"]<block_end><def_stmt>_create_examples self lines set_type<block_start>"""Creates examples for the training and dev sets."""<line_sep>examples=[]<for_stmt>(i line) enumerate(lines)<block_start><if_stmt>i<eq>0<block_start><continue><block_end>guid="%s-%s"%(set_type line[0])<line_sep>text_a=line[8]<line_sep>text_b=line[9]<line_sep>label=line[-1]<line_sep>examples.append(InputExample(guid=guid text_a=text_a text_b=text_b label=label))<block_end><return>examples<block_end><block_end> |
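# Editor's sketch, not part of the original sample: a rough end-to-end use of TransfoExperiment, assuming `train_df`/`valid_df`/`test_df` are pandas DataFrames with `text`, `cell_content` and `label` columns and that the Experiment base class needs no extra constructor arguments.
_exp=TransfoExperiment(model_type="bert" pretrained_name="bert-base-uncased" do_train=<true>)<line_sep># get_databunch tokenizes the frames once and caches the resulting TensorDatasets
_databunch=_exp.get_databunch(train_df valid_df test_df)<line_sep>_model=_exp.get_trained_model(_databunch)<line_sep># evaluate() stores per-split probabilities in _exp._preds and records per-split results via _set_results
_exp.evaluate(_model train_df valid_df test_df)<line_sep>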
<import_from_stmt>.maze_ppo_config main_config create_config<import_from_stmt>.maze_dqn_config main_config create_config<line_sep> |
"""
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides the base of all CSV file migrators.
Author:
<NAME>, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/#/special-thanks
Contributors:
https://pyfunceble.github.io/#/contributors
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/dev/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""<import_stmt>csv<import_stmt>functools<import_stmt>tempfile<import_from_stmt>typing List Optional<import_stmt>PyFunceble.facility<import_from_stmt>PyFunceble.cli.migrators.base MigratorBase<import_from_stmt>PyFunceble.cli.utils.stdout print_single_line<import_from_stmt>PyFunceble.helpers.file FileHelper<class_stmt>CSVFileMigratorBase(MigratorBase)<block_start>"""
Provides the base of all CSV file migrator classes.
"""<line_sep>source_file:Optional[str]=<none><line_sep>FIELDS:Optional[List[str]]=<none><line_sep>TO_DELETE:Optional[List[str]]=<none><def_stmt>ensure_source_file_is_given func# pylint: disable=no-self-argument
<block_start>"""
Ensures that the source file is given before launching the decorated
method.
:raise RuntimeError:
When the :code:`self.source_file` is not given.
"""<line_sep>@functools.wraps(func)<def_stmt>wrapper self *args **kwargs<block_start><if_stmt><not>isinstance(self.source_file str)<block_start><raise>RuntimeError("<self.source_file> is not given.")<block_end><return>func(self *args **kwargs)<block_end># pylint: disable=not-callable
<return>wrapper<block_end>@ensure_source_file_is_given<def_stmt>migrate self<arrow>"MigratorBase"<block_start>"""
Performs the migration of the source file, dropping the obsolete columns.
"""<line_sep>file_helper=FileHelper(self.source_file)<if_stmt>file_helper.exists()<block_start><with_stmt>file_helper.open("r" encoding="utf-8")<as>file_stream<block_start>first_line=next(file_stream)<block_end><if_stmt>any(x<in>first_line<for>x self.TO_DELETE)<block_start>temp_destination=tempfile.NamedTemporaryFile("a+" newline="" encoding="utf-8" delete=<false>)<line_sep>file_handler=file_helper.open(newline="")<line_sep>reader=csv.DictReader(file_handler)<line_sep>writer=csv.DictWriter(temp_destination fieldnames=[x<for>x self.FIELDS<if>x<not><in>self.TO_DELETE] )<line_sep>writer.writeheader()<line_sep>keys_found=<false><for_stmt>row reader<block_start>row=dict(row)<for_stmt>key self.TO_DELETE<block_start><if_stmt>key<in>row<block_start><del_stmt>row[key]<line_sep>keys_found=<true><block_end><block_end><if_stmt><not>keys_found<block_start><break><block_end>writer.writerow(row)<if_stmt>self.print_action_to_stdout<block_start>print_single_line()<block_end><block_end>temp_destination.seek(0)<line_sep>FileHelper(temp_destination.name).move(self.source_file)<block_end><block_end>self.done=<true><block_end><def_stmt>start self<arrow>"MigratorBase"<block_start>"""
Starts the migration and everything related to it.
"""<line_sep>PyFunceble.facility.Logger.info("Started migration.")<line_sep>self.migrate()<line_sep>PyFunceble.facility.Logger.info("Finished migration.")<line_sep><return>self<block_end><block_end> |
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
<import_stmt>itertools<import_from_stmt>..utils.logger get_logger<line_sep>logger=get_logger(__name__)<def_stmt>product_dict d<block_start>keys=d.keys()<line_sep>vals=d.values()<for_stmt>instance itertools.product(*vals)<block_start><yield>dict(zip(keys instance))<block_end><block_end><def_stmt>check_params algo_name config supported_params<block_start>""" Check algorithm parameters in config
:param algo_name: name of algorithm
:param config: config with parameters to check
:param supported_params: parameters supported by algorithm
"""<for_stmt>key,value config.items()<block_start><if_stmt>key<not><in>supported_params<block_start><raise>RuntimeError('Algorithm {}. Unknown parameter: {}'.format(algo_name key))<block_end><if_stmt>isinstance(value dict)<block_start><if_stmt>isinstance(supported_params[key] dict)<block_start>check_params(algo_name value supported_params[key])<block_end><else_stmt><block_start><raise>RuntimeError('Algorithm {}. Wrong structure for parameter: {}'.format(algo_name key))<block_end><block_end><block_end><block_end> |
<import_stmt>os<import_from_stmt>qtpy QtCore<as>QC<import_from_stmt>qtpy QtGui<as>QG<import_from_stmt>qtpy QtWidgets<as>QW<import_from_stmt>hydrus.core HydrusConstants<as>HC<import_from_stmt>hydrus.core HydrusGlobals<as>HG<import_from_stmt>hydrus.core HydrusPaths<import_from_stmt>hydrus.core HydrusText<import_from_stmt>hydrus.client ClientExporting<import_from_stmt>hydrus.client.gui ClientGUIFunctions<import_from_stmt>hydrus.client.gui QtPorting<as>QP<line_sep># we do this because some programs like discord will disallow exports with additional custom mimetypes (like 'application/hydrus-files')
# as this is only ever an internal transfer, and as the python mimedata object is preserved through the dnd, we can just tack this info on with a subclass and python variables
<class_stmt>QMimeDataHydrusFiles(QC.QMimeData)<block_start><def_stmt>__init__ self<block_start>QC.QMimeData.__init__(self)<line_sep>self._hydrus_files=<none><block_end><def_stmt>hydrusFiles self<block_start><return>self._hydrus_files<block_end><def_stmt>setHydrusFiles self page_key hashes<block_start>self._hydrus_files=(page_key hashes)<block_end><block_end><def_stmt>DoFileExportDragDrop window page_key media alt_down<block_start>drop_source=QG.QDrag(window)<line_sep>data_object=QMimeDataHydrusFiles()<line_sep>#
new_options=HG.client_controller.new_options<line_sep>do_secret_discord_dnd_fix=new_options.GetBoolean('secret_discord_dnd_fix')<and>alt_down<line_sep>#
client_files_manager=HG.client_controller.client_files_manager<line_sep>original_paths=[]<line_sep>media_and_original_paths=[]<line_sep>total_size=0<for_stmt>m media<block_start>hash=m.GetHash()<line_sep>mime=m.GetMime()<line_sep>total_size<augadd>m.GetSize()<line_sep>original_path=client_files_manager.GetFilePath(hash mime check_file_exists=<false>)<line_sep>original_paths.append(original_path)<line_sep>media_and_original_paths.append((m original_path))<block_end>#
discord_dnd_fix_possible=new_options.GetBoolean('discord_dnd_fix')<and>len(original_paths)<le>50<and>total_size<l>200<times>1048576<line_sep>temp_dir=HG.client_controller.temp_dir<if_stmt>do_secret_discord_dnd_fix<block_start>dnd_paths=original_paths<line_sep>flags=QC.Qt.MoveAction<block_end><elif_stmt>discord_dnd_fix_possible<and>os.path.exists(temp_dir)<block_start>fallback_filename_terms=ClientExporting.ParseExportPhrase('{hash}')<try_stmt><block_start>filename_pattern=new_options.GetString('discord_dnd_filename_pattern')<line_sep>filename_terms=ClientExporting.ParseExportPhrase(filename_pattern)<if_stmt>len(filename_terms)<eq>0<block_start><raise>Exception()<block_end><block_end><except_stmt><block_start>filename_terms=fallback_filename_terms<block_end>dnd_paths=[]<for_stmt>(m original_path) media_and_original_paths<block_start>filename=ClientExporting.GenerateExportFilename(temp_dir m filename_terms)<if_stmt>filename<eq>HC.mime_ext_lookup[m.GetMime()]<block_start>filename=ClientExporting.GenerateExportFilename(temp_dir m fallback_filename_terms)<block_end>dnd_path=os.path.join(temp_dir filename)<if_stmt><not>os.path.exists(dnd_path)<block_start>HydrusPaths.MirrorFile(original_path dnd_path)<block_end>dnd_paths.append(dnd_path)<block_end>flags=QC.Qt.MoveAction|QC.Qt.CopyAction<block_end><else_stmt><block_start>dnd_paths=original_paths<line_sep>flags=QC.Qt.CopyAction<block_end>uri_list=[]<for_stmt>path dnd_paths<block_start>uri_list.append(QC.QUrl.fromLocalFile(path))<block_end>data_object.setUrls(uri_list)<line_sep>#
hashes=[m.GetHash()<for>m media]<line_sep>data_object.setHydrusFiles(page_key hashes)<line_sep># old way of doing this that makes some external programs (discord) reject it
'''
if page_key is None:
encoded_page_key = None
else:
encoded_page_key = page_key.hex()
data_obj = ( encoded_page_key, [ hash.hex() for hash in hashes ] )
data_str = json.dumps( data_obj )
data_bytes = bytes( data_str, 'utf-8' )
data_object.setData( 'application/hydrus-media', data_bytes )
'''<line_sep>#
drop_source.setMimeData(data_object)<line_sep>result=drop_source.exec_(flags QC.Qt.CopyAction)<line_sep><return>result<block_end><class_stmt>FileDropTarget(QC.QObject)<block_start><def_stmt>__init__ self parent filenames_callable=<none> url_callable=<none> media_callable=<none><block_start>QC.QObject.__init__(self parent)<line_sep>self._parent=parent<if_stmt>parent<block_start>parent.setAcceptDrops(<true>)<block_end>self._filenames_callable=filenames_callable<line_sep>self._url_callable=url_callable<line_sep>self._media_callable=media_callable<block_end><def_stmt>eventFilter self object event<block_start><if_stmt>event.type()<eq>QC.QEvent.Drop<block_start><if_stmt>self.OnDrop(event.pos().x() event.pos().y())<block_start>event.setDropAction(self.OnData(event.mimeData() event.proposedAction()))<line_sep>event.accept()<block_end><block_end><elif_stmt>event.type()<eq>QC.QEvent.DragEnter<block_start>event.accept()<block_end><return><false><block_end><def_stmt>OnData self mime_data result<block_start>media_dnd=isinstance(mime_data QMimeDataHydrusFiles)<line_sep>urls_dnd=mime_data.hasUrls()<line_sep>text_dnd=mime_data.hasText()<if_stmt>media_dnd<and>self._media_callable<is><not><none><block_start>result=mime_data.hydrusFiles()<if_stmt>result<is><not><none><block_start>(page_key hashes)=result<if_stmt>page_key<is><not><none><block_start>QP.CallAfter(self._media_callable page_key hashes)<block_end><block_end># callafter so we can terminate dnd event now
result=QC.Qt.MoveAction<line_sep># old way of doing it that messed up discord et al
'''
elif mime_data.formats().count( 'application/hydrus-media' ) and self._media_callable is not None:
mview = mime_data.data( 'application/hydrus-media' )
data_bytes = mview.data()
data_str = str( data_bytes, 'utf-8' )
(encoded_page_key, encoded_hashes) = json.loads( data_str )
if encoded_page_key is not None:
page_key = bytes.fromhex( encoded_page_key )
hashes = [ bytes.fromhex( encoded_hash ) for encoded_hash in encoded_hashes ]
QP.CallAfter( self._media_callable, page_key, hashes ) # callafter so we can terminate dnd event now
result = QC.Qt.MoveAction
'''<block_end><elif_stmt>urls_dnd<or>text_dnd<block_start>paths=[]<line_sep>urls=[]<if_stmt>urls_dnd<block_start>dnd_items=mime_data.urls()<for_stmt>dnd_item dnd_items<block_start><if_stmt>dnd_item.isLocalFile()<block_start>paths.append(os.path.normpath(dnd_item.toLocalFile()))<block_end><else_stmt><block_start>urls.append(dnd_item.url())<block_end><block_end><block_end><else_stmt><block_start>text=mime_data.text()<line_sep>text_lines=HydrusText.DeserialiseNewlinedTexts(text)<for_stmt>text_line text_lines<block_start><if_stmt>text_line.startswith('http')<block_start>urls.append(text_line)<line_sep># ignore 'paths'
<block_end><block_end><block_end><if_stmt>self._filenames_callable<is><not><none><block_start><if_stmt>len(paths)<g>0<block_start>QP.CallAfter(self._filenames_callable paths)<block_end><block_end># callafter to terminate dnd event now
<if_stmt>self._url_callable<is><not><none><block_start><if_stmt>len(urls)<g>0<block_start><for_stmt>url urls<block_start>QP.CallAfter(self._url_callable url)<block_end><block_end><block_end># callafter to terminate dnd event now
result=QC.Qt.IgnoreAction<block_end><else_stmt><block_start>result=QC.Qt.IgnoreAction<block_end><return>result<block_end><def_stmt>OnDrop self x y<block_start>screen_position=ClientGUIFunctions.ClientToScreen(self._parent QC.QPoint(x y))<line_sep>drop_tlw=QW.QApplication.topLevelAt(screen_position)<line_sep>my_tlw=self._parent.window()<if_stmt>drop_tlw<eq>my_tlw<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end># setting OnDragOver to return copy gives Linux trouble with page tab drops with shift held down
<block_end> |
<import_stmt>time<import_stmt>grpc<import_from_stmt>threading Event<import_from_stmt>concurrent futures<import_from_stmt>collections Iterable<import_from_stmt>malib.rpc.proto log_pb2_grpc log_pb2<import_from_stmt>malib.utils io_wrapper<import_from_stmt>malib.utils.convert utc_to_str dump_dict<import_from_stmt>malib.utils.io_wrapper BaseIOWrapper StandardIOWrapper<class_stmt>LogServicer(log_pb2_grpc.LogRPCServicer)<block_start><def_stmt>__init__ self timeout=-1 ioers=<none><block_start>super().__init__()<line_sep>self.timeout=timeout<line_sep>self.ioers=[]<if_stmt>isinstance(ioers Iterable)<block_start><for_stmt>i ioers<block_start><assert_stmt>isinstance(i BaseIOWrapper)<line_sep>self.ioers.append(i)<block_end><block_end><elif_stmt>ioers<is><not><none><block_start><assert_stmt>isinstance(ioers BaseIOWrapper)<line_sep>self.ioers.append(ioers)<block_end><else_stmt><block_start>self.ioers.append(StandardIOWrapper())<block_end>self.alivetime=time.time()<block_end><def_stmt>Log self info context<block_start>status=0<line_sep>target=<none><try_stmt><block_start>level=int(info.log_level)<line_sep>msg=info.log_info<line_sep>st=info.send_time<line_sep>self.alivetime=time.time()<line_sep>target={"ReceiveTime":time.time() "SendTime":st "Level":level "Content":msg }<block_end><except_stmt><block_start>status=-1<line_sep>target={"ReceiveTime":time.time() "SendTime":"N/A" "Level":"N/A" "Content":"Error" }<block_end><for_stmt>i self.ioers<block_start>i.write("LoggerServer: "+dump_dict(target))<block_end><return>log_pb2.LogReply(status_code=str(status) send_time=time.time())<block_end># def stop(self):
# for i in self.ioers:
# i.write('LoggerServer: Calling server stop')
<block_end><class_stmt>LoggerServer<block_start><def_stmt>__init__ self port io_wrappers=<none> grace=5 max_workers=10<block_start>self.port=port<line_sep>self.grace=grace<line_sep>self.server=grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers))<line_sep>self.io_wrappers=io_wrappers<line_sep>log_pb2_grpc.add_LogRPCServicer_to_server(LogServicer(ioers=io_wrappers) self.server)<line_sep>self.server.add_insecure_port(port)<block_end><def_stmt>start self<block_start>self.server.start()<block_end><def_stmt>stop self<block_start><for_stmt>i self.io_wrappers<block_start>i.write("LoggerServer: Calling server stop")<block_end>self.server.stop(grace=self.grace)<block_end><block_end><def_stmt>serve port<block_start>server=grpc.server(futures.ThreadPoolExecutor(max_workers=10))<line_sep>log_pb2_grpc.add_LogRPCServicer_to_server(LogServicer() server)<line_sep>server.add_insecure_port(port)<line_sep>server.start()<line_sep>server.wait_for_termination()<block_end> |