Columns: language (string, 2 distinct values) and func_code_string (string, length 63 to 466k characters).
python
def _process_cell(i, state, finite=False):
    """Process 3 cells and return a value from 0 to 7."""
    op_1 = state[i - 1]
    op_2 = state[i]
    if i == len(state) - 1:
        if finite:
            op_3 = state[0]
        else:
            op_3 = 0
    else:
        op_3 = state[i + 1]
    result = 0
    for i, val in enumerate([op_3, op_2, op_1]):
        if val:
            result += 2**i
    return result
java
public Collection<ID> getIds(final WebQuery constraints) {
    return (Collection<ID>) find(constraints, JPASearchStrategy.ID).getList();
}
python
def endswith_strip(s, endswith='.txt', ignorecase=True):
    """ Strip a suffix from the end of a string

    >>> endswith_strip('http://TotalGood.com', '.COM')
    'http://TotalGood'
    >>> endswith_strip('http://TotalGood.com', endswith='.COM', ignorecase=False)
    'http://TotalGood.com'
    """
    if ignorecase:
        if s.lower().endswith(endswith.lower()):
            return s[:-len(endswith)]
    else:
        if s.endswith(endswith):
            return s[:-len(endswith)]
    return s
python
def read_until(data: bytes, *, return_tail: bool = True, from_=None) -> bytes:
    """ read until some bytes appear """
    return (yield (Traps._read_until, data, return_tail, from_))
python
def partition(src, key=None):
    """No relation to :meth:`str.partition`, ``partition`` is like
    :func:`bucketize`, but for added convenience returns a tuple of
    ``(truthy_values, falsy_values)``.

    >>> nonempty, empty = partition(['', '', 'hi', '', 'bye'])
    >>> nonempty
    ['hi', 'bye']

    *key* defaults to :class:`bool`, but can be carefully overridden to
    use any function that returns either ``True`` or ``False``.

    >>> import string
    >>> is_digit = lambda x: x in string.digits
    >>> decimal_digits, hexletters = partition(string.hexdigits, is_digit)
    >>> ''.join(decimal_digits), ''.join(hexletters)
    ('0123456789', 'abcdefABCDEF')
    """
    bucketized = bucketize(src, key)
    return bucketized.get(True, []), bucketized.get(False, [])
java
@Override
public boolean validateObject(SocketAddress key, DatagramSocket socket) {
    return socket.isBound() && !socket.isClosed() && socket.isConnected();
}
python
def _getreply(self, user, msg, context='normal', step=0, ignore_object_errors=True): """The internal reply getter function. DO NOT CALL THIS YOURSELF. :param str user: The user ID as passed to ``reply()``. :param str msg: The formatted user message. :param str context: The reply context, one of ``begin`` or ``normal``. :param int step: The recursion depth counter. :param bool ignore_object_errors: Whether to ignore errors from within Python object macros and not raise an ``ObjectError`` exception. :return str: The reply output. """ # Needed to sort replies? if 'topics' not in self.master._sorted: raise RepliesNotSortedError("You must call sort_replies() once you are done loading RiveScript documents") # Initialize the user's profile? topic = self.master.get_uservar(user, "topic") if topic in [None, "undefined"]: topic = "random" self.master.set_uservar(user, "topic", topic) # Collect data on the user. stars = [] thatstars = [] # For %Previous's. reply = '' # Avoid letting them fall into a missing topic. if topic not in self.master._topics: self.warn("User " + user + " was in an empty topic named '" + topic + "'") topic = "random" self.master.set_uservar(user, "topic", topic) # Avoid deep recursion. if step > self.master._depth: raise DeepRecursionError # Are we in the BEGIN statement? if context == 'begin': topic = '__begin__' # Initialize this user's history. history = self.master.get_uservar(user, "__history__") if type(history) is not dict or "input" not in history or "reply" not in history: history = self.default_history() self.master.set_uservar(user, "__history__", history) # More topic sanity checking. if topic not in self.master._topics: # This was handled before, which would mean topic=random and # it doesn't exist. Serious issue! raise NoDefaultRandomTopicError("no default topic 'random' was found") # Create a pointer for the matched data when we find it. matched = None matchedTrigger = None foundMatch = False # See if there were any %Previous's in this topic, or any topic related # to it. This should only be done the first time -- not during a # recursive redirection. This is because in a redirection, "lastreply" # is still gonna be the same as it was the first time, causing an # infinite loop! if step == 0: allTopics = [topic] if topic in self.master._includes or topic in self.master._lineage: # Get all the topics! allTopics = inherit_utils.get_topic_tree(self.master, topic) # Scan them all! for top in allTopics: self.say("Checking topic " + top + " for any %Previous's.") if top in self.master._sorted["thats"]: self.say("There is a %Previous in this topic!") # Do we have history yet? lastReply = history["reply"][0] # Format the bot's last reply the same way as the human's. lastReply = self.format_message(lastReply, botreply=True) self.say("lastReply: " + lastReply) # See if it's a match. for trig in self.master._sorted["thats"][top]: pattern = trig[1]["previous"] botside = self.reply_regexp(user, pattern) self.say("Try to match lastReply ({}) to {} ({})".format(lastReply, pattern, repr(botside))) # Match?? match = re.match(botside, lastReply) if match: # Huzzah! See if OUR message is right too. self.say("Bot side matched!") thatstars = match.groups() # Compare the triggers to the user's message. 
user_side = trig[1] subtrig = self.reply_regexp(user, user_side["trigger"]) self.say("Now try to match " + msg + " to " + user_side["trigger"]) match = re.match(subtrig, msg) if match: self.say("Found a match!") matched = trig[1] matchedTrigger = user_side["trigger"] foundMatch = True # Get the stars! stars = match.groups() break # Break if we found a match. if foundMatch: break # Break if we found a match. if foundMatch: break # Search their topic for a match to their trigger. if not foundMatch: for trig in self.master._sorted["topics"][topic]: pattern = trig[0] # Process the triggers. regexp = self.reply_regexp(user, pattern) self.say("Try to match %r against %r (%r)" % (msg, pattern, regexp.pattern)) # Python's regular expression engine is slow. Try a verbatim # match if this is an atomic trigger. isAtomic = utils.is_atomic(pattern) isMatch = False if isAtomic: # Only look for exact matches, no sense running atomic triggers # through the regexp engine. if msg == pattern: isMatch = True else: # Non-atomic triggers always need the regexp. match = re.match(regexp, msg) if match: # The regexp matched! isMatch = True # Collect the stars. stars = match.groups() if isMatch: self.say("Found a match!") matched = trig[1] foundMatch = True matchedTrigger = pattern break # Store what trigger they matched on. If their matched trigger is None, # this will be too, which is great. self.master.set_uservar(user, "__lastmatch__", matchedTrigger) if matched: for nil in [1]: # See if there are any hard redirects. if matched["redirect"]: self.say("Redirecting us to " + matched["redirect"]) redirect = self.process_tags(user, msg, matched["redirect"], stars, thatstars, step, ignore_object_errors) redirect = redirect.lower() self.say("Pretend user said: " + redirect) reply = self._getreply(user, redirect, step=(step + 1), ignore_object_errors=ignore_object_errors) break # Check the conditionals. for con in matched["condition"]: halves = re.split(RE.cond_split, con) if halves and len(halves) == 2: condition = re.match(RE.cond_parse, halves[0]) if condition: left = condition.group(1) eq = condition.group(2) right = condition.group(3) potreply = halves[1] self.say("Left: " + left + "; eq: " + eq + "; right: " + right + " => " + potreply) # Process tags all around. left = self.process_tags(user, msg, left, stars, thatstars, step, ignore_object_errors) right = self.process_tags(user, msg, right, stars, thatstars, step, ignore_object_errors) # Defaults? if len(left) == 0: left = 'undefined' if len(right) == 0: right = 'undefined' self.say("Check if " + left + " " + eq + " " + right) # Validate it. passed = False if eq == 'eq' or eq == '==': if left == right: passed = True elif eq == 'ne' or eq == '!=' or eq == '<>': if left != right: passed = True else: # Gasp, dealing with numbers here... try: left, right = int(left), int(right) if eq == '<': if left < right: passed = True elif eq == '<=': if left <= right: passed = True elif eq == '>': if left > right: passed = True elif eq == '>=': if left >= right: passed = True except: self.warn("Failed to evaluate numeric condition!") # How truthful? if passed: reply = potreply break # Have our reply yet? if len(reply) > 0: break # Process weights in the replies. bucket = [] for text in matched["reply"]: weight = 1 match = re.search(RE.weight, text) if match: weight = int(match.group(1)) if weight <= 0: self.warn("Can't have a weight <= 0!") weight = 1 for i in range(0, weight): bucket.append(text) # Get a random reply. 
reply = utils.random_choice(bucket) break # Still no reply? if not foundMatch: raise NoMatchError elif len(reply) == 0: raise NoReplyError self.say("Reply: " + reply) # Process tags for the BEGIN block. if context == "begin": # BEGIN blocks can only set topics and uservars. The rest happen # later! reTopic = re.findall(RE.topic_tag, reply) for match in reTopic: self.say("Setting user's topic to " + match) self.master.set_uservar(user, "topic", match) reply = reply.replace('{{topic={match}}}'.format(match=match), '') reSet = re.findall(RE.set_tag, reply) for match in reSet: self.say("Set uservar " + str(match[0]) + "=" + str(match[1])) self.master.set_uservar(user, match[0], match[1]) reply = reply.replace('<set {key}={value}>'.format(key=match[0], value=match[1]), '') else: # Process more tags if not in BEGIN. reply = self.process_tags(user, msg, reply, stars, thatstars, step, ignore_object_errors) return reply
java
public String getBrowserBasedAuthenticationMechanism() {
    if (m_useBrowserBasedHttpAuthentication) {
        return AUTHENTICATION_BASIC;
    } else if (m_browserBasedAuthenticationMechanism != null) {
        return m_browserBasedAuthenticationMechanism;
    } else if (m_formBasedHttpAuthenticationUri != null) {
        return AUTHENTICATION_FORM;
    } else {
        return null;
    }
}
python
def mssql_transaction_count(engine_or_conn: Union[Connection, Engine]) -> Optional[int]:
    """
    For Microsoft SQL Server specifically: fetch the value of the ``TRANCOUNT``
    variable (see e.g.
    https://docs.microsoft.com/en-us/sql/t-sql/functions/trancount-transact-sql?view=sql-server-2017).
    Returns ``None`` if it can't be found (unlikely?).
    """
    sql = "SELECT @@TRANCOUNT"
    with contextlib.closing(
            engine_or_conn.execute(sql)) as result:  # type: ResultProxy  # noqa
        row = result.fetchone()
        return row[0] if row else None
java
public boolean addCassandraHost(CassandraHost cassandraHost) {
    Properties props = KunderaMetadataManager.getPersistenceUnitMetadata(kunderaMetadata, getPersistenceUnit())
            .getProperties();
    String keyspace = null;
    if (externalProperties != null) {
        keyspace = (String) externalProperties.get(PersistenceProperties.KUNDERA_KEYSPACE);
    }
    if (keyspace == null) {
        keyspace = (String) props.get(PersistenceProperties.KUNDERA_KEYSPACE);
    }
    String poolName = PelopsUtils.generatePoolName(cassandraHost.getHost(), cassandraHost.getPort(), keyspace);
    Cluster cluster = new Cluster(cassandraHost.getHost(),
            new IConnection.Config(cassandraHost.getPort(), true, -1,
                    PelopsUtils.getAuthenticationRequest(cassandraHost.getUser(), cassandraHost.getPassword())),
            false);
    Policy policy = PelopsUtils.getPoolConfigPolicy(cassandraHost);
    try {
        Pelops.addPool(poolName, cluster, keyspace, policy, null);
        hostPools.put(cassandraHost, Pelops.getDbConnPool(poolName));
        return true;
    } catch (TransportException e) {
        logger.warn("Node {} is still down ", cassandraHost.getHost());
        return false;
    }
}
java
public static String bytesToString(byte[] bytes, int offset, int length) {
    return new String(bytes, offset, length, UTF8_CHARSET);
}
java
public TenantDefinition searchForTenant(TenantFilter filter) {
    checkServiceState();
    for (TenantDefinition tenantDef : getAllTenantDefs().values()) {
        if (filter.selectTenant(tenantDef)) {
            return tenantDef;
        }
    }
    return null;
}
python
def resolve_config_file_path(self, config_filepath):
    """ Determines whether given path is valid, and if so uses it.  Otherwise searches
        both the working directory and nomenclate/core for the specified config file.

    :param config_filepath: str, file path or relative file name within package
    :return: str, resolved full file path to the config file
    """
    search_paths = [config_filepath,
                    os.path.normpath(os.path.join(os.getcwd(), config_filepath)),
                    os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), config_filepath))]

    for search_path in search_paths:
        try:
            self.validate_config_file(search_path)
            return search_path
        except (IOError, OSError):
            pass

    raise errors.SourceError(
        'No config file found in current working directory or nomenclate/core '
        'and %s is not a valid YAML file' % config_filepath)
java
public IfcSoundScaleEnum createIfcSoundScaleEnumFromString(EDataType eDataType, String initialValue) {
    IfcSoundScaleEnum result = IfcSoundScaleEnum.get(initialValue);
    if (result == null)
        throw new IllegalArgumentException(
                "The value '" + initialValue + "' is not a valid enumerator of '" + eDataType.getName() + "'");
    return result;
}
python
def _from_dict(cls, _dict):
    """Initialize a Classifier object from a json dictionary."""
    args = {}
    if 'classifier_id' in _dict:
        args['classifier_id'] = _dict.get('classifier_id')
    else:
        raise ValueError(
            'Required property \'classifier_id\' not present in Classifier JSON'
        )
    if 'name' in _dict:
        args['name'] = _dict.get('name')
    else:
        raise ValueError(
            'Required property \'name\' not present in Classifier JSON')
    if 'owner' in _dict:
        args['owner'] = _dict.get('owner')
    if 'status' in _dict:
        args['status'] = _dict.get('status')
    if 'core_ml_enabled' in _dict:
        args['core_ml_enabled'] = _dict.get('core_ml_enabled')
    if 'explanation' in _dict:
        args['explanation'] = _dict.get('explanation')
    if 'created' in _dict:
        args['created'] = string_to_datetime(_dict.get('created'))
    if 'classes' in _dict:
        args['classes'] = [
            Class._from_dict(x) for x in (_dict.get('classes'))
        ]
    if 'retrained' in _dict:
        args['retrained'] = string_to_datetime(_dict.get('retrained'))
    if 'updated' in _dict:
        args['updated'] = string_to_datetime(_dict.get('updated'))
    return cls(**args)
python
def sha(self):
    """Return sha, lazily compute if not done yet."""
    if self._sha is None:
        self._sha = compute_auth_key(self.userid, self.password)
    return self._sha
java
public void logFirstSelectQueryForCurrentThread() {
    String firstSelectQuery = getSelectQueriesForCurrentThread()
            .stream()
            .findFirst()
            .map(CircularQueueCaptureQueriesListener::formatQueryAsSql)
            .orElse("NONE FOUND");
    ourLog.info("First select Query:\n{}", firstSelectQuery);
}
java
private void publishFileSet(CopyEntity.DatasetAndPartition datasetAndPartition, Collection<WorkUnitState> datasetWorkUnitStates) throws IOException { Map<String, String> additionalMetadata = Maps.newHashMap(); Preconditions.checkArgument(!datasetWorkUnitStates.isEmpty(), "publishFileSet received an empty collection work units. This is an error in code."); CopyableDatasetMetadata metadata = CopyableDatasetMetadata .deserialize(datasetWorkUnitStates.iterator().next().getProp(CopySource.SERIALIZED_COPYABLE_DATASET)); Path datasetWriterOutputPath = new Path(this.writerOutputDir, datasetAndPartition.identifier()); log.info("Merging all split work units."); DistcpFileSplitter.mergeAllSplitWorkUnits(this.fs, datasetWorkUnitStates); log.info(String.format("[%s] Publishing fileSet from %s for dataset %s", datasetAndPartition.identifier(), datasetWriterOutputPath, metadata.getDatasetURN())); List<CommitStep> prePublish = getCommitSequence(datasetWorkUnitStates, PrePublishStep.class); List<CommitStep> postPublish = getCommitSequence(datasetWorkUnitStates, PostPublishStep.class); log.info(String.format("[%s] Found %d prePublish steps and %d postPublish steps.", datasetAndPartition.identifier(), prePublish.size(), postPublish.size())); executeCommitSequence(prePublish); if (hasCopyableFiles(datasetWorkUnitStates)) { // Targets are always absolute, so we start moving from root (will skip any existing directories). HadoopUtils.renameRecursively(this.fs, datasetWriterOutputPath, new Path("/")); } else { log.info(String.format("[%s] No copyable files in dataset. Proceeding to postpublish steps.", datasetAndPartition.identifier())); } executeCommitSequence(postPublish); this.fs.delete(datasetWriterOutputPath, true); long datasetOriginTimestamp = Long.MAX_VALUE; long datasetUpstreamTimestamp = Long.MAX_VALUE; Optional<String> fileSetRoot = Optional.<String>absent(); for (WorkUnitState wus : datasetWorkUnitStates) { if (wus.getWorkingState() == WorkingState.SUCCESSFUL) { wus.setWorkingState(WorkUnitState.WorkingState.COMMITTED); } CopyEntity copyEntity = CopySource.deserializeCopyEntity(wus); if (copyEntity instanceof CopyableFile) { CopyableFile copyableFile = (CopyableFile) copyEntity; DataFileVersionStrategy srcVS = this.srcDataFileVersionStrategy; DataFileVersionStrategy dstVS = this.dstDataFileVersionStrategy; // Prefer to use copyableFile's specific version strategy if (copyableFile.getDataFileVersionStrategy() != null) { Config versionStrategyConfig = ConfigFactory.parseMap(ImmutableMap.of( DataFileVersionStrategy.DATA_FILE_VERSION_STRATEGY_KEY, copyableFile.getDataFileVersionStrategy())); srcVS = DataFileVersionStrategy.instantiateDataFileVersionStrategy(this.srcFs, versionStrategyConfig); dstVS = DataFileVersionStrategy.instantiateDataFileVersionStrategy(this.fs, versionStrategyConfig); } if (copyableFile.getPreserve().preserve(PreserveAttributes.Option.VERSION) && dstVS.hasCharacteristic(DataFileVersionStrategy.Characteristic.SETTABLE)) { dstVS.setVersion(copyableFile.getDestination(), srcVS.getVersion(copyableFile.getOrigin().getPath())); } if (wus.getWorkingState() == WorkingState.COMMITTED) { CopyEventSubmitterHelper.submitSuccessfulFilePublish(this.eventSubmitter, copyableFile, wus); // Dataset Output path is injected in each copyableFile. // This can be optimized by having a dataset level equivalent class for copyable entities // and storing dataset related information, e.g. dataset output path, there. // Currently datasetOutputPath is only present for hive datasets. 
if (!fileSetRoot.isPresent() && copyableFile.getDatasetOutputPath() != null) { fileSetRoot = Optional.of(copyableFile.getDatasetOutputPath()); } if (lineageInfo.isPresent()) { lineageInfo.get().putDestination(copyableFile.getDestinationData(), 0, wus); } } if (datasetOriginTimestamp > copyableFile.getOriginTimestamp()) { datasetOriginTimestamp = copyableFile.getOriginTimestamp(); } if (datasetUpstreamTimestamp > copyableFile.getUpstreamTimestamp()) { datasetUpstreamTimestamp = copyableFile.getUpstreamTimestamp(); } } } // if there are no valid values for datasetOriginTimestamp and datasetUpstreamTimestamp, use // something more readable if (Long.MAX_VALUE == datasetOriginTimestamp) { datasetOriginTimestamp = 0; } if (Long.MAX_VALUE == datasetUpstreamTimestamp) { datasetUpstreamTimestamp = 0; } additionalMetadata.put(SlaEventKeys.SOURCE_URI, this.state.getProp(SlaEventKeys.SOURCE_URI)); additionalMetadata.put(SlaEventKeys.DESTINATION_URI, this.state.getProp(SlaEventKeys.DESTINATION_URI)); additionalMetadata.put(SlaEventKeys.DATASET_OUTPUT_PATH, fileSetRoot.or("Unknown")); CopyEventSubmitterHelper.submitSuccessfulDatasetPublish(this.eventSubmitter, datasetAndPartition, Long.toString(datasetOriginTimestamp), Long.toString(datasetUpstreamTimestamp), additionalMetadata); }
java
public byte[] getMemberData(String memberId)
        throws ZooKeeperConnectionException, KeeperException, InterruptedException {
    return zkClient.get().getData(getMemberPath(memberId), false, null);
}
python
def split_filename(name):
    """
    Splits the filename into three parts: the name part, the hash part, and
    the extension. Like with the extension, the hash part starts with a dot.
    """
    parts = hashed_filename_re.match(name).groupdict()
    return (parts['name'] or '', parts['hash'] or '', parts['ext'] or '')
python
def check_roles(self, account, aws_policies, aws_roles): """Iterate through the roles of a specific account and create or update the roles if they're missing or does not match the roles from Git. Args: account (:obj:`Account`): The account to check roles on aws_policies (:obj:`dict` of `str`: `dict`): A dictionary containing all the policies for the specific account aws_roles (:obj:`dict` of `str`: `dict`): A dictionary containing all the roles for the specific account Returns: `None` """ self.log.debug('Checking roles for {}'.format(account.account_name)) max_session_duration = self.dbconfig.get('role_timeout_in_hours', self.ns, 8) * 60 * 60 sess = get_aws_session(account) iam = sess.client('iam') # Build a list of default role policies and extra account specific role policies account_roles = copy.deepcopy(self.cfg_roles) if account.account_name in self.git_policies: for role in self.git_policies[account.account_name]: if role in account_roles: account_roles[role]['policies'] += list(self.git_policies[account.account_name][role].keys()) for role_name, data in list(account_roles.items()): if role_name not in aws_roles: iam.create_role( Path='/', RoleName=role_name, AssumeRolePolicyDocument=json.dumps(data['trust'], indent=4), MaxSessionDuration=max_session_duration ) self.log.info('Created role {}/{}'.format(account.account_name, role_name)) else: try: if aws_roles[role_name]['MaxSessionDuration'] != max_session_duration: iam.update_role( RoleName=aws_roles[role_name]['RoleName'], MaxSessionDuration=max_session_duration ) self.log.info('Adjusted MaxSessionDuration for role {} in account {} to {} seconds'.format( role_name, account.account_name, max_session_duration )) except ClientError: self.log.exception('Unable to adjust MaxSessionDuration for role {} in account {}'.format( role_name, account.account_name )) aws_role_policies = [x['PolicyName'] for x in iam.list_attached_role_policies( RoleName=role_name)['AttachedPolicies'] ] aws_role_inline_policies = iam.list_role_policies(RoleName=role_name)['PolicyNames'] cfg_role_policies = data['policies'] missing_policies = list(set(cfg_role_policies) - set(aws_role_policies)) extra_policies = list(set(aws_role_policies) - set(cfg_role_policies)) if aws_role_inline_policies: self.log.info('IAM Role {} on {} has the following inline policies: {}'.format( role_name, account.account_name, ', '.join(aws_role_inline_policies) )) if self.dbconfig.get('delete_inline_policies', self.ns, False) and self.manage_roles: for policy in aws_role_inline_policies: iam.delete_role_policy(RoleName=role_name, PolicyName=policy) auditlog( event='iam.check_roles.delete_inline_role_policy', actor=self.ns, data={ 'account': account.account_name, 'roleName': role_name, 'policy': policy } ) if missing_policies: self.log.info('IAM Role {} on {} is missing the following policies: {}'.format( role_name, account.account_name, ', '.join(missing_policies) )) if self.manage_roles: for policy in missing_policies: iam.attach_role_policy(RoleName=role_name, PolicyArn=aws_policies[policy]['Arn']) auditlog( event='iam.check_roles.attach_role_policy', actor=self.ns, data={ 'account': account.account_name, 'roleName': role_name, 'policyArn': aws_policies[policy]['Arn'] } ) if extra_policies: self.log.info('IAM Role {} on {} has the following extra policies applied: {}'.format( role_name, account.account_name, ', '.join(extra_policies) )) for policy in extra_policies: if policy in aws_policies: polArn = aws_policies[policy]['Arn'] elif policy in self.aws_managed_policies: polArn = 
self.aws_managed_policies[policy]['Arn'] else: polArn = None self.log.info('IAM Role {} on {} has an unknown policy attached: {}'.format( role_name, account.account_name, policy )) if self.manage_roles and polArn: iam.detach_role_policy(RoleName=role_name, PolicyArn=polArn) auditlog( event='iam.check_roles.detach_role_policy', actor=self.ns, data={ 'account': account.account_name, 'roleName': role_name, 'policyArn': polArn } )
java
public static <K1, V1, K2, V2> void addMapper(JobConf job,
        Class<? extends Mapper<K1, V1, K2, V2>> klass,
        Class<? extends K1> inputKeyClass,
        Class<? extends V1> inputValueClass,
        Class<? extends K2> outputKeyClass,
        Class<? extends V2> outputValueClass,
        boolean byValue, JobConf mapperConf) {
    job.setOutputKeyClass(outputKeyClass);
    job.setOutputValueClass(outputValueClass);
    Chain.addMapper(false, job, klass, inputKeyClass, inputValueClass,
            outputKeyClass, outputValueClass, byValue, mapperConf);
}
python
def input_fn(dataset, filepattern, skip_random_fraction_when_training, batch_size_means_tokens_param, batch_size_multiplier, max_length, mode, hparams, data_dir=None, params=None, config=None, force_repeat=False, prevent_repeat=False): """Builds input pipeline for problem. Args: dataset: the dataset to make input function from. filepattern: the pattern of files to read from. skip_random_fraction_when_training: whether to skip randomly when training. batch_size_means_tokens_param: whether batch size should mean tokens. batch_size_multiplier: how to multiply batch size when bucketing. max_length: maximum length, mode: tf.estimator.ModeKeys hparams: HParams, model hparams data_dir: str, data directory; if None, will use hparams.data_dir params: dict, may include "batch_size" config: RunConfig; should have the data_parallelism attribute if not using TPU force_repeat: bool, whether to repeat the data even if not training prevent_repeat: bool, whether to not repeat when in training mode. Overrides force_repeat. Returns: (features_dict<str name, Tensor feature>, Tensor targets) """ is_training = mode == tf.estimator.ModeKeys.TRAIN if config and config.use_tpu: num_threads = 64 else: num_threads = cpu_count() if is_training else 1 if config and hasattr(config, "data_parallelism") and config.data_parallelism: num_shards = config.data_parallelism.n else: num_shards = 1 mlperf_log.transformer_print( key=mlperf_log.INPUT_MAX_LENGTH, value=max_length) def tpu_valid_size(example): return example_valid_size(example, hparams.min_length, max_length) def gpu_valid_size(example): drop_long_sequences = is_training or hparams.eval_drop_long_sequences max_validate_length = max_length if drop_long_sequences else 10**9 return example_valid_size(example, hparams.min_length, max_validate_length) def define_shapes(example): batch_size = config and config.use_tpu and params["batch_size"] return standardize_shapes(example, batch_size=batch_size) # Read and preprocess data_dir = data_dir or (hasattr(hparams, "data_dir") and hparams.data_dir) if (force_repeat or is_training) and not prevent_repeat: # Repeat and skip a random number of records dataset = dataset.repeat() if is_training and skip_random_fraction_when_training: data_files = tf.contrib.slim.parallel_reader.get_data_files(filepattern) # In continuous_train_and_eval when switching between train and # eval, this input_fn method gets called multiple times and it # would give you the exact same samples from the last call # (because the Graph seed is set). So this skip gives you some # shuffling. dataset = skip_random_fraction(dataset, data_files[0]) dataset = dataset.map(cast_ints_to_int32, num_parallel_calls=num_threads) if batch_size_means_tokens_param: batch_size_means_tokens = True else: if _are_shapes_fully_defined(dataset.output_shapes): batch_size_means_tokens = False else: tf.logging.warning( "Shapes are not fully defined. Assuming batch_size means tokens.") batch_size_means_tokens = True # Batching if not batch_size_means_tokens: # Batch size means examples per datashard. 
if config and config.use_tpu: # on TPU, we use params["batch_size"], which specifies the number of # examples across all datashards batch_size = params["batch_size"] dataset = dataset.batch(batch_size, drop_remainder=True) else: batch_size = hparams.batch_size * num_shards dataset = dataset.batch(batch_size) else: # batch_size means tokens per datashard if config and config.use_tpu: dataset = dataset.filter(tpu_valid_size) padded_shapes = pad_for_tpu(dataset.output_shapes, hparams, max_length) # on TPU, we use params["batch_size"], which specifies the number of # examples across all datashards batch_size = params["batch_size"] if hparams.pad_batch: tf.logging.warn( "Padding the batch to ensure that remainder eval batches are " "processed. This may lead to incorrect metrics for " "non-zero-padded features, e.g. images. Use a smaller batch " "size that has no remainder in that case.") dataset = dataset.padded_batch( batch_size, padded_shapes, drop_remainder=False) dataset = dataset.map( functools.partial(pad_batch, batch_multiple=batch_size), num_parallel_calls=num_threads) else: dataset = dataset.padded_batch( batch_size, padded_shapes, drop_remainder=True) else: # On GPU, bucket by length dataset = dataset.filter(gpu_valid_size) cur_batching_scheme = hparams_to_batching_scheme( hparams, shard_multiplier=num_shards, length_multiplier=batch_size_multiplier) if hparams.use_fixed_batch_size: # Here batch_size really means examples per datashard. cur_batching_scheme["batch_sizes"] = [hparams.batch_size] cur_batching_scheme["boundaries"] = [] dataset = dataset.apply( tf.data.experimental.bucket_by_sequence_length( example_length, cur_batching_scheme["boundaries"], cur_batching_scheme["batch_sizes"])) if not is_training: batch_multiple = num_shards if hparams.use_fixed_batch_size: # Make sure the last batch has the same fixed size as the rest. batch_multiple *= hparams.batch_size if batch_multiple > 1: tf.logging.warn( "Padding the batch to ensure that remainder eval batches have " "a batch size divisible by the number of data shards. This may " "lead to incorrect metrics for non-zero-padded features, e.g. " "images. Use a single datashard (i.e. 1 GPU) in that case.") dataset = dataset.map( functools.partial(pad_batch, batch_multiple=batch_multiple), num_parallel_calls=num_threads) dataset = dataset.map(define_shapes, num_parallel_calls=num_threads) # Add shuffling for training batches. This is necessary along with record # level shuffling in the dataset generation. Record shuffling will shuffle # the examples. However, in some cases, it's possible that the shuffle # buffer size for record shuffling is smaller than the batch size. In such # cases, adding batch shuffling ensures that the data is in random order # during training if (is_training and hasattr(hparams, "batch_shuffle_size") and hparams.batch_shuffle_size): dataset = dataset.shuffle(hparams.batch_shuffle_size) # Split batches into chunks if targets are too long. # The new "chunk_number" feature is 0 for the first chunk and goes up then. # Chunks are reversed so the 0th chunk comes first, then the 1st and so on, # so models can attend to them in the order they arrive. The last chunk is # usually the one containing the end of the target sentence (EOS). 
chunk_length = hparams.get("split_targets_chunk_length", 0) max_chunks = hparams.get("split_targets_max_chunks", 100) if chunk_length > 0: def is_nonzero_chunk(example): """A chunk is zero if all targets are 0s.""" return tf.less(0, tf.reduce_sum(tf.abs(example["targets"]))) def split_on_length(example): """Split a batch of ditcs on length.""" x = example["targets"] # TODO(kitaev): This code breaks if chunk_length * max_chunks < batch_size length_diff = chunk_length * max_chunks - tf.shape(x)[1] padded_x = tf.pad(x, [(0, 0), (0, length_diff), (0, 0), (0, 0)]) chunks = [padded_x[:, i*chunk_length:(i+1)*chunk_length, :, :] for i in range(max_chunks - 1)] chunks.append(padded_x[:, (max_chunks - 1)*chunk_length:, :, :]) new_example = {} # Setting chunk_number to be tf.range(max_chunks) is incompatible with TPU new_example["chunk_number"] = tf.concat([ tf.expand_dims(tf.ones_like(c) * n, axis=0) for n, c in enumerate(chunks) ], axis=0) new_example["targets"] = tf.concat( [tf.expand_dims(c, axis=0) for c in chunks], axis=0) for k in example: if k != "targets": assert k != "chunk_number", ( "Chunking code expects the chunk_number feature name to be " "available" ) new_example[k] = tf.concat( [tf.expand_dims(example[k], axis=0) for _ in range(max_chunks)], axis=0) return tf.data.Dataset.from_tensor_slices(new_example) dataset = dataset.flat_map(split_on_length) dataset = dataset.filter(is_nonzero_chunk) # The chunking data pipeline thus far creates batches of examples where all # of the examples have the same chunk number. This can lead to periodic # fluctuations in the loss; for example, when all examples in the batch have # chunk number 0 the loss may be higher than midway through a sequence. # Enabling split_targets_strided_training adjusts the data so that each # batch includes examples at various points within a sequence. if is_training and hparams.split_targets_strided_training: # TODO(kitaev): make sure that shape inference works on GPU, not just TPU. inferred_batch_size = dataset.output_shapes["targets"].as_list()[0] if inferred_batch_size is None: raise ValueError( "Strided training is only implemented when the batch size can be " "inferred statically, for example when training on TPU." ) chunk_stride = inferred_batch_size * max( 1, max_chunks // inferred_batch_size) + 1 def collapse_nested_datasets(example): """Converts a dataset of datasets to a dataset of tensor features.""" new_example = {} for k, v in example.items(): v = tf.data.experimental.get_single_element( v.batch(inferred_batch_size, drop_remainder=True)) new_example[k] = v return tf.data.Dataset.from_tensor_slices(new_example) dataset = dataset.apply(tf.data.experimental.unbatch()) dataset = dataset.window(inferred_batch_size, inferred_batch_size, chunk_stride) dataset = dataset.flat_map(collapse_nested_datasets) dataset = dataset.batch(inferred_batch_size, drop_remainder=True) def prepare_for_output(example): if not config or not config.use_tpu: _summarize_features(example, num_shards) if mode == tf.estimator.ModeKeys.PREDICT: example["infer_targets"] = example.pop("targets") return example else: return example, example["targets"] dataset = dataset.map(prepare_for_output, num_parallel_calls=num_threads) dataset = dataset.prefetch(2) if mode == tf.estimator.ModeKeys.PREDICT: # This is because of a bug in the Estimator that short-circuits prediction # if it doesn't see a QueueRunner. DummyQueueRunner implements the # minimal expected interface but does nothing. 
tf.add_to_collection(tf.GraphKeys.QUEUE_RUNNERS, DummyQueueRunner()) return dataset
java
private int loadTable(long uuid) throws IOException {
    int bufferIndex = -1;
    Set<Long> uuids = null;

    // Always attempt to append to the last block first.
    int blockIndex = _blocks.size() - 1;
    TableBlock lastBlock = _blocks.get(blockIndex);

    // Even though this is a while loop it will run at most twice: once if there is room in the current final
    // block, and a second time if there wasn't and a new block needed to be allocated.
    while (bufferIndex == -1) {
        Pair<Integer, Set<Long>> bufferIndexAndUuuids = lastBlock.writeTable(uuid);
        bufferIndex = bufferIndexAndUuuids.left;
        if (bufferIndex == -1) {
            blockIndex++;
            lastBlock = new TableBlock(blockIndex * _blockSize);
            _blocks.add(lastBlock);
        } else {
            uuids = bufferIndexAndUuuids.right;
        }
    }

    int index = toIndex(blockIndex, bufferIndex);

    // Map each UUID associated with the table to the table's index
    for (Long tableUuid : uuids) {
        _fileIndexByUuid.put(tableUuid, index);
    }

    return index;
}
python
def rmon_alarm_entry_alarm_owner(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    rmon = ET.SubElement(config, "rmon", xmlns="urn:brocade.com:mgmt:brocade-rmon")
    alarm_entry = ET.SubElement(rmon, "alarm-entry")
    alarm_index_key = ET.SubElement(alarm_entry, "alarm-index")
    alarm_index_key.text = kwargs.pop('alarm_index')
    alarm_owner = ET.SubElement(alarm_entry, "alarm-owner")
    alarm_owner.text = kwargs.pop('alarm_owner')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
java
public void init(final Connection connection, final String host, final String port) throws DevFailed {
    connection.url = new TangoUrl(buildUrlName(TangoUrl.getCanonicalName(host), port));
    connection.setDevice_is_dbase(true);
    connection.transparent_reconnection = true; // Always true for Database
    ApiUtil.get_orb();
    connect_to_dbase(connection);
    connection.devname = connection.device.name();
    connection.setAlready_connected(true);
}
java
public static <E> List<E> buildList(Iterable<E> iterable) {
    return StreamSupport.stream(iterable.spliterator(), false)
            .collect(toList());
}
java
public String convertIndexToString(int index) {
    String tempString = null;
    if ((index >= 0) && (index <= 9)) {
        char value[] = new char[1];
        value[0] = (char) ('0' + index);
        tempString = new String(value);   // 1 = '1'; 2 = '2'; etc...
    } else
        tempString = Constants.BLANK;
    return tempString;
}
python
def visit_ImportFrom(self, node):
    """ Register imported modules and usage symbols. """
    module_path = tuple(node.module.split('.'))
    self.imports.add(module_path[0])

    for alias in node.names:
        path = module_path + (alias.name,)
        self.symbols[alias.asname or alias.name] = path
    self.update = True
    return None
java
@Override
public void enterGraph(GDLParser.GraphContext graphContext) {
    inGraph = true;
    String variable = getVariable(graphContext.header());
    Graph g;

    if (variable != null && userGraphCache.containsKey(variable)) {
        g = userGraphCache.get(variable);
    } else {
        g = initNewGraph(graphContext);
        if (variable != null) {
            userGraphCache.put(variable, g);
        } else {
            variable = String.format(ANONYMOUS_GRAPH_VARIABLE, g.getId());
            autoGraphCache.put(variable, g);
        }
        g.setVariable(variable);
        graphs.add(g);
    }
    currentGraphId = g.getId();
}
python
def set_proxy(proxy_url, transport_proxy=None):
    """Create the proxy to PyPI XML-RPC Server"""
    global proxy, PYPI_URL
    PYPI_URL = proxy_url
    proxy = xmlrpc.ServerProxy(
        proxy_url,
        transport=RequestsTransport(proxy_url.startswith('https://')),
        allow_none=True)
java
public UpdateForClause in(String variable, String path) {
    Expression in = x(variable + " IN " + path);
    vars.add(in);
    return this;
}
python
def set(ctx, key, value):
    """ Set configuration parameters """
    if key == "default_account" and value[0] == "@":
        value = value[1:]
    ctx.bitshares.config[key] = value
python
def current_spi_to_number(self):
    """ Convert subpage & subitem to an integer

    * if page == 1, then return 0, since the item count is the true # of items
    * if page == 2, then return page-1 * items_per_page, since we are
      returning the # of items on a full page.

    Args:
        * None

    Returns:
        * Integer - Which represents the number of items up to the page.
    """
    if self.slots['subpage'] is None:
        return self.sub_pi_to_number(0, 0)
    else:
        return self.sub_pi_to_number(self.slots['subpage'], self.slots['subitem'])
python
def run(self) -> Generator[Tuple[int, int, str, type], None, None]:
    """
    Runs the checker.

    ``fix_file()`` only mutates the buffer object.
    It is the only way to find out if some error happened.
    """
    if self.filename != STDIN:
        buffer = StringIO()
        options = _Options(aggressive=self.options.eradicate_aggressive)
        fix_file(self.filename, options, buffer)
        traceback = buffer.getvalue()

        if traceback:
            yield 1, 0, self._error(traceback), type(self)
java
protected final PrcCsvSampleDataRow lazyGetPrcCsvSampleDataRow(
        final Map<String, Object> pAddParam) throws Exception {
    String beanName = PrcCsvSampleDataRow.class.getSimpleName();
    @SuppressWarnings("unchecked")
    PrcCsvSampleDataRow proc = (PrcCsvSampleDataRow) this.processorsMap.get(beanName);
    if (proc == null) {
        proc = new PrcCsvSampleDataRow();
        lazyInitRetrievers(pAddParam);
        proc.setRetrievers(this.retrievers);
        //assigning fully initialized object:
        this.processorsMap.put(beanName, proc);
        this.logger.info(null, FctBnTradeProcessors.class, beanName + " has been created.");
    }
    return proc;
}
python
def random_get_int(rnd: Optional[tcod.random.Random], mi: int, ma: int) -> int:
    """Return a random integer in the range: ``mi`` <= n <= ``ma``.

    The result is affected by calls to :any:`random_set_distribution`.

    Args:
        rnd (Optional[Random]): A Random instance, or None to use the default.
        mi (int): The lower bound of the random range, inclusive.
        ma (int): The upper bound of the random range, inclusive.

    Returns:
        int: A random integer in the range ``mi`` <= n <= ``ma``.
    """
    return int(
        lib.TCOD_random_get_int(rnd.random_c if rnd else ffi.NULL, mi, ma)
    )
java
public static void main(String[] args) throws Exception { SAXProcessor sp = new SAXProcessor(); // data // double[] dat = TSProcessor.readFileColumn(DAT_FNAME, 1, 0); // TSProcessor tp = new TSProcessor(); // double[] dat = tp.readTS("src/resources/dataset/asys40.txt", 0); // double[] dat = TSProcessor.readFileColumn(filename, columnIdx, // sizeLimit)FileColumn(DAT_FNAME, 1, 0); LOGGER.info("read {} points from {}", dat.length, DAT_FNAME); String str = "win_width: " + cPoint + "; SAX: W " + SAX_WINDOW_SIZE + ", P " + SAX_PAA_SIZE + ", A " + SAX_ALPHABET_SIZE + ", STR " + SAX_NR_STRATEGY.toString(); int frameCounter = 0; int startOffset = cPoint; while (cPoint < dat.length - startOffset - 1) { if (0 == cPoint % 2) { BufferedImage tsChart = getChart(dat, cPoint); // bitmap 1 // double[] win1 = Arrays.copyOfRange(dat, cPoint - startOffset, cPoint); Map<String, Integer> shingledData1 = sp.ts2Shingles(win1, SAX_WINDOW_SIZE, SAX_PAA_SIZE, SAX_ALPHABET_SIZE, SAX_NR_STRATEGY, SAX_NORM_THRESHOLD, SHINGLE_SIZE); BufferedImage pam1 = getHeatMap(shingledData1, "pre-window"); double[] win2 = Arrays.copyOfRange(dat, cPoint, cPoint + startOffset); Map<String, Integer> shingledData2 = sp.ts2Shingles(win2, SAX_WINDOW_SIZE, SAX_PAA_SIZE, SAX_ALPHABET_SIZE, SAX_NR_STRATEGY, SAX_NORM_THRESHOLD, SHINGLE_SIZE); BufferedImage pam2 = getHeatMap(shingledData2, "post-window"); // the assemble // BufferedImage target = new BufferedImage(800, 530, BufferedImage.TYPE_INT_ARGB); Graphics targetGraphics = target.getGraphics(); targetGraphics.setColor(Color.WHITE); targetGraphics.fillRect(0, 0, 799, 529); targetGraphics.drawImage(tsChart, 0, 0, null); targetGraphics.drawImage(pam1, 10, 410, null);// draws the first image onto it targetGraphics.drawImage(pam2, 120, 410, null);// draws the first image onto it targetGraphics.setColor(Color.RED); targetGraphics.setFont(new Font("monospaced", Font.PLAIN, 16)); targetGraphics.drawString(str, 300, 420); targetGraphics.setColor(Color.BLUE); targetGraphics.setFont(new Font("monospaced", Font.PLAIN, 24)); double dist = ed.distance(toVector(shingledData1), toVector(shingledData2)); targetGraphics.drawString("ED=" + df.format(dist), 300, 480); // String fileName = new SimpleDateFormat("yyyyMMddhhmmssSS'.png'").format(new Date()); File outputfile = new File("dframe" + String.format("%04d", frameCounter) + ".png"); ImageIO.write(target, "png", outputfile); frameCounter++; } cPoint++; } }
java
@Override
public List<XMLObject> getOrderedChildren() {
    ArrayList<XMLObject> children = new ArrayList<XMLObject>();

    if (this.schemeInformation != null) {
        children.add(this.schemeInformation);
    }
    children.addAll(this.metadataLists);
    if (this.distributionPoints != null) {
        children.add(this.distributionPoints);
    }
    if (this.getSignature() != null) {
        children.add(this.getSignature());
    }

    return Collections.unmodifiableList(children);
}
python
def get_compute(self, compute_id):
    """
    Returns a compute server or raise a 404 error.
    """
    try:
        return self._computes[compute_id]
    except KeyError:
        if compute_id == "vm":
            raise aiohttp.web.HTTPNotFound(
                text="You try to use a node on the GNS3 VM server but the GNS3 VM is not configured")
        raise aiohttp.web.HTTPNotFound(text="Compute ID {} doesn't exist".format(compute_id))
java
private static boolean isUnM49AreaCode(String code) {
    if (code.length() != 3) {
        return false;
    }
    for (int i = 0; i < 3; ++i) {
        final char character = code.charAt(i);
        if (!(character >= '0' && character <= '9')) {
            return false;
        }
    }
    return true;
}
python
def is_java_project(self):
    """
    Indicates if the project's main binary is a Java Archive.
    """
    if self._is_java_project is None:
        self._is_java_project = isinstance(self.arch, ArchSoot)
    return self._is_java_project
python
def setDeclaration(self, declaration):
    """ Set the declaration this model will use for rendering
    the headers.
    """
    assert isinstance(declaration.proxy, ProxyAbstractItemView), \
        "The model declaration must be a QtAbstractItemView subclass. " \
        "Got {}".format(declaration)
    self.declaration = declaration
python
def tile_x_size(self, zoom):
    """
    Width of a tile in SRID units at zoom level.

    - zoom: zoom level
    """
    warnings.warn(DeprecationWarning("tile_x_size is deprecated"))
    validate_zoom(zoom)
    return round(self.x_size / self.matrix_width(zoom), ROUND)
java
public void trustOmemoIdentity(OmemoDevice device, OmemoFingerprint fingerprint) {
    if (trustCallback == null) {
        throw new IllegalStateException("No TrustCallback set.");
    }

    trustCallback.setTrust(device, fingerprint, TrustState.trusted);
}
java
@Beta
@CheckReturnValue
public final FluentIterable<E> append(E... elements) {
    return from(Iterables.concat(iterable, Arrays.asList(elements)));
}
python
def tange_pth(v, temp, v0, gamma0, a, b, theta0, n, z,
              t_ref=300., three_r=3. * constants.R):
    """
    calculate thermal pressure for the Tange equation

    :param v: unit-cell volume in A^3
    :param temp: temperature in K
    :param v0: unit-cell volume in A^3 at 1 bar
    :param gamma0: Gruneisen parameter at 1 bar
    :param a: volume-independent adjustable parameters
    :param b: volume-independent adjustable parameters
    :param theta0: Debye temperature at 1 bar in K
    :param n: number of atoms in a formula unit
    :param z: number of formula unit in a unit cell
    :param t_ref: reference temperature
    :param three_r: 3R in case adjustment is needed
    :return: thermal pressure in GPa
    """
    v_mol = vol_uc2mol(v, z)
    gamma = tange_grun(v, v0, gamma0, a, b)
    theta = tange_debyetemp(v, v0, gamma0, a, b, theta0)
    xx = theta / temp
    debye = debye_E(xx)
    if t_ref == 0.:
        debye0 = 0.
    else:
        xx0 = theta / t_ref
        debye0 = debye_E(xx0)
    Eth0 = three_r * n * t_ref * debye0
    Eth = three_r * n * temp * debye
    delEth = Eth - Eth0
    p_th = (gamma / v_mol * delEth) * 1.e-9
    return p_th
java
private void initializeRequestHandler(ResponseBuilder rb) {
    if (requestHandler == null) {
        // try to initialize
        for (Entry<String, SolrInfoBean> entry : rb.req.getCore().getInfoRegistry().entrySet()) {
            if (entry.getValue() instanceof MtasRequestHandler) {
                requestHandlerName = entry.getKey();
                requestHandler = (MtasRequestHandler) entry.getValue();
                break;
            }
        }
    }
}
java
public static base_response update(nitro_service client, aaakcdaccount resource) throws Exception {
    aaakcdaccount updateresource = new aaakcdaccount();
    updateresource.kcdaccount = resource.kcdaccount;
    updateresource.keytab = resource.keytab;
    updateresource.realmstr = resource.realmstr;
    updateresource.delegateduser = resource.delegateduser;
    updateresource.kcdpassword = resource.kcdpassword;
    updateresource.usercert = resource.usercert;
    updateresource.cacert = resource.cacert;
    return updateresource.update_resource(client);
}
java
private void uninstallWindowListeners(JRootPane root) {
    if (window != null) {
        window.removeMouseListener(mouseInputListener);
        window.removeMouseMotionListener(mouseInputListener);
    }
}
python
def _align_hydrogen_atoms(mol1, mol2, heavy_indices1, heavy_indices2): """ Align the label of topologically identical atoms of second molecule towards first molecule Args: mol1: First molecule. OpenBabel OBMol object mol2: Second molecule. OpenBabel OBMol object heavy_indices1: inchi label map of the first molecule heavy_indices2: label map of the second molecule Return: corrected label map of all atoms of the second molecule """ num_atoms = mol2.NumAtoms() all_atom = set(range(1, num_atoms+1)) hydrogen_atoms1 = all_atom - set(heavy_indices1) hydrogen_atoms2 = all_atom - set(heavy_indices2) label1 = heavy_indices1 + tuple(hydrogen_atoms1) label2 = heavy_indices2 + tuple(hydrogen_atoms2) cmol1 = ob.OBMol() for i in label1: oa1 = mol1.GetAtom(i) a1 = cmol1.NewAtom() a1.SetAtomicNum(oa1.GetAtomicNum()) a1.SetVector(oa1.GetVector()) cmol2 = ob.OBMol() for i in label2: oa2 = mol2.GetAtom(i) a2 = cmol2.NewAtom() a2.SetAtomicNum(oa2.GetAtomicNum()) a2.SetVector(oa2.GetVector()) aligner = ob.OBAlign(False, False) aligner.SetRefMol(cmol1) aligner.SetTargetMol(cmol2) aligner.Align() aligner.UpdateCoords(cmol2) hydrogen_label2 = [] hydrogen_label1 = list(range(len(heavy_indices1) + 1, num_atoms + 1)) for h2 in range(len(heavy_indices2) + 1, num_atoms + 1): distance = 99999.0 idx = hydrogen_label1[0] a2 = cmol2.GetAtom(h2) for h1 in hydrogen_label1: a1 = cmol1.GetAtom(h1) d = a1.GetDistance(a2) if d < distance: distance = d idx = h1 hydrogen_label2.append(idx) hydrogen_label1.remove(idx) hydrogen_orig_idx2 = label2[len(heavy_indices2):] hydrogen_canon_orig_map2 = [(canon, orig) for canon, orig in zip(hydrogen_label2, hydrogen_orig_idx2)] hydrogen_canon_orig_map2.sort(key=lambda m: m[0]) hydrogen_canon_indices2 = [x[1] for x in hydrogen_canon_orig_map2] canon_label1 = label1 canon_label2 = heavy_indices2 + tuple(hydrogen_canon_indices2) return canon_label1, canon_label2
java
public static Calendar getJavaCalendar(final double excelDate, final boolean use1904windowing) {
    if (isValidExcelDate(excelDate)) {
        int startYear = EXCEL_BASE_YEAR;
        int dayAdjust = -1; // Excel thinks 2/29/1900 is a valid date, which it isn't
        final int wholeDays = (int) Math.floor(excelDate);
        if (use1904windowing) {
            startYear = EXCEL_WINDOWING_1904;
            dayAdjust = 1; // 1904 date windowing uses 1/2/1904 as the first day
        } else if (wholeDays < EXCEL_FUDGE_19000229) {
            // Date is prior to 3/1/1900, so adjust because Excel thinks 2/29/1900 exists
            // If Excel date == 2/29/1900, will become 3/1/1900 in Java representation
            dayAdjust = 0;
        }
        final GregorianCalendar calendar = new GregorianCalendar(startYear, 0, wholeDays + dayAdjust);
        final int millisecondsInDay = (int) ((excelDate - Math.floor(excelDate)) * DAY_MILLISECONDS + HALF_MILLISEC);
        calendar.set(Calendar.MILLISECOND, millisecondsInDay);
        return calendar;
    } else {
        return null;
    }
}
java
public IServerInterceptor loggingInterceptor() {
    LoggingInterceptor retVal = new LoggingInterceptor();
    retVal.setLoggerName("fhirtest.access");
    retVal.setMessageFormat(
            "Path[${servletPath}] Source[${requestHeader.x-forwarded-for}] Operation[${operationType} ${operationName} ${idOrResourceName}] UA[${requestHeader.user-agent}] Params[${requestParameters}] ResponseEncoding[${responseEncodingNoDefault}]");
    retVal.setLogExceptions(true);
    retVal.setErrorMessageFormat("ERROR - ${requestVerb} ${requestUrl}");
    return retVal;
}
python
def load(self):
    """ Load the session information """
    if not os.path.isfile(self.persist_file):
        return
    with open(self.persist_file, 'r') as f:
        cfg = json.load(f) or {}
        self.cookies = cfg.get('cookies', {})
        self.user_alias = cfg.get('user_alias') or None
        self.logger.debug('load session for <%s> from <%s>' % (self.user_alias, self.persist_file))
python
def writefile(openedfile, newcontents):
    """Set the contents of a file."""
    openedfile.seek(0)
    openedfile.truncate()
    openedfile.write(newcontents)
python
def _AssignVar(self, matched, value):
    """Assigns variable into current record from a matched rule.

    If a record entry is a list then append, otherwise values are replaced.

    Args:
        matched: (regexp.match) Named group for each matched value.
        value: (str) The matched value.
    """
    _value = self._GetValue(value)
    if _value is not None:
        _value.AssignVar(matched.group(value))
java
public static <T extends PropertyDispatcher> T getInstance(Class<T> cls, PropertySetterDispatcher dispatcher) {
    try {
        PropertyDispatcherClass annotation = cls.getAnnotation(PropertyDispatcherClass.class);
        if (annotation == null) {
            throw new IllegalArgumentException("@" + PropertyDispatcherClass.class.getSimpleName() + " missing in cls");
        }
        Class<?> c = Class.forName(annotation.value());
        if (dispatcher == null) {
            T t = (T) c.newInstance();
            return t;
        } else {
            Constructor<?> constructor = c.getConstructor(PropertySetterDispatcher.class);
            return (T) constructor.newInstance(dispatcher);
        }
    } catch (InvocationTargetException | SecurityException | NoSuchMethodException | ClassNotFoundException
            | InstantiationException | IllegalAccessException ex) {
        throw new IllegalArgumentException(ex);
    }
}
python
def send(self, msg, timeout=None):
    """
    Send a message to NI-CAN.

    :param can.Message msg:
        Message to send

    :raises can.interfaces.nican.NicanError:
        If writing to transmit buffer fails.
        It does not wait for message to be ACKed currently.
    """
    arb_id = msg.arbitration_id
    if msg.is_extended_id:
        arb_id |= NC_FL_CAN_ARBID_XTD
    raw_msg = TxMessageStruct(
        arb_id,
        bool(msg.is_remote_frame),
        msg.dlc,
        CanData(*msg.data))
    nican.ncWrite(
        self.handle, ctypes.sizeof(raw_msg), ctypes.byref(raw_msg))
python
def full_number(self):
    """
    Returns the full number including the verification digit.

    :return: str
    """
    return '{}{}'.format(
        ''.join(str(n) for n in self.numbers),
        ReceiptBarcodeGenerator.verification_digit(self.numbers),
    )
python
def sinusoid(freq, phase=0.):
    """ Sinusoid based on the optimized math.sin """
    # When at 44100 samples / sec, 5 seconds of this leads to an error of
    # 8e-14 peak to peak. That's fairly enough.
    for n in modulo_counter(start=phase, modulo=2 * pi, step=freq):
        yield sin(n)
java
private static boolean isSimpleMode(Element element) {
    // simple mode is triggered through the marker css class 'o-simple-drag'
    return org.opencms.gwt.client.util.CmsDomUtil.getAncestor(element, "o-simple-drag") != null;
}
python
def find_metabolites_not_produced_with_open_bounds(model):
    """
    Return metabolites that cannot be produced with open exchange reactions.

    A perfect model should be able to produce each and every metabolite when
    all medium components are available.

    Parameters
    ----------
    model : cobra.Model
        The metabolic model under investigation.

    Returns
    -------
    list
        Those metabolites that could not be produced.
    """
    mets_not_produced = list()
    helpers.open_exchanges(model)
    for met in model.metabolites:
        with model:
            exch = model.add_boundary(
                met, type="irrex", reaction_id="IRREX", lb=0, ub=1000)
            solution = helpers.run_fba(model, exch.id)
            if np.isnan(solution) or solution < TOLERANCE_THRESHOLD:
                mets_not_produced.append(met)
    return mets_not_produced
python
def _prepare_mock(context: 'torment.contexts.TestContext', symbol: str, return_value = None, side_effect = None) -> None:
    '''Sets return value or side effect of symbol's mock in context.

    .. seealso:: :py:func:`_find_mocker`

    **Parameters**

    :``context``:      the search context
    :``symbol``:       the symbol to be located
    :``return_value``: pass through to mock ``return_value``
    :``side_effect``:  pass through to mock ``side_effect``
    '''
    methods = symbol.split('.')
    index = len(methods)

    mock = None

    while index > 0:
        name = 'mocked_' + '_'.join(methods[:index]).lower()
        logger.debug('name: %s', name)

        if hasattr(context, name):
            mock = getattr(context, name)
            break

        index -= 1

    logger.debug('mock: %s', mock)

    if mock is not None:
        mock = functools.reduce(getattr, methods[index:], mock)
        logger.debug('mock: %s', mock)

        if return_value is not None:
            mock.return_value = return_value

        if side_effect is not None:
            mock.side_effect = side_effect

        mock.reset_mock()
python
def kunc_v(p, v0, k0, k0p, order=5, min_strain=0.01):
    """
    find volume at given pressure using brenth in scipy.optimize

    :param p: pressure in GPa
    :param v0: unit-cell volume in A^3 at 1 bar
    :param k0: bulk modulus at reference conditions
    :param k0p: pressure derivative of bulk modulus at reference conditions
    :param order: order of Kunc function
    :param min_strain: defining minimum v/v0 value to search volume for
    :return: unit-cell volume at high pressure in A^3
    :note: a wrapper function vectorizing kunc_v_single
    """
    if isuncertainties([p, v0, k0, k0p]):
        f_u = np.vectorize(uct.wrap(kunc_v_single), excluded=[1, 2, 3, 4, 5])
        return f_u(p, v0, k0, k0p, order=order, min_strain=min_strain)
    else:
        f_v = np.vectorize(kunc_v_single, excluded=[1, 2, 3, 4, 5])
        return f_v(p, v0, k0, k0p, order=order, min_strain=min_strain)
java
@JsonIgnore
public Map<String, Object> getSettings() {
    if (settings == null) {
        settings = new LinkedHashMap<>();
    }
    return settings;
}
python
def _init_metadata(self):
    """stub"""
    super(EdXDragAndDropQuestionFormRecord, self)._init_metadata()
    QuestionTextFormRecord._init_metadata(self)
    QuestionFilesFormRecord._init_metadata(self)
java
public int get(Object o) {
    Integer n = indices.get(o);
    return n == null ? -1 : n.intValue();
}
java
public static void warn(Log log, String format, Object... arguments) {
    warn(log, null, format, arguments);
}
python
def uri(self, context=None):
    """
    :param dict context: keys that were missing in the static context
        given in constructor to resolve the bucket name.
    """
    if context is None:
        context = self.context
    else:
        ctx = copy.deepcopy(self.context)
        ctx.update(context)
        context = ctx
    return self.uri_format.format(**context)
java
@Override
public void visitMethod(Method obj) {
    state = State.SAW_NOTHING;
    iConst0Looped.clear();
    stack.resetForMethodEntry(this);
}
java
protected void addAlias(MonolingualTextValue alias) { String lang = alias.getLanguageCode(); AliasesWithUpdate currentAliasesUpdate = newAliases.get(lang); NameWithUpdate currentLabel = newLabels.get(lang); // If there isn't any label for that language, put the alias there if (currentLabel == null) { newLabels.put(lang, new NameWithUpdate(alias, true)); // If the new alias is equal to the current label, skip it } else if (!currentLabel.value.equals(alias)) { if (currentAliasesUpdate == null) { currentAliasesUpdate = new AliasesWithUpdate(new ArrayList<MonolingualTextValue>(), true); } List<MonolingualTextValue> currentAliases = currentAliasesUpdate.aliases; if(!currentAliases.contains(alias)) { currentAliases.add(alias); currentAliasesUpdate.added.add(alias); currentAliasesUpdate.write = true; } newAliases.put(lang, currentAliasesUpdate); } }
java
public ListAppResponse listApp(ListAppRequest request) { checkNotNull(request, "The parameter request should NOT be null."); InternalRequest internalRequest = createRequest(HttpMethodName.GET, request, LIVE_APP); return invokeHttpClient(internalRequest, ListAppResponse.class); }
java
@SuppressWarnings({"ConstantConditions","deprecation"}) @SuppressFBWarnings("RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE") public void addAction(@Nonnull Action a) { if(a==null) { throw new IllegalArgumentException("Action must be non-null"); } getActions().add(a); }
python
def date(self, field=None, val=None): """ Like datetime, but truncated to be a date only """ return self.datetime(field=field, val=val).date()
java
private JPanel getJPanel() { if (jPanel == null) { java.awt.GridBagConstraints gridBagConstraints7 = new GridBagConstraints(); java.awt.GridBagConstraints gridBagConstraints5 = new GridBagConstraints(); gridBagConstraints5.gridwidth = 2; java.awt.GridBagConstraints gridBagConstraints2 = new GridBagConstraints(); jPanel = new JPanel(); jPanel.setLayout(new GridBagLayout()); jPanel.setName("jPanel"); gridBagConstraints2.weightx = 1.0; gridBagConstraints2.weighty = 1.0; gridBagConstraints2.fill = java.awt.GridBagConstraints.BOTH; gridBagConstraints5.gridx = 0; gridBagConstraints5.gridy = 1; gridBagConstraints5.ipadx = 0; gridBagConstraints5.ipady = 0; gridBagConstraints5.fill = java.awt.GridBagConstraints.BOTH; gridBagConstraints5.weightx = 1.0D; gridBagConstraints5.weighty = 1.0D; gridBagConstraints5.insets = new Insets(2, 5, 5, 0); gridBagConstraints5.anchor = java.awt.GridBagConstraints.NORTHWEST; gridBagConstraints7.weightx = 1.0; gridBagConstraints7.fill = java.awt.GridBagConstraints.HORIZONTAL; gridBagConstraints7.gridx = 0; gridBagConstraints7.gridy = 0; gridBagConstraints7.insets = new Insets(2, 5, 5, 0); jPanel.add(getPanelHeadline(), gridBagConstraints7); jPanel.add(getPanelParam(), gridBagConstraints5); GridBagConstraints gbc_button = new GridBagConstraints(); gbc_button.insets = new Insets(0, 5, 0, 5); gbc_button.gridx = 1; gbc_button.gridy = 0; jPanel.add(getHelpButton(), gbc_button); } return jPanel; }
java
public Playlist withOutputKeys(String... outputKeys) { if (this.outputKeys == null) { setOutputKeys(new com.amazonaws.internal.SdkInternalList<String>(outputKeys.length)); } for (String ele : outputKeys) { this.outputKeys.add(ele); } return this; }
java
protected boolean containsRel100(Message message) { ListIterator<SIPHeader> requireHeaders = message.getHeaders(RequireHeader.NAME); if(requireHeaders != null) { while (requireHeaders.hasNext()) { if(REL100_OPTION_TAG.equals(requireHeaders.next().getValue())) { return true; } } } ListIterator<SIPHeader> supportedHeaders = message.getHeaders(SupportedHeader.NAME); if(supportedHeaders != null) { while (supportedHeaders.hasNext()) { if(REL100_OPTION_TAG.equals(supportedHeaders.next().getValue())) { return true; } } } return false; }
java
public static CPDefinition fetchByCPTaxCategoryId_First( long CPTaxCategoryId, OrderByComparator<CPDefinition> orderByComparator) { return getPersistence() .fetchByCPTaxCategoryId_First(CPTaxCategoryId, orderByComparator); }
python
def overlay_gateway_loopback_id(self, **kwargs): """Configure Overlay Gateway ip interface loopback Args: gw_name: Name of Overlay Gateway <WORD:1-32> loopback_id: Loopback interface Id <NUMBER: 1-255> get (bool): Get config instead of editing config. (True, False) delete (bool): True, delete the overlay gateway loop back id. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `gw_name`, 'loopback_id' is not passed. ValueError: if `gw_name`, 'loopback_id' is invalid. Examples: >>> import pynos.device >>> conn = ('10.24.39.211', '22') >>> auth = ('admin', 'password') >>> with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.overlay_gateway_loopback_id( ... gw_name='Leaf', loopback_id='10') ... output = dev.interface.overlay_gateway_loopback_id( ... get=True) ... output = dev.interface.overlay_gateway_loopback_id( ... gw_name='Leaf', loopback_id='10', delete=True) """ callback = kwargs.pop('callback', self._callback) get_config = kwargs.pop('get', False) if not get_config: gw_name = kwargs.pop('gw_name') loopback_id = kwargs.pop('loopback_id') gw_args = dict(name=gw_name, loopback_id=loopback_id) overlay_gw = getattr(self._tunnels, 'overlay_gateway_ip_' 'interface_loopback_loopback_id') config = overlay_gw(**gw_args) if get_config: overlay_gw = getattr(self._tunnels, 'overlay_gateway_ip_' 'interface_loopback_loopback_id') config = overlay_gw(name='', loopback_id='') output = callback(config, handler='get_config') if output.data.find('.//{*}name') is not None: if output.data.find('.//{*}loopback-id') is not None: ip_intf = output.data.find('.//{*}loopback-id').text return ip_intf else: return None else: return None if kwargs.pop('delete', False): config.find('.//loopback-id').set('operation', 'delete') return callback(config)
python
def is_registered(self, event_type, callback, details_filter=None): """Check if a callback is registered. :param event_type: event type callback was registered to :param callback: callback that was used during registration :param details_filter: details filter that was used during registration :returns: if the callback is registered :rtype: boolean """ listeners = self._topics.get(event_type, []) for listener in listeners: if listener.is_equivalent(callback, details_filter=details_filter): return True return False
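A hypothetical round trip, assuming `notifier` is an instance of the notifier class this method belongs to and that registration happens through its usual register() call:

def on_done(event_type, details):
    print("finished:", details)

notifier.register("task.success", on_done)
assert notifier.is_registered("task.success", on_done)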
java
private static Filter[] filters(QueryFragment... fragments) { Filter[] ret = new Filter[fragments.length]; for (int i = 0; i < fragments.length; ++i) { ret[i] = fragments[i].getFilter(); } return ret; }
python
def blit_rect( self, console: tcod.console.Console, x: int, y: int, width: int, height: int, bg_blend: int, ) -> None: """Blit onto a Console without scaling or rotation. Args: console (Console): Blit destination Console. x (int): Console tile X position starting from the left at 0. y (int): Console tile Y position starting from the top at 0. width (int): Use -1 for Image width. height (int): Use -1 for Image height. bg_blend (int): Background blending mode to use. """ lib.TCOD_image_blit_rect( self.image_c, _console(console), x, y, width, height, bg_blend )
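A small sketch using the public tcod constructors; the console and image sizes are arbitrary:

import tcod

console = tcod.console.Console(80, 50)
image = tcod.image.Image(16, 16)          # blank 16x16 image
image.blit_rect(console, 0, 0, -1, -1, tcod.BKGND_SET)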
python
def is_zombie(self, path): '''Is the node pointed to by @ref path a zombie object?''' node = self.get_node(path) if not node: return False return node.is_zombie
python
def fermionic_constraints(a): """Return a set of constraints that define fermionic ladder operators. :param a: The non-Hermitian variables. :type a: list of :class:`sympy.physics.quantum.operator.Operator`. :returns: a dict of substitutions. """ substitutions = {} for i, ai in enumerate(a): substitutions[ai ** 2] = 0 substitutions[Dagger(ai) ** 2] = 0 substitutions[ai * Dagger(ai)] = 1.0 - Dagger(ai) * ai for aj in a[i+1:]: # substitutions[ai*Dagger(aj)] = -Dagger(ai)*aj substitutions[ai*Dagger(aj)] = -Dagger(aj)*ai substitutions[Dagger(ai)*aj] = -aj*Dagger(ai) substitutions[ai*aj] = -aj*ai substitutions[Dagger(ai) * Dagger(aj)] = - Dagger(aj) * Dagger(ai) return substitutions
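A short check of the generated substitutions for two modes; the operator names are arbitrary:

from sympy.physics.quantum.operator import Operator

a = [Operator('a0'), Operator('a1')]
subs = fermionic_constraints(a)
print(subs[a[0] * a[1]])    # -a1*a0, i.e. the anticommutation rule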
python
def observed_data_to_xarray(self): """Convert observed data to xarray.""" if self.observed is None: return None observed_data = {} if isinstance(self.observed, self.tf.Tensor): with self.tf.Session() as sess: vals = sess.run(self.observed, feed_dict=self.feed_dict) else: vals = self.observed if self.dims is None: dims = {} else: dims = self.dims name = "obs" val_dims = dims.get(name) vals = np.atleast_1d(vals) val_dims, coords = generate_dims_coords(vals.shape, name, dims=val_dims, coords=self.coords) # coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims} observed_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords) return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=self.tfp))
python
def getTotalExpectedOccurrencesTicks_2_5(ticks): """ Extract a set of tick locations and labels. The input ticks are assumed to mean "How many *other* occurrences are there of the sensed feature?" but we want to show how many *total* occurrences there are. So we add 1. We label tick 2, and then 5, 10, 15, 20, ... @param ticks A list of ticks, typically calculated by one of the above generate*List functions. """ locs = [loc for label, loc in ticks] labels = [(str(label + 1) if (label + 1 == 2 or (label+1) % 5 == 0) else "") for label, loc in ticks] return locs, labels
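For example, a handful of (label, location) pairs is turned into total-occurrence labels at 2, 5, 10, ... as described above (an illustrative input, not data from the original code):

ticks = [(0, 0.0), (1, 1.0), (4, 2.0), (9, 3.0), (19, 4.0)]
locs, labels = getTotalExpectedOccurrencesTicks_2_5(ticks)
# locs   -> [0.0, 1.0, 2.0, 3.0, 4.0]
# labels -> ['', '2', '5', '10', '20']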
java
private void completeHalfDoneAcceptRecovery( PersistedRecoveryPaxosData paxosData) throws IOException { if (paxosData == null) { return; } long segmentId = paxosData.getSegmentState().getStartTxId(); long epoch = paxosData.getAcceptedInEpoch(); File tmp = journalStorage.getSyncLogTemporaryFile(segmentId, epoch); if (tmp.exists()) { File dst = journalStorage.getInProgressEditLog(segmentId); LOG.info("Rolling forward previously half-completed synchronization: " + tmp + " -> " + dst); FileUtil.replaceFile(tmp, dst); } }
java
public static CPDefinitionLink fetchByCPD_T_First(long CPDefinitionId, String type, OrderByComparator<CPDefinitionLink> orderByComparator) { return getPersistence() .fetchByCPD_T_First(CPDefinitionId, type, orderByComparator); }
java
private boolean implementsCommonInterface(String name) { try { JavaClass cls = Repository.lookupClass(name); JavaClass[] infs = cls.getAllInterfaces(); for (JavaClass inf : infs) { String infName = inf.getClassName(); if (ignorableInterfaces.contains(infName)) { continue; } if (infName.startsWith("java.")) { return true; } } return false; } catch (ClassNotFoundException cnfe) { bugReporter.reportMissingClass(cnfe); return true; } }
java
protected void fireSegmentAdded(RoadSegment segment) { if (this.listeners != null && isEventFirable()) { for (final RoadNetworkListener listener : this.listeners) { listener.onRoadSegmentAdded(this, segment); } } }
java
public static <V> LongObjectHashMap<V> createPrimitiveLongKeyMap(int initialCapacity, float loadFactor) { return new LongObjectHashMap<V>(initialCapacity, loadFactor); }
python
def do_bestfit(self): """ do bestfit using scipy.odr """ self.check_important_variables() x = np.array(self.args["x"]) y = np.array(self.args["y"]) if self.args.get("use_RealData", True): realdata_kwargs = self.args.get("RealData_kwargs", {}) data = RealData(x, y, **realdata_kwargs) else: data_kwargs = self.args.get("Data_kwargs", {}) data = Data(x, y, **data_kwargs) model = self.args.get("Model", None) if model is None: if "func" not in self.args.keys(): raise KeyError("Need fitting function") model_kwargs = self.args.get("Model_kwargs", {}) model = Model(self.args["func"], **model_kwargs) odr_kwargs = self.args.get("ODR_kwargs", {}) odr = ODR(data, model, **odr_kwargs) self.output = odr.run() if self.args.get("pprint", False): self.output.pprint() self.fit_args = self.output.beta return self.fit_args
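The method wraps the standard scipy.odr flow; stripped of the class plumbing, the equivalent standalone fit looks roughly like this (the linear model and data are invented for illustration):

import numpy as np
from scipy.odr import ODR, Model, RealData

def linear(beta, x):
    return beta[0] * x + beta[1]

x = np.linspace(0.0, 10.0, 20)
y = 2.0 * x + 1.0 + np.random.normal(scale=0.1, size=x.size)

odr = ODR(RealData(x, y), Model(linear), beta0=[1.0, 0.0])
output = odr.run()
print(output.beta)    # fitted slope and intercept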
python
def mousePressEvent(self, event): """Override Qt method""" if event.button() == Qt.MidButton: index = self.tabBar().tabAt(event.pos()) if index >= 0: self.sig_close_tab.emit(index) event.accept() return QTabWidget.mousePressEvent(self, event)
java
public Object execute(final Map<Object, Object> iArgs) { if (newRecords == null) throw new OCommandExecutionException("Cannot execute the command because it has not been parsed yet"); final OCommandParameters commandParameters = new OCommandParameters(iArgs); if (indexName != null) { final OIndex<?> index = getDatabase().getMetadata().getIndexManager().getIndex(indexName); if (index == null) throw new OCommandExecutionException("Target index '" + indexName + "' not found"); // BIND VALUES Map<String, Object> result = null; for (Map<String, Object> candidate : newRecords) { index.put(getIndexKeyValue(commandParameters, candidate), getIndexValue(commandParameters, candidate)); result = candidate; } // RETURN LAST ENTRY return new ODocument(result); } else { // CREATE NEW DOCUMENTS final List<ODocument> docs = new ArrayList<ODocument>(); for (Map<String, Object> candidate : newRecords) { final ODocument doc = className != null ? new ODocument(className) : new ODocument(); OSQLHelper.bindParameters(doc, candidate, commandParameters); if (clusterName != null) { doc.save(clusterName); } else { doc.save(); } docs.add(doc); } if (docs.size() == 1) { return docs.get(0); } else { return docs; } } }
python
def calculate_size(transaction_id, thread_id):
    """Calculates the request payload size: the encoded transaction id
    string plus a fixed-size long for the thread id."""
    data_size = 0
    data_size += calculate_size_str(transaction_id)
    data_size += LONG_SIZE_IN_BYTES
    return data_size
java
@Override public synchronized void onClose() { super.onClose(); if (phoneStateListener != null) telephonyManager.listen(phoneStateListener, PhoneStateListener.LISTEN_NONE); }
python
def buildMaskImage(rootname, bitvalue, output, extname='DQ', extver=1):
    """ Builds a mask image from the rootname's DQ array.

        If no valid 'DQ' extension is found, a mask of all ones is built
        from the shape of the matching 'SCI' extension. Returns the name
        of the mask image written out, or None if the mask could not be
        created.
    """
    # If no bitvalue is set or rootname given, assume no mask is desired
    # However, this name would be useful as the output mask from
    # other processing, such as MultiDrizzle, so return it anyway.
    #if bitvalue == None or rootname == None:
    #    return None

    # build output name
    maskname = output

    # If an old version of the maskfile was present, remove it and rebuild it.
    if fileutil.findFile(maskname):
        fileutil.removeFile(maskname)

    # Open input file with DQ array
    fdq = fileutil.openImage(rootname, mode='readonly', memmap=False)
    try:
        _extn = fileutil.findExtname(fdq, extname, extver=extver)
        if _extn is not None:
            # Read in DQ array
            dqarr = fdq[_extn].data
        else:
            dqarr = None

        # For the case where there is no DQ array,
        # create a mask image of all ones.
        if dqarr is None:
            # We need to get the dimensions of the output DQ array
            # Since the DQ array is non-existent, look for the SCI extension
            _sci_extn = fileutil.findExtname(fdq, 'SCI', extver=extver)
            if _sci_extn is not None:
                _shape = fdq[_sci_extn].data.shape
                dqarr = np.zeros(_shape, dtype=np.uint16)
            else:
                raise Exception
        # Build mask array from DQ array
        maskarr = buildMask(dqarr, bitvalue)
        # Write out the mask file as a simple FITS file
        fmask = fits.open(maskname, mode='append', memmap=False)
        maskhdu = fits.PrimaryHDU(data=maskarr)
        fmask.append(maskhdu)

        # Close files
        fmask.close()
        del fmask
        fdq.close()
        del fdq

    except:
        fdq.close()
        del fdq
        # Safeguard against leaving behind an incomplete file
        if fileutil.findFile(maskname):
            os.remove(maskname)
        _errstr = "\nWarning: Problem creating MASK file for " + rootname + ".\n"
        #raise IOError, _errstr
        print(_errstr)
        return None

    # Return the name of the mask image written out
    return maskname
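A hypothetical call; the filenames are placeholders and the bit value depends on the instrument's DQ flag definitions:

mask = buildMaskImage("input_flt.fits", 96, "input_mask.fits")
if mask is not None:
    print("wrote", mask)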
python
def _get_binop_contexts(context, left, right): """Get contexts for binary operations. This will return two inference contexts, the first one for x.__op__(y), the other one for y.__rop__(x), where only the arguments are inversed. """ # The order is important, since the first one should be # left.__op__(right). for arg in (right, left): new_context = context.clone() new_context.callcontext = contextmod.CallContext(args=[arg]) new_context.boundnode = None yield new_context
python
def create_shortlink(self, callback_uri=None, description=None, serial_number=None): """Register new shortlink Arguments: callback_uri: URI called by mCASH when user scans shortlink description: Shortlink description displayed in confirmation dialogs serial_number: Serial number on printed QR codes. This field is only used when registering printed stickers issued by mCASH """ arguments = {'callback_uri': callback_uri, 'description': description, 'serial_number': serial_number} return self.do_req('POST', self.merchant_api_base_url + '/shortlink/', arguments).json()
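Hypothetical usage, assuming `client` is an instance of the merchant API client this method is defined on; the callback URI and description are made up:

shortlink = client.create_shortlink(
    callback_uri="https://example.com/scan-callback",
    description="Pay at the counter",
)
print(shortlink)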
java
static public int asIntValue(byte[] array, int offset, int length) { if (null == array || array.length <= offset) { return -1; } int intVal = 0; int mark = 1; int digit; int i = offset + length - 1; // PK16337 - ignore trailing whitespace for (; offset <= i; i--) { char c = (char) array[i]; if (BNFHeaders.SPACE != c && BNFHeaders.TAB != c) { break; } } for (; offset <= i; i--) { digit = array[i] - ZERO; if (0 > digit || 9 < digit) { // stop on any nondigit, if it's not a DASH then throw an exc if (DASH != array[i]) { throw new NumberFormatException("Invalid digit: " + array[i]); } break; } intVal += digit * mark; mark *= 10; } // check for negative numbers if (offset <= i && array[i] == DASH) { intVal = -intVal; } return intVal; }