Dataset columns:
language: string (2 classes)
func_code_string: string (length 63 to 466k)
python
def get_dict_value(dictionary, path):
    """
    Safely get the value of a dictionary given a key path.

    For instance, for the dictionary `{ 'a': { 'b': 1 } }`, the value at
    key path ['a'] is { 'b': 1 }, at key path ['a', 'b'] is 1, and at
    key path ['a', 'b', 'c'] is None.

    :param dictionary: a dictionary.
    :param path: the key path.
    :return: The value of the dictionary at the given key path, or None
        if the key path does not exist.
    """
    if len(path) == 0:
        return None
    temp_dictionary = dictionary
    try:
        for k in path:
            temp_dictionary = temp_dictionary[k]
        return temp_dictionary
    except (KeyError, TypeError):
        pass
    return None
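A quick usage sketch of the helper above (the dictionary literal is illustrative):

d = {'a': {'b': 1}}
print(get_dict_value(d, ['a']))            # {'b': 1}
print(get_dict_value(d, ['a', 'b']))       # 1
print(get_dict_value(d, ['a', 'b', 'c']))  # None (path does not exist)
print(get_dict_value(d, []))               # None (empty path)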
python
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
    """Returns a key derived using the scrypt key-derivation function.

    N must be a power of two larger than 1 but no larger than 2 ** 63 (insane).
    r and p must be positive numbers such that r * p < 2 ** 30.

    The default values are:
    N -- 2**14 (~16k)
    r -- 8
    p -- 1

    Memory usage is proportional to N*r. Defaults require about 16 MiB.
    Time taken is proportional to N*p. Defaults take <100ms on a recent x86.

    The last one differs from libscrypt defaults, but matches the
    'interactive' work factor from the original paper. For long term
    storage where runtime of key derivation is not a problem, you could
    use 16 as in libscrypt or better yet increase N if memory is plentiful.
    """
    check_args(password, salt, N, r, p, olen)
    try:
        return _scrypt(password=password, salt=salt, N=N, r=r, p=p,
                       buflen=olen)
    except Exception:
        raise ValueError
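A back-of-the-envelope check of the memory claim in the docstring, assuming scrypt's usual footprint of roughly 128 * N * r bytes:

N, r = 2 ** 14, 8
print(128 * N * r / 2 ** 20)  # 16.0 MiB, matching "about 16 MiB" above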
java
public int[] getSparseIndices(int component) {
    assert (sparse[component]);
    int[] indices = new int[pointers[component].length / 2];
    for (int i = 0; i < pointers[component].length / 2; i++) {
        indices[i] = (int) pointers[component][i * 2];
    }
    return indices;
}
java
protected VarRefAssignExpress parseAssingInExp(AssignGeneralInExpContext agc) {
    VarRefAssignExpress vas = null;
    ExpressionContext expCtx = agc.generalAssignExp().expression();
    Expression exp = parseExpress(expCtx);
    VarRefContext varRefCtx = agc.generalAssignExp().varRef();
    VarRef ref = this.parseVarRefInLeftExpression(varRefCtx);
    vas = new VarRefAssignExpress(exp, ref);
    if (ref.attributes.length == 0) {
        // variable definition:
        Token token = varRefCtx.Identifier().getSymbol();
        if (pbCtx.hasDefined(token.getText()) != null) {
            registerVar(vas);
            return vas;
        } else {
            BeetlException ex = new BeetlException(BeetlException.VAR_NOT_DEFINED);
            ex.pushToken(this.getBTToken(token));
            throw ex;
        }
    }
    return vas;
}
java
public synchronized void printResults() throws Exception {
    ClientStats stats = fullStatsContext.fetch().getStats();

    // 1. Unique Device ID counting results
    System.out.println("\n" + HORIZONTAL_RULE
            + " Unique Device ID Counting Results\n" + HORIZONTAL_RULE);
    System.out.printf("A total of %,9d device ids were processed and %d failed (%.2f percent).\n\n",
            totalVotes.get(), failedVotes.get(),
            failedVotes.get() * 100.0 / acceptedVotes.get());

    long[] counts = new long[config.appcount];
    double[] errors = new double[config.appcount];
    int nonZeroCount = 0;
    double errorSum = 0;
    for (int appId = 0; appId < config.appcount; appId++) {
        ClientResponse cr = client.callProcedure("GetCardEstForApp", appId);
        long estimate = cr.getResults()[0].asScalarLong();
        long count = generator.expectedCountForApp(appId);
        if (estimate == count) {
            nonZeroCount++;
        }
        double percentError = Math.abs(estimate - count) * 100.0 / count;
        errorSum += percentError;
        counts[appId] = count;
        errors[appId] = percentError;
    }

    Arrays.sort(errors);
    double maxError = errors[config.appcount - 1];
    double medianError = errors[config.appcount / 2];
    double meanError = errorSum / config.appcount;

    System.out.printf("The maximum error percentage was: %.2f.\n", maxError);
    System.out.printf("The median error percentage was: %.2f.\n", medianError);
    System.out.printf("The mean error percentage was: %.2f.\n", meanError);
    System.out.printf("The number of apps with perfect estimates was %d or %.2f%%.\n",
            nonZeroCount, nonZeroCount * 100.0 / config.appcount);

    // 3. Performance statistics
    System.out.println("\n" + HORIZONTAL_RULE
            + " Client Workload Statistics\n" + HORIZONTAL_RULE);
    System.out.printf("Average throughput: %,9d txns/sec\n", stats.getTxnThroughput());
    System.out.println();

    // 4. Write stats to file if requested
    client.writeSummaryCSV(stats, config.statsfile);
}
java
public static Config load(ClassLoader loader, ConfigParseOptions parseOptions) {
    return load(parseOptions.setClassLoader(loader));
}
python
def delete(self):
    """
    Deletes this NIO.
    """
    if self._input_filter or self._output_filter:
        yield from self.unbind_filter("both")
    yield from self._hypervisor.send("nio delete {}".format(self._name))
    log.info("NIO {name} has been deleted".format(name=self._name))
java
public static void loadAndShow(final Runnable finishAction) {
    CmsRpcAction<CmsUserSettingsBean> action = new CmsRpcAction<CmsUserSettingsBean>() {

        @Override
        public void execute() {
            start(200, false);
            CmsCoreProvider.getService().loadUserSettings(this);
        }

        @Override
        protected void onResponse(CmsUserSettingsBean result) {
            stop(false);
            CmsUserSettingsDialog dlg = new CmsUserSettingsDialog(result, finishAction);
            dlg.centerHorizontally(50);
            dlg.initWidth();
        }
    };
    action.execute();
}
java
private static ObjectMetadata getObjectMetadataForKey(Key k) {
    String[] bk = decodeKey(k);
    assert (bk.length == 2);
    return getClient().getObjectMetadata(bk[0], bk[1]);
}
java
private void restoreDefaults(@NonNull final PreferenceGroup preferenceGroup,
        @NonNull final SharedPreferences sharedPreferences) {
    for (int i = 0; i < preferenceGroup.getPreferenceCount(); i++) {
        Preference preference = preferenceGroup.getPreference(i);

        if (preference instanceof PreferenceGroup) {
            restoreDefaults((PreferenceGroup) preference, sharedPreferences);
        } else if (preference.getKey() != null && !preference.getKey().isEmpty()) {
            Object oldValue = sharedPreferences.getAll().get(preference.getKey());

            if (notifyOnRestoreDefaultValueRequested(preference, oldValue)) {
                sharedPreferences.edit().remove(preference.getKey()).apply();
                preferenceGroup.removePreference(preference);
                preferenceGroup.addPreference(preference);
                Object newValue = sharedPreferences.getAll().get(preference.getKey());
                notifyOnRestoredDefaultValue(preference, oldValue, newValue);
            } else {
                preferenceGroup.removePreference(preference);
                preferenceGroup.addPreference(preference);
            }
        }
    }
}
python
def get_effective_ecs(self, strain, order=2):
    """
    Returns the effective elastic constants from the elastic tensor
    expansion.

    Args:
        strain (Strain or 3x3 array-like): strain condition under which
            to calculate the effective constants
        order (int): order of the ecs to be returned
    """
    ec_sum = 0
    for n, ecs in enumerate(self[order - 2:]):
        ec_sum += ecs.einsum_sequence([strain] * n) / factorial(n)
    return ec_sum
java
public void ensureTablesExist(final TSDB tsdb) {
    final List<Deferred<Object>> deferreds =
            new ArrayList<Deferred<Object>>(forward_intervals.size() * 2);

    for (RollupInterval interval : forward_intervals.values()) {
        deferreds.add(tsdb.getClient()
                .ensureTableExists(interval.getTemporalTable()));
        deferreds.add(tsdb.getClient()
                .ensureTableExists(interval.getGroupbyTable()));
    }

    try {
        Deferred.group(deferreds).joinUninterruptibly();
    } catch (DeferredGroupException e) {
        throw new RuntimeException(e.getCause());
    } catch (InterruptedException e) {
        LOG.warn("Interrupted", e);
        Thread.currentThread().interrupt();
    } catch (Exception e) {
        throw new RuntimeException("Unexpected exception", e);
    }
}
python
def estimate_gas_for_function(
        address,
        web3,
        fn_identifier=None,
        transaction=None,
        contract_abi=None,
        fn_abi=None,
        block_identifier=None,
        *args,
        **kwargs,
):
    """Temporary workaround until next web3.py release (5.X.X)"""
    estimate_transaction = prepare_transaction(
        address,
        web3,
        fn_identifier=fn_identifier,
        contract_abi=contract_abi,
        fn_abi=fn_abi,
        transaction=transaction,
        fn_args=args,
        fn_kwargs=kwargs,
    )
    try:
        gas_estimate = web3.eth.estimateGas(estimate_transaction, block_identifier)
    except ValueError as e:
        if check_value_error_for_parity(e, ParityCallType.ESTIMATE_GAS):
            gas_estimate = None
        else:
            # else the error is not denoting estimate gas failure and is something else
            raise e
    return gas_estimate
python
def get_eventhub_info(self):
    """
    Get details on the specified EventHub.
    Keys in the details dictionary include:

     - 'name'
     - 'type'
     - 'created_at'
     - 'partition_count'
     - 'partition_ids'

    :rtype: dict
    """
    alt_creds = {
        "username": self._auth_config.get("iot_username"),
        "password": self._auth_config.get("iot_password")}
    try:
        mgmt_auth = self._create_auth(**alt_creds)
        mgmt_client = uamqp.AMQPClient(self.mgmt_target, auth=mgmt_auth, debug=self.debug)
        mgmt_client.open()
        mgmt_msg = Message(application_properties={'name': self.eh_name})
        response = mgmt_client.mgmt_request(
            mgmt_msg,
            constants.READ_OPERATION,
            op_type=b'com.microsoft:eventhub',
            status_code_field=b'status-code',
            description_fields=b'status-description')
        eh_info = response.get_data()
        output = {}
        if eh_info:
            output['name'] = eh_info[b'name'].decode('utf-8')
            output['type'] = eh_info[b'type'].decode('utf-8')
            output['created_at'] = datetime.datetime.fromtimestamp(
                float(eh_info[b'created_at']) / 1000)
            output['partition_count'] = eh_info[b'partition_count']
            output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']]
        return output
    finally:
        mgmt_client.close()
python
def cmd_save(args):
    '''save a graph'''
    child = multiproc.Process(target=save_process,
                              args=[mestate.last_graph,
                                    mestate.child_pipe_send_console,
                                    mestate.child_pipe_send_graph,
                                    mestate.status.msgs])
    child.start()
java
public static File createTemporaryEmptyCatalogJarFile(boolean isXDCR) throws IOException {
    File emptyJarFile = File.createTempFile("catalog-empty", ".jar");
    emptyJarFile.deleteOnExit();
    VoltCompiler compiler = new VoltCompiler(isXDCR);
    if (!compiler.compileEmptyCatalog(emptyJarFile.getAbsolutePath())) {
        return null;
    }
    return emptyJarFile;
}
java
private void release(PooledConnection<C> pooledConnection) {
    long currentTime = System.currentTimeMillis();
    long useTime;
    synchronized (pooledConnection) {
        pooledConnection.releaseTime = currentTime;
        useTime = currentTime - pooledConnection.startTime;
        if (useTime > 0) {
            pooledConnection.totalTime.addAndGet(useTime);
        }
        pooledConnection.allocateStackTrace = null;
    }
    // Remove from the pool
    synchronized (poolLock) {
        try {
            if (busyConnections.remove(pooledConnection)) {
                availableConnections.add(pooledConnection);
            }
        } finally {
            poolLock.notify();
        }
    }
}
java
public T get(String json, OnJsonObjectAddListener listener)
        throws IOException, JsonFormatException {
    JsonPullParser parser = JsonPullParser.newParser(json);
    return get(parser);
}
python
def encode_constructor_arguments(self, args):
    """
    Return the encoded constructor call.
    """
    if self.constructor_data is None:
        raise ValueError(
            "The contract interface didn't have a constructor")
    return encode_abi(self.constructor_data['encode_types'], args)
python
def explain_prediction_tree_classifier(
        clf, doc,
        vec=None,
        top=None,
        top_targets=None,
        target_names=None,
        targets=None,
        feature_names=None,
        feature_re=None,
        feature_filter=None,
        vectorized=False):
    """ Explain prediction of a tree classifier.

    See :func:`eli5.explain_prediction` for description of
    ``top``, ``top_targets``, ``target_names``, ``targets``,
    ``feature_names``, ``feature_re`` and ``feature_filter`` parameters.

    ``vec`` is a vectorizer instance used to transform raw features to the
    input of the classifier ``clf`` (e.g. a fitted CountVectorizer instance);
    you can pass it instead of ``feature_names``.

    ``vectorized`` is a flag which tells eli5 if ``doc`` should be
    passed through ``vec`` or not. By default it is False, meaning that
    if ``vec`` is not None, ``vec.transform([doc])`` is passed to the
    classifier. Set it to True if you're passing ``vec``,
    but ``doc`` is already vectorized.

    Method for determining feature importances follows an idea from
    http://blog.datadive.net/interpreting-random-forests/.
    Feature weights are calculated by following decision paths in trees
    of an ensemble (or a single tree for DecisionTreeClassifier).
    Each node of the tree has an output score, and contribution of a feature
    on the decision path is how much the score changes from parent to child.
    Weights of all features sum to the output score or proba of the estimator.
    """
    vec, feature_names = handle_vec(clf, doc, vec, vectorized, feature_names)
    X = get_X(doc, vec=vec, vectorized=vectorized)
    if feature_names.bias_name is None:
        # Tree estimators do not have an intercept, but here we interpret
        # them as having an intercept
        feature_names.bias_name = '<BIAS>'

    proba = predict_proba(clf, X)
    if hasattr(clf, 'decision_function'):
        score, = clf.decision_function(X)
    else:
        score = None

    is_multiclass = clf.n_classes_ > 2
    feature_weights = _trees_feature_weights(
        clf, X, feature_names, clf.n_classes_)
    x = get_X0(add_intercept(X))
    flt_feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re, x)

    def _weights(label_id, scale=1.0):
        weights = feature_weights[:, label_id]
        return get_top_features_filtered(x, flt_feature_names, flt_indices,
                                         weights, top, scale)

    res = Explanation(
        estimator=repr(clf),
        method='decision path',
        targets=[],
        description=(DESCRIPTION_TREE_CLF_MULTICLASS if is_multiclass
                     else DESCRIPTION_TREE_CLF_BINARY),
    )
    assert res.targets is not None

    display_names = get_target_display_names(
        clf.classes_, target_names, targets, top_targets,
        score=score if score is not None else proba)

    if is_multiclass:
        for label_id, label in display_names:
            target_expl = TargetExplanation(
                target=label,
                feature_weights=_weights(label_id),
                score=score[label_id] if score is not None else None,
                proba=proba[label_id] if proba is not None else None,
            )
            add_weighted_spans(doc, vec, vectorized, target_expl)
            res.targets.append(target_expl)
    else:
        target, scale, label_id = get_binary_target_scale_label_id(
            score, display_names, proba)
        target_expl = TargetExplanation(
            target=target,
            feature_weights=_weights(label_id, scale=scale),
            score=score if score is not None else None,
            proba=proba[label_id] if proba is not None else None,
        )
        add_weighted_spans(doc, vec, vectorized, target_expl)
        res.targets.append(target_expl)

    return res
java
public static int compare(XtendParameter p1, XtendParameter p2) {
    if (p1 != p2) {
        if (p1 == null) {
            return Integer.MIN_VALUE;
        }
        if (p2 == null) {
            return Integer.MAX_VALUE;
        }
        final JvmTypeReference t1 = p1.getParameterType();
        final JvmTypeReference t2 = p2.getParameterType();
        if (t1 != t2) {
            final int cmp;
            if (t1 == null) {
                cmp = Integer.MIN_VALUE;
            } else if (t2 == null) {
                cmp = Integer.MAX_VALUE;
            } else {
                cmp = t1.getIdentifier().compareTo(t2.getIdentifier());
            }
            if (cmp != 0) {
                return cmp;
            }
        }
    }
    return 0;
}
java
protected void processIcons(ArtifactMetadata amd, RepositoryResourceWritable res)
        throws RepositoryException {
    String current = "";
    String sizeString = "";
    String iconName = "";
    String iconNames = amd.getIcons();
    if (iconNames != null) {
        // replaceAll returns a new string; the result must be assigned back.
        iconNames = iconNames.replaceAll("\\s", "");

        StringTokenizer s = new StringTokenizer(iconNames, ",");
        while (s.hasMoreTokens()) {
            current = s.nextToken();
            int size = 0;

            if (current.contains(";")) { // if the icon has an associated size
                StringTokenizer t = new StringTokenizer(current, ";");
                while (t.hasMoreTokens()) {
                    sizeString = t.nextToken();
                    if (sizeString.contains("size=")) {
                        String[] sizes = sizeString.split("size=");
                        size = Integer.parseInt(sizes[sizes.length - 1]);
                    } else {
                        iconName = sizeString;
                    }
                }
            } else {
                iconName = current;
            }

            File icon = this.extractFileFromArchive(
                    amd.getArchive().getAbsolutePath(), iconName).getExtractedFile();
            if (icon.exists()) {
                AttachmentResourceWritable at = res.addAttachment(icon, AttachmentType.THUMBNAIL);
                if (size != 0) {
                    at.setImageDimensions(size, size);
                }
            } else {
                throw new RepositoryArchiveEntryNotFoundException(
                        "Icon does not exist", amd.getArchive(), iconName);
            }
        }
    }
}
python
def code_deparse_around_offset(name, offset, co, out=StringIO(),
                               version=None, is_pypy=None,
                               debug_opts=DEFAULT_DEBUG_OPTS):
    """
    Like deparse_code(), but given a function/module name and offset, finds
    the node closest to offset. If offset is not an instruction boundary,
    we raise an IndexError.
    """
    assert iscode(co)

    if version is None:
        version = sysinfo2float()
    if is_pypy is None:
        is_pypy = IS_PYPY

    deparsed = code_deparse(co, out, version, is_pypy, debug_opts)
    if (name, offset) in deparsed.offsets.keys():
        # This is the easy case
        return deparsed
    valid_offsets = [t for t in deparsed.offsets if isinstance(t[1], int)]
    offset_list = sorted([t[1] for t in valid_offsets if t[0] == name])

    # FIXME: should check for branching?
    found_offset = find_gt(offset_list, offset)
    deparsed.offsets[name, offset] = deparsed.offsets[name, found_offset]
    return deparsed
java
public Env<AttrContext> classEnv(JCClassDecl tree, Env<AttrContext> env) {
    Env<AttrContext> localEnv =
            env.dup(tree, env.info.dup(WriteableScope.create(tree.sym)));
    localEnv.enclClass = tree;
    localEnv.outer = env;
    localEnv.info.isSelfCall = false;
    localEnv.info.lint = null; // leave this to be filled in by Attr,
                               // when annotations have been processed
    localEnv.info.isAnonymousDiamond = TreeInfo.isDiamond(env.tree);
    return localEnv;
}
java
public boolean isAssignable(Type t, Type s, Warner warn) {
    if (t.hasTag(ERROR)) {
        return true;
    }
    if (t.getTag().isSubRangeOf(INT) && t.constValue() != null) {
        int value = ((Number) t.constValue()).intValue();
        switch (s.getTag()) {
        case BYTE:
        case CHAR:
        case SHORT:
        case INT:
            if (s.getTag().checkRange(value)) {
                return true;
            }
            break;
        case CLASS:
            switch (unboxedType(s).getTag()) {
            case BYTE:
            case CHAR:
            case SHORT:
                return isAssignable(t, unboxedType(s), warn);
            }
            break;
        }
    }
    return isConvertible(t, s, warn);
}
python
def dragEnterEvent(self, event):
    """Determines if the widget under the mouse can receive the drop"""
    super(AbstractDragView, self).dragEnterEvent(event)
    if event.mimeData().hasFormat("application/x-protocol"):
        event.setDropAction(QtCore.Qt.MoveAction)
        event.accept()
    else:
        event.ignore()
python
def unpackage(package_):
    '''
    Unpackages a payload
    '''
    return salt.utils.msgpack.loads(package_, use_list=True,
                                    _msgpack_module=msgpack)
python
def get_rect(self):
    """
    Get rectangle of app or desktop resolution

    Returns:
        RECT(left, top, right, bottom)
    """
    if self.handle:
        left, top, right, bottom = win32gui.GetWindowRect(self.handle)
        return RECT(left, top, right, bottom)
    else:
        desktop = win32gui.GetDesktopWindow()
        left, top, right, bottom = win32gui.GetWindowRect(desktop)
        return RECT(left, top, right, bottom)
java
@Override
public ServerConfiguration read() throws ConfigurationException {
    String defaultXmlLocation = "/appsensor-server-config.xml";
    String defaultXsdLocation = "/appsensor_server_config_2.0.xsd";
    return read(defaultXmlLocation, defaultXsdLocation);
}
python
def transform(self, X=None, y=None):
    """
    Transform an image using an Affine transform with the given
    rotation parameters. Return the transform if X=None.

    Arguments
    ---------
    X : ANTsImage
        Image to transform

    y : ANTsImage (optional)
        Another image to transform

    Returns
    -------
    ANTsImage if y is None, else a tuple of ANTsImage types

    Examples
    --------
    >>> import ants
    >>> img = ants.image_read(ants.get_data('r16'))
    >>> tx = ants.contrib.Rotate2D(rotation=(10,-5,12))
    >>> img2 = tx.transform(img)
    """
    # unpack rotation parameter (degrees)
    rotation = self.rotation

    # build the 2-D rotation matrix
    theta = math.pi / 180 * rotation
    rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                [np.sin(theta), np.cos(theta), 0]])
    self.tx.set_parameters(rotation_matrix)

    if self.lazy or X is None:
        return self.tx
    else:
        return self.tx.apply_to_image(X, reference=self.reference)
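A standalone sanity check (separate from the class above) that the 2-D rotation matrix built in transform() behaves as expected: a 90-degree rotation should map the unit x-vector onto the y-axis.

import math
import numpy as np

theta = math.pi / 180 * 90
matrix = np.array([[np.cos(theta), -np.sin(theta)],
                   [np.sin(theta), np.cos(theta)]])
print(np.round(matrix @ np.array([1.0, 0.0]), 6))  # [0. 1.]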
java
public static TimecodeRange merge(TimecodeRange a, TimecodeRange b) {
    final Timecode start = TimecodeComparator.min(a.getStart(), b.getStart());
    final Timecode end = TimecodeComparator.max(a.getEnd(), b.getEnd());
    return new TimecodeRange(start, end);
}
java
public PutPlaybackConfigurationResult withTags(java.util.Map<String, String> tags) {
    setTags(tags);
    return this;
}
python
def fetch_bug_details(self, bug_ids):
    """Fetches bug metadata from bugzilla and returns an encoded
    dict if successful, otherwise returns None."""
    params = {'include_fields': 'product, component, priority, whiteboard, id'}
    params['id'] = bug_ids
    try:
        response = self.session.get(settings.BZ_API_URL + '/rest/bug',
                                    headers=self.session.headers,
                                    params=params,
                                    timeout=30)
        response.raise_for_status()
    except RequestException as e:
        logger.warning('error fetching bugzilla metadata for bugs due to {}'.format(e))
        return None

    if response.headers['Content-Type'] == 'text/html; charset=UTF-8':
        return None

    data = response.json()
    if 'bugs' not in data:
        return None

    return data['bugs']
java
private RaftServiceContext getOrInitializeService(
        PrimitiveId primitiveId,
        PrimitiveType primitiveType,
        String serviceName,
        byte[] config) {
    // Get the state machine executor or create one if it doesn't already exist.
    RaftServiceContext service = raft.getServices().getService(serviceName);
    if (service == null) {
        service = initializeService(primitiveId, primitiveType, serviceName, config);
    }
    return service;
}
python
def mp_iuwt_recomposition(in1, scale_adjust, core_count, smoothed_array=None):
    """
    This function calls the a trous algorithm code to recompose the input into a single array. This is
    the implementation of the isotropic undecimated wavelet transform recomposition for multiple CPU cores.

    INPUTS:
    in1             (no default):   Array containing wavelet coefficients.
    scale_adjust    (no default):   Indicates the number of omitted array pages.
    core_count      (no default):   Indicates the number of cores to be used.
    smoothed_array  (default=None): For a complete inverse transform, this must be the smoothed approximation.

    OUTPUTS:
    recomposition                   Array containing the reconstructed image.
    """
    wavelet_filter = (1. / 16) * np.array([1, 4, 6, 4, 1])  # Filter-bank for use in the a trous algorithm.

    # Determines scale with adjustment and creates a zero array to store the output, unless smoothed_array is given.
    max_scale = in1.shape[0] + scale_adjust

    if smoothed_array is None:
        recomposition = np.zeros([in1.shape[1], in1.shape[2]])
    else:
        recomposition = smoothed_array

    # The following loops call the a trous algorithm code to recompose the input. The first loop assumes that
    # there are non-zero wavelet coefficients at scales above scale_adjust, while the second loop completes the
    # recomposition on the scales less than scale_adjust.
    for i in range(max_scale - 1, scale_adjust - 1, -1):
        recomposition = mp_a_trous(recomposition, wavelet_filter, i, core_count) + in1[i - scale_adjust, :, :]

    if scale_adjust > 0:
        for i in range(scale_adjust - 1, -1, -1):
            recomposition = mp_a_trous(recomposition, wavelet_filter, i, core_count)

    return recomposition
python
def calculate_concat_output_shapes(operator):
    '''
    Allowed input/output patterns are
        1. [N_1, C, H, W], ..., [N_n, C, H, W] ---> [N_1 + ... + N_n, C, H, W]
        2. [N, C_1, H, W], ..., [N, C_n, H, W] ---> [N, C_1 + ... + C_n, H, W]
    '''
    check_input_and_output_numbers(operator, input_count_range=[1, None],
                                   output_count_range=[1, 1])

    output_shape = copy.deepcopy(operator.inputs[0].type.shape)
    dims = []
    for variable in operator.inputs:
        if variable.type.shape[0] != 'None' and variable.type.shape[0] != output_shape[0]:
            raise RuntimeError('Only dimensions along C-axis can be different')
        if variable.type.shape[2] != 'None' and variable.type.shape[2] != output_shape[2]:
            raise RuntimeError('Only dimensions along C-axis can be different')
        if variable.type.shape[3] != 'None' and variable.type.shape[3] != output_shape[3]:
            raise RuntimeError('Only dimensions along C-axis can be different')
        dims.append(variable.type.shape[1])

    output_shape[1] = 'None' if 'None' in dims else sum(dims)
    operator.outputs[0].type.shape = output_shape
python
def merge_radia(job, perchrom_rvs):
    """
    This module will merge the per-chromosome radia files created by spawn_radia into a genome vcf.
    It will make 2 vcfs, one for PASSing non-germline calls, and one for all calls.

    ARGUMENTS
    1. perchrom_rvs: REFER RETURN VALUE of spawn_radia()

    RETURN VALUES
    1. output_files: Dict of outputs
            output_files
              |- radia_calls.vcf: <JSid>
              +- radia_parsed_filter_passing_calls.vcf: <JSid>

    This module corresponds to node 11 on the tree
    """
    job.fileStore.logToMaster('Running merge_radia')
    work_dir = job.fileStore.getLocalTempDir()
    # We need to squash the input dict of dicts to a single dict such that it can be passed to
    # get_files_from_filestore
    input_files = {filename: jsid
                   for perchrom_files in perchrom_rvs.values()
                   for filename, jsid in perchrom_files.items()}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
    # list(...) is required: a bare range cannot be concatenated to a list in Python 3.
    chromosomes = [''.join(['chr', str(x)]) for x in list(range(1, 23)) + ['X', 'Y']]
    with open('/'.join([work_dir, 'radia_calls.vcf']), 'w') as radfile, \
            open('/'.join([work_dir, 'radia_filter_passing_calls.vcf']), 'w') as radpassfile:
        for chrom in chromosomes:
            with open(input_files[''.join(['radia_filtered_', chrom, '.vcf'])], 'r') as filtradfile:
                for line in filtradfile:
                    line = line.strip()
                    if line.startswith('#'):
                        if chrom == 'chr1':
                            print(line, file=radfile)
                            print(line, file=radpassfile)
                        continue
                    else:
                        print(line, file=radfile)
                        line = line.split('\t')
                        if line[6] == 'PASS' and 'MT=GERM' not in line[7]:
                            print('\t'.join(line), file=radpassfile)
    # parse the PASS radia vcf for multiple alt alleles
    with open(radpassfile.name, 'r') as radpassfile, \
            open('/'.join([work_dir, 'radia_parsed_filter_passing_calls.vcf']), 'w') as parsedradfile:
        parse_radia_multi_alt(radpassfile, parsedradfile)
    output_files = defaultdict()
    for radia_file in [radfile.name, parsedradfile.name]:
        output_files[os.path.basename(radia_file)] = job.fileStore.writeGlobalFile(radia_file)
    return output_files
java
public boolean isJavaSwitchExpression(final XSwitchExpression it) {
    boolean _xblockexpression = false;
    {
        final LightweightTypeReference switchType = this.getSwitchVariableType(it);
        if ((switchType == null)) {
            return false;
        }
        boolean _isSubtypeOf = switchType.isSubtypeOf(Integer.TYPE);
        if (_isSubtypeOf) {
            return true;
        }
        boolean _isSubtypeOf_1 = switchType.isSubtypeOf(Enum.class);
        if (_isSubtypeOf_1) {
            return true;
        }
        _xblockexpression = false;
    }
    return _xblockexpression;
}
python
def subdispatch_to_all_initiatortransfer(
        payment_state: InitiatorPaymentState,
        state_change: StateChange,
        channelidentifiers_to_channels: ChannelMap,
        pseudo_random_generator: random.Random,
) -> TransitionResult[InitiatorPaymentState]:
    events = list()
    # Copy and iterate over the list of keys because this loop will alter
    # the `initiator_transfers` list and this is not allowed if iterating
    # over the original list.
    for secrethash in list(payment_state.initiator_transfers.keys()):
        initiator_state = payment_state.initiator_transfers[secrethash]
        sub_iteration = subdispatch_to_initiatortransfer(
            payment_state=payment_state,
            initiator_state=initiator_state,
            state_change=state_change,
            channelidentifiers_to_channels=channelidentifiers_to_channels,
            pseudo_random_generator=pseudo_random_generator,
        )
        events.extend(sub_iteration.events)
    return TransitionResult(payment_state, events)
python
def __get_header_with_auth(self):
    """
    This private method returns the HTTP header filled with the Authorization information with the
    user token. The token validity is monitored whenever this function is called. According to the
    swagger page of TheTVDB (https://api.thetvdb.com/swagger), tokens are valid for 24 hours, so if
    a token was generated more than 23 hours ago, this function performs a token refresh using
    TheTVDB refresh_token API. If over 24 hours have passed since the token generation, a login is
    performed to generate a new one, instead.

    :return: A python dictionary representing the HTTP header to be used in TheTVDB API calls.
    """
    auth_header = self.__get_header()
    auth_header['Authorization'] = 'Bearer %s' % self.__token

    token_renew_time = self.__auth_time + timedelta(seconds=self.TOKEN_DURATION_SECONDS)
    if datetime.now() > token_renew_time:
        token_max_time = self.__auth_time + timedelta(seconds=self.TOKEN_MAX_DURATION)
        if datetime.now() < token_max_time:
            self.__refresh_token()
        else:
            self.login()
        auth_header['Authorization'] = 'Bearer %s' % self.__token

    return auth_header
java
public WrappedByteBuffer compact() {
    int remaining = remaining();
    int capacity = capacity();

    if (capacity == 0) {
        return this;
    }

    if (remaining <= capacity >>> 2 && capacity > _minimumCapacity) {
        int newCapacity = capacity;
        int minCapacity = max(_minimumCapacity, remaining << 1);
        for (;;) {
            if (newCapacity >>> 1 < minCapacity) {
                break;
            }
            newCapacity >>>= 1;
        }

        newCapacity = max(minCapacity, newCapacity);
        if (newCapacity == capacity) {
            if (_buf.remaining() == 0) {
                _buf.position(0);
                _buf.limit(_buf.capacity());
            } else {
                java.nio.ByteBuffer dup = _buf.duplicate();
                _buf.position(0);
                _buf.limit(_buf.capacity());
                _buf.put(dup);
            }
            return this;
        }

        // Shrink and compact:
        //
        // Save the state.
        ByteOrder bo = order();

        // Sanity check.
        if (remaining > newCapacity) {
            throw new IllegalStateException("The amount of the remaining bytes is greater than "
                    + "the new capacity.");
        }

        // Reallocate.
        java.nio.ByteBuffer oldBuf = _buf;
        java.nio.ByteBuffer newBuf = java.nio.ByteBuffer.allocate(newCapacity);
        newBuf.put(oldBuf);
        _buf = newBuf;

        // Restore the state.
        _buf.order(bo);
    } else {
        _buf.compact();
    }
    return this;
}
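A minimal Python sketch (a hypothetical helper, not from the source) of the capacity-halving search in compact() above: the capacity is repeatedly halved while it stays at least twice the minimum required capacity.

def shrink_capacity(capacity, remaining, minimum_capacity):
    # Mirrors the for(;;) loop: halve until one more halving would
    # drop below max(minimum_capacity, remaining * 2).
    min_capacity = max(minimum_capacity, remaining << 1)
    new_capacity = capacity
    while (new_capacity >> 1) >= min_capacity:
        new_capacity >>= 1
    return max(min_capacity, new_capacity)

print(shrink_capacity(1024, 100, 64))  # 256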
python
def request(self, config, format='xml', target='candidate',
            default_operation=None, test_option=None, error_option=None):
    """Loads all or part of the specified *config* to the *target*
    configuration datastore.

    *target* is the name of the configuration datastore being edited

    *config* is the configuration, which must be rooted in the `config`
    element. It can be specified either as a string or an
    :class:`~xml.etree.ElementTree.Element`.

    *default_operation* if specified must be one of
    { `"merge"`, `"replace"`, or `"none"` }

    *test_option* if specified must be one of { `"test_then_set"`, `"set"` }

    *error_option* if specified must be one of
    { `"stop-on-error"`, `"continue-on-error"`, `"rollback-on-error"` }

    The `"rollback-on-error"` *error_option* depends on the
    `:rollback-on-error` capability.
    """
    node = new_ele("edit-config")
    node.append(util.datastore_or_url("target", target, self._assert))
    if error_option is not None:
        if error_option == "rollback-on-error":
            self._assert(":rollback-on-error")
        sub_ele(node, "error-option").text = error_option
    if test_option is not None:
        self._assert(':validate')
        sub_ele(node, "test-option").text = test_option
    if default_operation is not None:
        # TODO: check if it is a valid default-operation
        sub_ele(node, "default-operation").text = default_operation
    if format == 'xml':
        node.append(validated_element(config, ("config", qualify("config"))))
    if format == 'text':
        config_text = sub_ele(node, "config-text")
        sub_ele(config_text, "configuration-text").text = config
    return self._request(node)
java
public static HELM2Notation getSirnaNotation(String senseSeq, String antiSenseSeq, String rnaDesignType)
        throws NotationException, FastaFormatException, HELM2HandledException, RNAUtilsException,
        org.helm.notation2.exception.NotationException, ChemistryException, CTKException,
        NucleotideLoadingException {

    HELM2Notation helm2notation = null;
    if (senseSeq != null && senseSeq.length() > 0) {
        helm2notation = SequenceConverter.readRNA(senseSeq);
    }
    if (antiSenseSeq != null && antiSenseSeq.length() > 0) {
        PolymerNotation antisense = new PolymerNotation("RNA2");
        antisense = new PolymerNotation(antisense.getPolymerID(),
                FastaFormat.generateElementsforRNA(antiSenseSeq, antisense.getPolymerID()));
        helm2notation.addPolymer(antisense);
    }

    validateSiRNADesign(helm2notation.getListOfPolymers().get(0),
            helm2notation.getListOfPolymers().get(1), rnaDesignType);
    helm2notation.getListOfConnections().addAll(
            hybridization(helm2notation.getListOfPolymers().get(0),
                    helm2notation.getListOfPolymers().get(1), rnaDesignType));

    ChangeObjects.addAnnotation(new AnnotationNotation("RNA1{ss}|RNA2{as}"), 0, helm2notation);
    return helm2notation;
}
java
public static ConnectionHandler register(final String id, final StatementHandler handler) {
    if (handler == null) {
        throw new IllegalArgumentException("Invalid handler: " + handler);
    } // end of if

    return register(id, new ConnectionHandler.Default(handler));
}
python
def unfold(tensor, mode):
    """Returns the mode-`mode` unfolding of `tensor`.

    Parameters
    ----------
    tensor : ndarray
    mode : int

    Returns
    -------
    ndarray
        unfolded_tensor of shape ``(tensor.shape[mode], -1)``

    Author
    ------
    Jean Kossaifi <https://github.com/tensorly>
    """
    return np.moveaxis(tensor, mode, 0).reshape((tensor.shape[mode], -1))
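A small demonstration of unfold() on a 2x3x4 tensor (assuming the function above is in scope): the mode axis becomes the leading dimension and everything else is flattened.

import numpy as np

t = np.arange(24).reshape(2, 3, 4)
print(unfold(t, 0).shape)  # (2, 12)
print(unfold(t, 1).shape)  # (3, 8)
print(unfold(t, 2).shape)  # (4, 6)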
java
protected String tableName(Class<?> entityClass) {
    EntityTable entityTable = EntityHelper.getEntityTable(entityClass);
    String prefix = entityTable.getPrefix();
    if (StringUtil.isEmpty(prefix)) {
        // fall back to the global configuration
        prefix = mapperHelper.getConfig().getPrefix();
    }
    if (StringUtil.isNotEmpty(prefix)) {
        return prefix + "." + entityTable.getName();
    }
    return entityTable.getName();
}
java
protected DoubleMatrix1D gradFiStepX(DoubleMatrix1D stepX) {
    DoubleMatrix1D ret = F1.make(getMieq());
    for (int i = 0; i < getDim(); i++) {
        ret.setQuick(i, -stepX.getQuick(i));
        ret.setQuick(getDim() + i, stepX.getQuick(i));
    }
    return ret;
}
python
def jsonmget(self, path, *args):
    """
    Gets the objects stored as JSON values under ``path`` from the
    keys given in ``args``
    """
    pieces = []
    pieces.extend(args)
    pieces.append(str_path(path))
    return self.execute_command('JSON.MGET', *pieces)
java
protected void updateMetricCounters(String metricName, Map<String, Integer> metricNameCounters) {
    if (metricNameCounters.containsKey(metricName)) {
        metricNameCounters.put(metricName, metricNameCounters.get(metricName) + 1);
    } else {
        metricNameCounters.put(metricName, 1);
    }
}
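As a cross-language aside, the same upsert-a-counter pattern collapses to one line in Python via dict.get:

counters = {}
for name in ['requests', 'errors', 'requests']:
    counters[name] = counters.get(name, 0) + 1
print(counters)  # {'requests': 2, 'errors': 1}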
java
public boolean isInState(JComponent c) {
    Component parent = c;
    while (parent.getParent() != null) {
        if (parent instanceof RootPaneContainer) {
            break;
        }
        parent = parent.getParent();
    }
    if (parent instanceof JFrame) {
        return (((JFrame) parent).getExtendedState() & Frame.MAXIMIZED_BOTH) != 0;
    } else if (parent instanceof JInternalFrame) {
        return ((JInternalFrame) parent).isMaximum();
    }
    return false;
}
java
private static String keyToGoWithElementsString(String label, DuplicateGroupedAndTyped elements) {
    /*
     * elements.toString(), which the caller is going to use, includes the homogeneous type (if
     * any), so we don't want to include it here. (And it's better to have it in the value, rather
     * than in the key, so that it doesn't push the horizontally aligned values over too far.)
     */
    return lenientFormat("%s (%s)", label, elements.totalCopies());
}
python
def _find_base_type(data_type):
    """Find the Nani's base type for a given data type.

    This is useful when Nani's data types were subclassed and the original
    type is required.
    """
    bases = type(data_type).__mro__
    for base in bases:
        if base in _ALL:
            return base
    return None
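A self-contained illustration of the MRO walk used above (class names here are invented for the demo):

class Base:
    pass

class Sub(Base):
    pass

_KNOWN_BASES = {Base}

def find_base(obj):
    # Walk the method resolution order until a registered base is found.
    for base in type(obj).__mro__:
        if base in _KNOWN_BASES:
            return base
    return None

print(find_base(Sub()))  # <class '__main__.Base'>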
java
public void deleteLaunchConfiguration(String launchConfigName) {
    final AmazonAutoScaling autoScaling = getAmazonAutoScalingClient();
    final DeleteLaunchConfigurationRequest deleteLaunchConfigurationRequest =
            new DeleteLaunchConfigurationRequest().withLaunchConfigurationName(launchConfigName);
    autoScaling.deleteLaunchConfiguration(deleteLaunchConfigurationRequest);
    LOGGER.info("Deleted Launch Configuration: " + launchConfigName);
}
python
def min_date(self, symbol):
    """
    Return the minimum datetime stored for a particular symbol

    Parameters
    ----------
    symbol : `str`
        symbol name for the item
    """
    res = self._collection.find_one({SYMBOL: symbol},
                                    projection={ID: 0, START: 1},
                                    sort=[(START, pymongo.ASCENDING)])
    if res is None:
        raise NoDataFoundException("No Data found for {}".format(symbol))
    return utc_dt_to_local_dt(res[START])
python
def spawn_background_process(func, *args, **kwargs):
    """
    Run a function in the background
    (like rebuilding some costly data structure)

    References:
        http://stackoverflow.com/questions/2046603/is-it-possible-to-run-function-in-a-subprocess-without-threading-or-writing-a-se
        http://stackoverflow.com/questions/1196074/starting-a-background-process-in-python
        http://stackoverflow.com/questions/15063963/python-is-thread-still-running

    Args:
        func (function):

    CommandLine:
        python -m utool.util_parallel --test-spawn_background_process

    Example:
        >>> # DISABLE_DOCTEST
        >>> # SLOW_DOCTEST
        >>> from utool.util_parallel import *  # NOQA
        >>> import utool as ut
        >>> import time
        >>> from os.path import join
        >>> # build test data
        >>> fname = 'test_bgfunc_output.txt'
        >>> dpath = ut.get_app_resource_dir('utool')
        >>> ut.ensuredir(dpath)
        >>> fpath = join(dpath, fname)
        >>> # ensure file is not around
        >>> sleep_time = 1
        >>> ut.delete(fpath)
        >>> assert not ut.checkpath(fpath, verbose=True)
        >>> def backgrond_func(fpath, sleep_time):
        ...     import utool as ut
        ...     import time
        ...     print('[BG] Background Process has started')
        ...     time.sleep(sleep_time)
        ...     print('[BG] Background Process is writing')
        ...     ut.write_to(fpath, 'background process')
        ...     print('[BG] Background Process has finished')
        ...     #raise AssertionError('test exception')
        >>> # execute function
        >>> func = backgrond_func
        >>> args = (fpath, sleep_time)
        >>> kwargs = {}
        >>> print('[FG] Spawning process')
        >>> threadid = ut.spawn_background_process(func, *args, **kwargs)
        >>> assert threadid.is_alive() is True, 'thread should be active'
        >>> print('[FG] Spawned process. threadid=%r' % (threadid,))
        >>> # background process should not have finished yet
        >>> assert not ut.checkpath(fpath, verbose=True)
        >>> print('[FG] Waiting to check')
        >>> time.sleep(sleep_time + .1)
        >>> print('[FG] Finished waiting')
        >>> # Now the file should be there
        >>> assert ut.checkpath(fpath, verbose=True)
        >>> assert threadid.is_alive() is False, 'process should have died'
    """
    import utool as ut
    func_name = ut.get_funcname(func)
    name = 'mp.Progress-' + func_name
    #proc_obj = multiprocessing.Process(target=func, name=name, args=args, kwargs=kwargs)
    proc_obj = KillableProcess(target=func, name=name, args=args, kwargs=kwargs)
    #proc_obj.daemon = True
    #proc_obj.isAlive = proc_obj.is_alive
    proc_obj.start()
    return proc_obj
python
def fetch(self, obj, include_meta=False, chunk_size=None, size=None,
          extra_info=None):
    """
    Fetches the object from storage.

    If 'include_meta' is False, only the bytes representing the
    stored object are returned.

    Note: if 'chunk_size' is defined, you must fully read the object's
    contents before making another request.

    If 'size' is specified, only the first 'size' bytes of the object will
    be returned. If the object is smaller than 'size', the entire object is
    returned.

    When 'include_meta' is True, what is returned from this method is a
    2-tuple:
        Element 0: a dictionary containing metadata about the file.
        Element 1: a stream of bytes representing the object's contents.

    The 'extra_info' parameter is included for backwards compatibility. It
    is no longer used at all, and will not be modified with swiftclient
    info, since swiftclient is not used any more.
    """
    return self.object_manager.fetch(obj, include_meta=include_meta,
                                     chunk_size=chunk_size, size=size)
java
synchronized void computeTrees() {
    boolean changed;
    try {
        createBuffers();
        do {
            switchBuffers();
            changed = computeOneLevel();
            //System.out.println("Tree obj. "+heap.idToOffsetMap.treeObj);
            //if (changed) System.out.println("Next level "+nextLevelSize);
        } while (changed);
    } catch (IOException ex) {
        ex.printStackTrace();
    }
    deleteBuffers();
    //System.out.println("Done!");
}
python
def metadata_extractor(self):
    """Returns an instance of proper MetadataExtractor subclass.

    Always returns the same instance.

    Returns:
        The proper MetadataExtractor subclass according to local file
        suffix.
    """
    if not hasattr(self, '_local_file'):
        raise AttributeError("local_file attribute must be set before "
                             "calling metadata_extractor")
    if not hasattr(self, '_metadata_extractor'):
        if self.local_file.endswith('.whl'):
            logger.info("Getting metadata from wheel using "
                        "WheelMetadataExtractor.")
            extractor_cls = metadata_extractors.WheelMetadataExtractor
        else:
            logger.info("Getting metadata from setup.py using "
                        "SetupPyMetadataExtractor.")
            extractor_cls = metadata_extractors.SetupPyMetadataExtractor
        base_python_version = (
            self.base_python_version or self.template_base_py_ver)
        self._metadata_extractor = extractor_cls(
            self.local_file,
            self.name,
            self.name_convertor,
            self.version,
            self.rpm_name,
            self.venv,
            base_python_version)
    return self._metadata_extractor
java
List<Long> includeUpdateBatch(TableKeyBatch batch, long batchOffset, int generation) {
    val result = new ArrayList<Long>(batch.getItems().size());
    synchronized (this) {
        for (TableKeyBatch.Item item : batch.getItems()) {
            long itemOffset = batchOffset + item.getOffset();
            CacheBucketOffset existingOffset = get(item.getHash(), generation);
            if (existingOffset == null || itemOffset > existingOffset.getSegmentOffset()) {
                // We have no previous entry, or we do and the current offset is higher, so it prevails.
                this.tailOffsets.put(item.getHash(), new CacheBucketOffset(itemOffset, batch.isRemoval()));
                result.add(itemOffset);
            } else {
                // Current offset is lower.
                result.add(existingOffset.getSegmentOffset());
            }
            if (existingOffset != null) {
                // Only record a backpointer if we have a previous location to point to.
                this.backpointers.put(itemOffset, existingOffset.getSegmentOffset());
            }
        }
    }
    return result;
}
java
public BeanType<ValidationMappingDescriptor> getOrCreateBean() {
    List<Node> nodeList = model.get("bean");
    if (nodeList != null && nodeList.size() > 0) {
        return new BeanTypeImpl<ValidationMappingDescriptor>(this, "bean", model, nodeList.get(0));
    }
    return createBean();
}
python
def robust_backtrack(self):
    """Estimate step size L by computing a linesearch that guarantees
    that F <= Q according to the robust FISTA backtracking strategy
    in :cite:`florea-2017-robust`. This also updates all the
    supporting variables.
    """
    self.L *= self.L_gamma_d
    maxiter = self.L_maxiter

    iterBTrack = 0
    linesearch = 1
    self.store_Yprev()
    while linesearch and iterBTrack < maxiter:
        t = float(1. + np.sqrt(1. + 4. * self.L * self.Tk)) / (2. * self.L)
        T = self.Tk + t
        y = (self.Tk * self.var_xprv() + t * self.ZZ) / T
        self.update_var_y(y)

        gradY = self.proximal_step()  # Given Y(f), L, this updates X(f)

        f = self.obfn_f(self.var_x())
        Dxy = self.eval_Dxy()
        Q = self.obfn_f(self.var_y()) + \
            self.eval_linear_approx(Dxy, gradY) + \
            (self.L / 2.) * np.linalg.norm(Dxy.flatten(), 2) ** 2

        if f <= Q:
            linesearch = 0
        else:
            self.L *= self.L_gamma_u

        iterBTrack += 1

    self.Tk = T
    self.ZZ += (t * self.L * (self.var_x() - self.var_y()))

    self.F = f
    self.Q = Q
    self.iterBTrack = iterBTrack
java
public final void intialize() throws IllegalStateException {
    synchronized (this) {
        if (tracker != null) {
            throw new IllegalStateException("DelegatingComponentInstanciationListener ["
                    + this + "] has already been initialized.");
        }
        tracker = new ComponentInstanciationListenerTracker(context, applicationName);
        tracker.open();
    }
}
python
def get_vsan_enabled(host, username, password, protocol=None, port=None, host_names=None):
    '''
    Get the VSAN enabled status for a given host or a list of host_names. Returns ``True``
    if VSAN is enabled, ``False`` if it is not enabled, and ``None`` if a VSAN Host Config
    is unset, per host.

    host
        The location of the host.

    username
        The username used to login to the host, such as ``root``.

    password
        The password used to login to the host.

    protocol
        Optionally set to alternate protocol if the host is not using the default
        protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the host is not using the default
        port. Default port is ``443``.

    host_names
        List of ESXi host names. When the host, username, and password credentials
        are provided for a vCenter Server, the host_names argument is required to
        tell vCenter which hosts to check if VSAN enabled.

        If host_names is not provided, the VSAN status will be retrieved for the
        ``host`` location instead. This is useful for when service instance
        connection information is used for a single ESXi host.

    CLI Example:

    .. code-block:: bash

        # Used for single ESXi host connection information
        salt '*' vsphere.get_vsan_enabled my.esxi.host root bad-password

        # Used for connecting to a vCenter Server
        salt '*' vsphere.get_vsan_enabled my.vcenter.location root bad-password \
            host_names='[esxi-1.host.com, esxi-2.host.com]'
    '''
    service_instance = salt.utils.vmware.get_service_instance(host=host,
                                                              username=username,
                                                              password=password,
                                                              protocol=protocol,
                                                              port=port)
    host_names = _check_hosts(service_instance, host, host_names)
    ret = {}
    for host_name in host_names:
        host_ref = _get_host_ref(service_instance, host, host_name=host_name)
        vsan_config = host_ref.config.vsanHostConfig

        # We must have a VSAN Config in place to get information about VSAN state.
        if vsan_config is None:
            msg = 'VSAN System Config Manager is unset for host \'{0}\'.'.format(host_name)
            log.debug(msg)
            ret.update({host_name: {'Error': msg}})
        else:
            ret.update({host_name: {'VSAN Enabled': vsan_config.enabled}})

    return ret
java
protected void initCDIIntegration(
        ServletContext servletContext, ExternalContext externalContext) {
    // Lookup bean manager and put it into an application scope attribute to
    // access it later. Remember the trick here is do not call any CDI api
    // directly, so if no CDI api is on the classpath no exception will be thrown.

    // Try with servlet context
    Object beanManager = servletContext.getAttribute(
            CDI_SERVLET_CONTEXT_BEAN_MANAGER_ATTRIBUTE);
    if (beanManager == null) {
        // Use reflection to avoid restricted API in GAE
        Class icclazz = null;
        Method lookupMethod = null;
        try {
            icclazz = ClassUtils.simpleClassForName("javax.naming.InitialContext");
            if (icclazz != null) {
                lookupMethod = icclazz.getMethod("doLookup", String.class);
            }
        } catch (Throwable t) {
            //
        }
        if (lookupMethod != null) {
            // Try with JNDI
            try {
                // in an application server
                //beanManager = InitialContext.doLookup("java:comp/BeanManager");
                beanManager = lookupMethod.invoke(icclazz, "java:comp/BeanManager");
            } catch (Exception e) {
                // silently ignore
            } catch (NoClassDefFoundError e) {
                //On Google App Engine, javax.naming.Context is a restricted class.
                //In that case, NoClassDefFoundError is thrown. stageName needs to be
                //configured below by context parameter.
            }
            if (beanManager == null) {
                try {
                    // in a servlet container
                    //beanManager = InitialContext.doLookup("java:comp/env/BeanManager");
                    beanManager = lookupMethod.invoke(icclazz, "java:comp/env/BeanManager");
                } catch (Exception e) {
                    // silently ignore
                } catch (NoClassDefFoundError e) {
                    //On Google App Engine, javax.naming.Context is a restricted class.
                    //In that case, NoClassDefFoundError is thrown. stageName needs to be
                    //configured below by context parameter.
                }
            }
        }
    }
    if (beanManager != null) {
        externalContext.getApplicationMap().put(CDI_BEAN_MANAGER_INSTANCE, beanManager);
    }
}
python
def createObjBuilders(env):
    """This is a utility function that creates the StaticObject and
    SharedObject Builders in an Environment if they are not there already.

    If they are there already, we return the existing ones.

    This is a separate function because soooo many Tools use this
    functionality.

    The return is a 2-tuple of (StaticObject, SharedObject)
    """

    try:
        static_obj = env['BUILDERS']['StaticObject']
    except KeyError:
        static_obj = SCons.Builder.Builder(action={},
                                           emitter={},
                                           prefix='$OBJPREFIX',
                                           suffix='$OBJSUFFIX',
                                           src_builder=['CFile', 'CXXFile'],
                                           source_scanner=SourceFileScanner,
                                           single_source=1)
        env['BUILDERS']['StaticObject'] = static_obj
        env['BUILDERS']['Object'] = static_obj

    try:
        shared_obj = env['BUILDERS']['SharedObject']
    except KeyError:
        shared_obj = SCons.Builder.Builder(action={},
                                           emitter={},
                                           prefix='$SHOBJPREFIX',
                                           suffix='$SHOBJSUFFIX',
                                           src_builder=['CFile', 'CXXFile'],
                                           source_scanner=SourceFileScanner,
                                           single_source=1)
        env['BUILDERS']['SharedObject'] = shared_obj

    return (static_obj, shared_obj)
java
int getChainUserIndex(int chain, int index) {
    int i = getChainIndex_(chain);
    AttributeStreamOfInt32 stream = m_chainIndices.get(index);
    if (stream.size() <= i)
        return -1;
    return stream.read(i);
}
python
def _find_parent_directory(directory, filename):
    """Find a directory in parent tree with a specific filename

    :param directory: directory name to find
    :param filename: filename to find
    :returns: absolute directory path
    """
    parent_directory = directory
    absolute_directory = '.'
    while absolute_directory != os.path.abspath(parent_directory):
        absolute_directory = os.path.abspath(parent_directory)
        if os.path.isfile(os.path.join(absolute_directory, filename)):
            return absolute_directory
        if os.path.isabs(parent_directory):
            parent_directory = os.path.join(os.path.dirname(parent_directory),
                                            '..',
                                            os.path.basename(parent_directory))
        else:
            parent_directory = os.path.join('..', parent_directory)
    return os.path.abspath(directory)
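A hypothetical demo of the search (directory layout invented for illustration): given the relative name 'cfg', the helper walks upward from the current directory until it finds a 'cfg' directory containing the marker file.

import os
import tempfile

top = tempfile.mkdtemp()
os.makedirs(os.path.join(top, 'cfg'))
open(os.path.join(top, 'cfg', 'marker.txt'), 'w').close()
os.makedirs(os.path.join(top, 'a', 'b'))
os.chdir(os.path.join(top, 'a', 'b'))
print(_find_parent_directory('cfg', 'marker.txt'))  # <top>/cfg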
java
public static <T> Collection<T> filter(Collection<T> collection, Editor<T> editor) {
    if (null == collection || null == editor) {
        return collection;
    }

    Collection<T> collection2 = ObjectUtil.clone(collection);
    try {
        collection2.clear();
    } catch (UnsupportedOperationException e) {
        // The cloned object does not support clear(), so it is an immutable
        // collection; fall back to a plain ArrayList to hold the result.
        collection2 = new ArrayList<>();
    }

    T modified;
    for (T t : collection) {
        modified = editor.edit(t);
        if (null != modified) {
            collection2.add(modified);
        }
    }
    return collection2;
}
java
public static String stripNonValidXMLCharacters(String input) {
    if (input == null || ("".equals(input)))
        return "";
    StringBuilder out = new StringBuilder();
    char current;
    for (int i = 0; i < input.length(); i++) {
        current = input.charAt(i);
        if ((current == 0x9) || (current == 0xA) || (current == 0xD)
                || ((current >= 0x20) && (current <= 0xD7FF))
                || ((current >= 0xE000) && (current <= 0xFFFD))
                || ((current >= 0x10000) && (current <= 0x10FFFF)))
            out.append(current);
    }
    return out.toString();
}
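A rough Python counterpart of the Java filter above, assuming the same XML 1.0 valid-character ranges:

def strip_non_valid_xml_characters(text):
    def valid(cp):
        return (cp in (0x9, 0xA, 0xD)
                or 0x20 <= cp <= 0xD7FF
                or 0xE000 <= cp <= 0xFFFD
                or 0x10000 <= cp <= 0x10FFFF)
    return ''.join(ch for ch in (text or '') if valid(ord(ch)))

print(strip_non_valid_xml_characters('ok\x00bad\x0b'))  # 'okbad'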
python
def get_uid_state(self, id_or_uri):
    """
    Retrieves the unit identification (UID) state (on, off, unknown) of the specified
    power outlet or extension bar resource. The device must be an HP iPDU component
    with a locator light (HP Intelligent Load Segment, HP AC Module, HP Intelligent
    Outlet Bar, or HP Intelligent Outlet).

    Args:
        id_or_uri:
            Can be either the power device id or the uri

    Returns:
        str: unit identification (UID) state
    """
    uri = self._client.build_uri(id_or_uri) + "/uidState"
    return self._client.get(uri)
python
def probability(self, direction, mechanism, purview):
    """Probability that the purview is in its current state given the
    state of the mechanism.
    """
    repertoire = self.repertoire(direction, mechanism, purview)
    return self.state_probability(direction, repertoire, purview)
python
def analyze(self, scratch, **kwargs):
    """Run and return the results from the BroadcastReceive plugin."""
    all_scripts = list(self.iter_scripts(scratch))
    results = defaultdict(set)
    broadcast = dict((x, self.get_broadcast_events(x))  # Events by script
                     for x in all_scripts)
    correct = self.get_receive(all_scripts)
    results['never broadcast'] = set(correct.keys())

    for script, events in broadcast.items():
        for event in list(events.keys()):  # copy keys: entries are deleted below
            if event is True:  # Remove dynamic broadcasts
                results['dynamic broadcast'].add(script.morph.name)
                del events[event]
            elif event in correct:
                results['never broadcast'].discard(event)
            else:
                results['never received'].add(event)

    # remove events from correct dict that were never broadcast
    for event in list(correct.keys()):  # copy keys: entries are deleted below
        if event in results['never broadcast']:
            del correct[event]

    # Find scripts that have more than one broadcast event on any possible
    # execution path through the program
    # TODO: Permit mutually exclusive broadcasts
    for events in broadcast.values():
        if len(events) > 1:
            for event in events:
                if event in correct:
                    results['parallel broadcasts'].add(event)
                    del correct[event]

    # Find events that have two (or more) receivers in which one of the
    # receivers has a "delay" block
    for event, scripts in list(correct.items()):  # copy items: entries are deleted below
        if len(scripts) > 1:
            for script in scripts:
                for _, _, block in self.iter_blocks(script.blocks):
                    if block.type.shape == 'stack':
                        results['multiple receivers with delay'].add(event)
                        if event in correct:
                            del correct[event]

    results['success'] = set(correct.keys())
    return {'broadcast': results}
python
def get_column(self, column_name, column_type, index, verbose=True):
    """Summary

    Args:
        column_name (TYPE): Description
        column_type (TYPE): Description
        index (TYPE): Description

    Returns:
        TYPE: Description
    """
    return LazyOpResult(
        grizzly_impl.get_column(
            self.expr,
            self.weld_type,
            index
        ),
        column_type,
        1
    )
python
def span_context_from_string(value):
    """
    Decode a span context from a string into its components.

    Raises SpanContextCorruptedException if the string value is malformed.

    :param value: formatted {trace_id}:{span_id}:{parent_id}:{flags}
    """
    if type(value) is list and len(value) > 0:
        # sometimes headers are presented as arrays of values
        if len(value) > 1:
            raise SpanContextCorruptedException(
                'trace context must be a string or array of 1: "%s"' % value)
        value = value[0]
    if not isinstance(value, six.string_types):
        raise SpanContextCorruptedException(
            'trace context not a string "%s"' % value)
    parts = value.split(':')
    if len(parts) != 4:
        raise SpanContextCorruptedException(
            'malformed trace context "%s"' % value)
    try:
        trace_id = int(parts[0], 16)
        span_id = int(parts[1], 16)
        parent_id = int(parts[2], 16)
        flags = int(parts[3], 16)
        if trace_id < 1 or span_id < 1 or parent_id < 0 or flags < 0:
            raise SpanContextCorruptedException(
                'malformed trace context "%s"' % value)
        if parent_id == 0:
            parent_id = None
        return trace_id, span_id, parent_id, flags
    except ValueError as e:
        raise SpanContextCorruptedException(
            'malformed trace context "%s": %s' % (value, e))
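An example round-trip for the {trace_id}:{span_id}:{parent_id}:{flags} format (fields are hex; a parent id of 0 decodes to None), assuming the function above is importable:

print(span_context_from_string('abc:def:0:1'))  # (2748, 3567, None, 1)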
python
def pandas_df(self):
    """
    Returns pandas DataFrame containing pixel counts for all truth classes,
    classified classes (for each truth class), and file name of the input
    EODataSet.

    The data frame thus contains N = self.n_validation_sets rows and
    M = len(self.truth_classes) + len(self.truth_classes) * len(self.class_dictionary) + 1
    columns
    """
    if self.val_df is not None:
        return self.val_df

    clf = self.pixel_classification_counts.reshape(
        self.pixel_classification_counts.shape[0],
        self.pixel_classification_counts.shape[1] * self.pixel_classification_counts.shape[2])
    combo = np.hstack((self.pixel_truth_counts, clf))

    columns = list(itertools.product(self.truth_classes, list(self.class_dictionary.keys())))
    columns = [(item[0] + '_as_' + item[1]).replace(" ", "_") for item in columns]
    truth_columns = ['truth_' + item.replace(" ", "_") for item in self.truth_classes]

    self.val_df = pd.DataFrame(combo, columns=truth_columns + columns)

    return self.val_df
java
public Result<V,E> put(K key, Refresher<? super K, ? extends V, ? extends E> refresher) {
    Result<V,E> result = runRefresher(refresher, key);
    put(key, refresher, result);
    return result;
}
python
def request_sync_events(blink, network):
    """
    Request events from sync module.

    :param blink: Blink instance.
    :param network: Sync module network id.
    """
    url = "{}/events/network/{}".format(blink.urls.base_url, network)
    return http_get(blink, url)
python
def add_configuration_file(self, file_name):
    '''Register a file path from which to read parameter values.

    This method can be called multiple times to register multiple files for
    querying. Files are expected to be ``ini`` formatted.

    No assumptions should be made about the order that the registered files
    are read and values defined in multiple files may have unpredictable
    results.

    **Arguments**

    :``file_name``: Name of the file to add to the parameter search.
    '''
    logger.info('adding %s to configuration files', file_name)

    if file_name not in self.configuration_files and self._inotify:
        self._watch_manager.add_watch(file_name, pyinotify.IN_MODIFY)

    if os.access(file_name, os.R_OK):
        self.configuration_files[file_name] = SafeConfigParser()
        self.configuration_files[file_name].read(file_name)
    else:
        logger.warn('could not read %s', file_name)
        warnings.warn('could not read {}'.format(file_name), ResourceWarning)
python
def _add_event_in_element(self, element, event):
    """
    Add a type of event in element.

    :param element: The element.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    :param event: The type of event.
    :type event: str
    """
    if not self.main_script_added:
        self._generate_main_scripts()
    if self.script_list is not None:
        self.id_generator.generate_id(element)
        self.script_list.append_text(
            event + "Elements.push('" + element.get_attribute('id') + "');")
java
@Override
public QOr appendSQL(final SQLSelect _sql) throws EFapsException {
    _sql.addPart(SQLPart.PARENTHESIS_OPEN);
    boolean first = true;
    for (final AbstractQPart part : getParts()) {
        if (first) {
            first = false;
        } else {
            _sql.addPart(SQLPart.OR);
        }
        part.appendSQL(_sql);
    }
    _sql.addPart(SQLPart.PARENTHESIS_CLOSE);
    return this;
}
python
def _set_interface_PO_ospf_conf(self, v, load=False):
    """
    Setter method for interface_PO_ospf_conf, mapped from YANG variable
    /interface/port_channel/ip/interface_PO_ospf_conf (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_interface_PO_ospf_conf is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_interface_PO_ospf_conf() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=interface_PO_ospf_conf.interface_PO_ospf_conf, is_container='container', presence=False, yang_name="interface-PO-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFPoInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """interface_PO_ospf_conf must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=interface_PO_ospf_conf.interface_PO_ospf_conf, is_container='container', presence=False, yang_name="interface-PO-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFPoInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""",
        })

    self.__interface_PO_ospf_conf = t
    if hasattr(self, '_set'):
        self._set()
python
def fail(self, key, **kwargs): """A helper method that simply raises a `ValidationError`. """ try: msg = self.error_messages[key] except KeyError: class_name = self.__class__.__name__ msg = MISSING_ERROR_MESSAGE.format(class_name=class_name, key=key) raise AssertionError(msg) if isinstance(msg, str): msg = msg.format(**kwargs) raise exceptions.ValidationError(msg, self.field_name)
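A sketch of how a field subclass would use `fail()`, in the style of Django REST Framework; `EvenField`, its base class `Field`, and the 'odd' message key are all hypothetical:

class EvenField(Field):  # hypothetical Field subclass
    default_error_messages = {
        'odd': 'Expected an even number, got {value}.',
    }

    def to_internal_value(self, data):
        if data % 2:
            # Raises ValidationError with the formatted 'odd' message;
            # an unknown key would raise AssertionError instead.
            self.fail('odd', value=data)
        return data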
python
def torrent_from_url(self, url, cache=True, prefetch=False): """Create a Torrent object from a given URL. If the cache option is set, check to see if we already have a Torrent object representing it. If prefetch is set, automatically query the torrent's info page to fill in the torrent object. (If prefetch is false, then the torrent page will be queried lazily on-demand.) """ if self._use_cache(cache) and url in self._torrent_cache: return self._torrent_cache[url] torrent = Torrent(url, cache, prefetch) if cache: self._torrent_cache[url] = torrent return torrent
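The cache semantics in a sketch; `TorrentClient` and the URL are placeholders:

url = 'http://tracker.example/torrents/123'      # placeholder URL
client = TorrentClient()                         # hypothetical owner of torrent_from_url()
t1 = client.torrent_from_url(url)                # built, then cached
t2 = client.torrent_from_url(url)                # same object, served from the cache
assert t1 is t2
t3 = client.torrent_from_url(url, cache=False)   # neither read from nor stored in the cache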
python
def _available(name, ret): ''' Check if the service is available ''' avail = False if 'service.available' in __salt__: avail = __salt__['service.available'](name) elif 'service.get_all' in __salt__: avail = name in __salt__['service.get_all']() if not avail: ret['result'] = False ret['comment'] = 'The named service {0} is not available'.format(name) return avail
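A sketch of the call site inside a Salt state function, where `_available()` short-circuits before any real work happens (the state body is abbreviated):

def running(name, **kwargs):
    ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''}
    if not _available(name, ret):
        return ret  # result and comment were already filled in by _available()
    # ... start the service, record ret['changes'] ...
    return ret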
java
public List<IntentClassifier> listCustomPrebuiltIntents(UUID appId, String versionId) { return listCustomPrebuiltIntentsWithServiceResponseAsync(appId, versionId).toBlocking().single().body(); }
java
public SipURI createSipURI(String user, String host) { if (logger.isDebugEnabled()) { logger.debug("Creating SipURI from USER[" + user + "] HOST[" + host + "]"); } // Fix for http://code.google.com/p/sipservlets/issues/detail?id=145 if(user != null && user.trim().isEmpty()) { user = null; } try { return new SipURIImpl(SipFactoryImpl.addressFactory.createSipURI( user, host), ModifiableRule.Modifiable); } catch (ParseException e) { logger.error("couldn't parse the SipURI from USER[" + user + "] HOST[" + host + "]", e); throw new IllegalArgumentException("Could not create SIP URI user = " + user + " host = " + host); } }
python
def detect_blob(self, img, filters):
    """
    "filters" must be something similar to:

        filters = {
            'R': (150, 255),  # (min, max)
            'S': (150, 255),
        }
    """
    # `ones` and `uint8` are assumed to be imported from numpy at module level.
    acc_mask = ones(img.shape[:2], dtype=uint8) * 255
    rgb = img.copy()
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Use lo/hi rather than shadowing the built-in min/max names.
    for c, (lo, hi) in filters.items():
        img = rgb if c in 'RGB' else hsv
        mask = img[:, :, self.channels[c]]
        mask[mask < lo] = 0
        mask[mask > hi] = 0
        acc_mask &= mask
    kernel = ones((5, 5), uint8)
    # Morphological opening (erode then dilate) to drop speckle noise.
    acc_mask = cv2.dilate(cv2.erode(acc_mask, kernel), kernel)
    circles = cv2.HoughCircles(acc_mask, cv2.HOUGH_GRADIENT, 3,
                               img.shape[0] / 5.)
    return circles.reshape(-1, 3) if circles is not None else []
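A usage sketch; `BlobDetector` is a hypothetical owner class assumed to define `self.channels`, e.g. {'R': 2, 'G': 1, 'B': 0, 'H': 0, 'S': 1, 'V': 2} for BGR/HSV channel indices:

import cv2

detector = BlobDetector()  # hypothetical owner with a .channels mapping
frame = cv2.imread('frame.png')
filters = {'R': (150, 255), 'S': (150, 255)}
for x, y, radius in detector.detect_blob(frame, filters):
    cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 0), 2)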
java
public Variable createPipelineScheduleVariable(Object projectIdOrPath, Integer pipelineScheduleId, String key, String value) throws GitLabApiException { GitLabApiForm formData = new GitLabApiForm() .withParam("key", key, true) .withParam("value", value, true); Response response = post(Response.Status.CREATED, formData, "projects", getProjectIdOrPath(projectIdOrPath), "pipeline_schedules", pipelineScheduleId, "variables"); return (response.readEntity(Variable.class)); }
python
def create_qualification_type(Name=None, Keywords=None, Description=None, QualificationTypeStatus=None, RetryDelayInSeconds=None, Test=None, AnswerKey=None, TestDurationInSeconds=None, AutoGranted=None, AutoGrantedValue=None): """ The CreateQualificationType operation creates a new Qualification type, which is represented by a QualificationType data structure. See also: AWS API Documentation :example: response = client.create_qualification_type( Name='string', Keywords='string', Description='string', QualificationTypeStatus='Active'|'Inactive', RetryDelayInSeconds=123, Test='string', AnswerKey='string', TestDurationInSeconds=123, AutoGranted=True|False, AutoGrantedValue=123 ) :type Name: string :param Name: [REQUIRED] The name you give to the Qualification type. The type name is used to represent the Qualification to Workers, and to find the type using a Qualification type search. It must be unique across all of your Qualification types. :type Keywords: string :param Keywords: One or more words or phrases that describe the Qualification type, separated by commas. The keywords of a type make the type easier to find during a search. :type Description: string :param Description: [REQUIRED] A long description for the Qualification type. On the Amazon Mechanical Turk website, the long description is displayed when a Worker examines a Qualification type. :type QualificationTypeStatus: string :param QualificationTypeStatus: [REQUIRED] The initial status of the Qualification type. Constraints: Valid values are: Active | Inactive :type RetryDelayInSeconds: integer :param RetryDelayInSeconds: The number of seconds that a Worker must wait after requesting a Qualification of the Qualification type before the worker can retry the Qualification request. Constraints: None. If not specified, retries are disabled and Workers can request a Qualification of this type only once, even if the Worker has not been granted the Qualification. It is not possible to disable retries for a Qualification type after it has been created with retries enabled. If you want to disable retries, you must delete existing retry-enabled Qualification type and then create a new Qualification type with retries disabled. :type Test: string :param Test: The questions for the Qualification test a Worker must answer correctly to obtain a Qualification of this type. If this parameter is specified, TestDurationInSeconds must also be specified. Constraints: Must not be longer than 65535 bytes. Must be a QuestionForm data structure. This parameter cannot be specified if AutoGranted is true. Constraints: None. If not specified, the Worker may request the Qualification without answering any questions. :type AnswerKey: string :param AnswerKey: The answers to the Qualification test specified in the Test parameter, in the form of an AnswerKey data structure. Constraints: Must not be longer than 65535 bytes. Constraints: None. If not specified, you must process Qualification requests manually. :type TestDurationInSeconds: integer :param TestDurationInSeconds: The number of seconds the Worker has to complete the Qualification test, starting from the time the Worker requests the Qualification. :type AutoGranted: boolean :param AutoGranted: Specifies whether requests for the Qualification type are granted immediately, without prompting the Worker with a Qualification test. Constraints: If the Test parameter is specified, this parameter cannot be true. 
:type AutoGrantedValue: integer :param AutoGrantedValue: The Qualification value to use for automatically granted Qualifications. This parameter is used only if the AutoGranted parameter is true. :rtype: dict :return: { 'QualificationType': { 'QualificationTypeId': 'string', 'CreationTime': datetime(2015, 1, 1), 'Name': 'string', 'Description': 'string', 'Keywords': 'string', 'QualificationTypeStatus': 'Active'|'Inactive', 'Test': 'string', 'TestDurationInSeconds': 123, 'AnswerKey': 'string', 'RetryDelayInSeconds': 123, 'IsRequestable': True|False, 'AutoGranted': True|False, 'AutoGrantedValue': 123 } } """ pass
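The stub mirrors the boto3 MTurk client; a real call looks like this (region and field values are placeholders):

import boto3

client = boto3.client('mturk', region_name='us-east-1')
response = client.create_qualification_type(
    Name='Color-vision screen',
    Description='Workers must pass a short color-vision test.',
    QualificationTypeStatus='Active',
    RetryDelayInSeconds=86400,
    AutoGranted=False,
)
qualification_id = response['QualificationType']['QualificationTypeId']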
java
protected void logParameterError(final Object caller, final Object[] parameters, final boolean inJavaScriptContext) { logParameterError(caller, parameters, "Unsupported parameter combination/count in", inJavaScriptContext); }
java
@Override public Stream<T> queued(int queueSize) { final Iterator<T> iter = iterator(); if (iter instanceof QueuedIterator && ((QueuedIterator<? extends T>) iter).max() >= queueSize) { return newStream(elements, sorted, cmp); } else { return newStream(Stream.parallelConcatt(Arrays.asList(iter), 1, queueSize), sorted, cmp); } }
python
def close(self): """Disposes of any internal state. Currently, this closes the PoolManager and any active ProxyManager, which closes any pooled connections. """ self.poolmanager.clear() for proxy in self.proxy_manager.values(): proxy.clear()
python
def has_authority_over(self, url):
    """Return True if the current master has authority over url.

    In strict mode checks scheme, server and path. Otherwise checks
    just that the server names match or the query url is a
    sub-domain of the master.
    """
    s = urlparse(url)
    if s.scheme != self.master_scheme:
        return False
    if s.netloc != self.master_netloc:
        if not s.netloc.endswith('.' + self.master_netloc):
            return False
        # Maybe should allow parallel for 3+ components, e.g.
        # a.example.org, b.example.org
    path = os.path.dirname(s.path)
    if (self.strict and path != self.master_path and
            not path.startswith(self.master_path)):
        return False
    return True
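The sub-domain rule reduces to a suffix check on the netloc; a standalone illustration of why the leading dot matters:

from urllib.parse import urlparse

master_netloc = 'example.org'
for url in ('http://example.org/x',
            'http://a.example.org/x',
            'http://evilexample.org/x'):
    netloc = urlparse(url).netloc
    ok = netloc == master_netloc or netloc.endswith('.' + master_netloc)
    print(url, ok)  # True, True, False: the dot blocks suffix spoofing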
python
def write_base (self, url_data): """Write url_data.base_ref.""" self.writeln(u"<tr><td>"+self.part("base")+u"</td><td>"+ cgi.escape(url_data.base_ref)+u"</td></tr>")
java
public void accept(XVisitor visitor, XLog log) { /* * First call. */ visitor.visitTracePre(this, log); /* * Visit the attributes. */ for (XAttribute attribute: attributes.values()) { attribute.accept(visitor, this); } /* * Visit the events. */ for (XEvent event: this) { event.accept(visitor, this); } /* * Last call. */ visitor.visitTracePost(this, log); }
java
private void checkQueueSize() { int queueSize = getQueueSize(); if (SEND_QUEUE_SIZE_WARNING_THRESHOLD > 0 && queueSize >= SEND_QUEUE_SIZE_WARNING_THRESHOLD) { logger.warn("The Gerrit send commands queue contains {} items!" + " Something might be stuck, or your system can't process the commands fast enough." + " Try to increase the number of sending worker threads." + " Current thread-pool size: {}", queueSize, executor.getPoolSize()); logger.info("Nr of active pool-threads: {}", executor.getActiveCount()); } }
python
def read_discrete_trajectory(filename):
    """Read discrete trajectory from ascii file.

    The ascii file containing a single column with integer entries is
    read into an array of integers.

    Parameters
    ----------
    filename : str
        The filename of the discrete state trajectory file.
        The filename can either contain the full or the
        relative path to the file.

    Returns
    -------
    dtraj : (M, ) ndarray
        Discrete state trajectory.
    """
    with open(filename, "r") as f:
        lines = f.read()
        dtraj = np.fromstring(lines, dtype=int, sep="\n")
        return dtraj
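A self-contained round-trip sketch using a temporary data file:

import numpy as np

np.savetxt('dtraj.dat', [0, 0, 1, 2, 1], fmt='%d')  # one integer per line
dtraj = read_discrete_trajectory('dtraj.dat')
assert dtraj.tolist() == [0, 0, 1, 2, 1]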
python
def addDocEntity(self, name, type, ExternalID, SystemID, content):
    """Register a new entity for this document. """
    ret = libxml2mod.xmlAddDocEntity(self._o, name, type, ExternalID,
                                     SystemID, content)
    if ret is None:
        raise treeError('xmlAddDocEntity() failed')
    __tmp = xmlEntity(_obj=ret)
    return __tmp
python
def view_admin_log(): """Page for viewing the log of admin activity.""" build = g.build # TODO: Add paging log_list = ( models.AdminLog.query .filter_by(build_id=build.id) .order_by(models.AdminLog.created.desc()) .all()) return render_template( 'view_admin_log.html', build=build, log_list=log_list)
python
def _carregar(self):
    """Load (or reload) the SAT library. If the calling convention has
    not yet been set, it will be inferred from the extension of the
    library file.

    :raises ValueError: If the calling convention cannot be determined
        or is not a valid value.
    """
    if self._convencao is None:
        if self._caminho.endswith(('.DLL', '.dll')):
            self._convencao = constantes.WINDOWS_STDCALL
        else:
            self._convencao = constantes.STANDARD_C

    if self._convencao == constantes.STANDARD_C:
        loader = ctypes.CDLL
    elif self._convencao == constantes.WINDOWS_STDCALL:
        loader = ctypes.WinDLL
    else:
        raise ValueError('Unknown calling convention: {!r}'.format(
                self._convencao))

    self._libsat = loader(self._caminho)
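The convention dispatch reduces to choosing the ctypes loader; a standalone sketch (the paths are placeholders, and ctypes.WinDLL exists only on Windows):

import ctypes
import sys

caminho = 'SAT.dll' if sys.platform == 'win32' else 'libsat.so'  # placeholder paths
if caminho.lower().endswith('.dll'):
    loader = ctypes.WinDLL  # Windows stdcall convention
else:
    loader = ctypes.CDLL    # standard C (cdecl) convention
# libsat = loader(caminho)  # would only succeed if the library is installed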