Dataset schema (each row below is a language tag followed by one func_code_string):
language: string, 2 classes (java, python)
func_code_string: string, lengths 63 to 466k characters
java
public static AddressInfo of(String region, String name) { return of(RegionAddressId.of(region, name)); }
python
def get_calling_module_object_and_name():
    """Returns the module that's calling into this module.

    We generally use this function to get the name of the module calling a
    DEFINE_foo... function.

    Returns:
      The module object that called into this one.

    Raises:
      AssertionError: Raised when no calling module could be identified.
    """
    for depth in range(1, sys.getrecursionlimit()):
        # sys._getframe is the right thing to use here, as it's the best
        # way to walk up the call stack.
        globals_for_frame = sys._getframe(depth).f_globals  # pylint: disable=protected-access
        module, module_name = get_module_object_and_name(globals_for_frame)
        if id(module) not in disclaim_module_ids and module_name is not None:
            return _ModuleObjectAndName(module, module_name)
    raise AssertionError('No module was found')
python
def decode_data_with_length(reader) -> bytes:
    """
    Read data from a reader. The data is prefixed with a 2-byte length.

    :param reader: Stream reader
    :return: bytes read from the stream (without the length prefix)
    """
    length_bytes = yield from read_or_raise(reader, 2)
    bytes_length = unpack("!H", length_bytes)
    data = yield from read_or_raise(reader, bytes_length[0])
    return data
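The unpack("!H", ...) call reads the two prefix bytes as a big-endian unsigned short. A minimal, synchronous sketch of the same framing without the async reader, with an invented frame for illustration:

import struct

def decode_frame(buf):
    """Split one length-prefixed frame off the front of buf.

    The first two bytes are a big-endian unsigned short ("!H") giving
    the payload length; the payload follows immediately after.
    """
    (length,) = struct.unpack("!H", buf[:2])
    return buf[2:2 + length], buf[2 + length:]

payload, rest = decode_frame(b"\x00\x05hello!")
assert payload == b"hello" and rest == b"!"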
java
@Indexable(type = IndexableType.DELETE) @Override public CommerceWishList deleteCommerceWishList(long commerceWishListId) throws PortalException { return commerceWishListPersistence.remove(commerceWishListId); }
python
def selectrangeopenleft(table, field, minv, maxv, complement=False):
    """Select rows where the given field is greater than or equal to `minv`
    and less than `maxv`."""
    minv = Comparable(minv)
    maxv = Comparable(maxv)
    return select(table, field, lambda v: minv <= v < maxv,
                  complement=complement)
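The lambda implements the half-open interval [minv, maxv). A tiny illustration of that boundary behaviour with plain Python values (the rows are invented for the example):

rows = [('a', 1), ('b', 4), ('c', 7)]
# Keep rows whose second field v satisfies 4 <= v < 7:
selected = [r for r in rows if 4 <= r[1] < 7]
assert selected == [('b', 4)]   # 4 is included, 7 is excluded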
python
def create(self, graph):
    """
    Create a new scoped component.
    """
    scoped_config = self.get_scoped_config(graph)
    scoped_graph = ScopedGraph(graph, scoped_config)
    return self.func(scoped_graph)
java
public SegmentedSuggestion wordSegmentation(String input, int maxEditDistance, int maxSegmentationWordLength) {
    if (input.isEmpty()) {
        return new SegmentedSuggestion();
    }
    int arraySize = Math.min(maxSegmentationWordLength, input.length());
    SegmentedSuggestion[] compositions = new SegmentedSuggestion[arraySize];
    for (int i = 0; i < arraySize; i++) {
        compositions[i] = new SegmentedSuggestion();
    }
    int circularIndex = -1;
    // outer loop (column): all possible part start positions
    for (int j = 0; j < input.length(); j++) {
        // inner loop (row): all possible part lengths (from start position):
        // part can't be bigger than longest word in dictionary (other than long unknown word)
        int imax = Math.min(input.length() - j, maxSegmentationWordLength);
        for (int i = 1; i <= imax; i++) {
            // get top spelling correction/ed for part
            String part = input.substring(j, j + i);
            int separatorLength = 0;
            int topEd = 0;
            double topProbabilityLog;
            String topResult;
            if (Character.isWhitespace(part.charAt(0))) {
                // remove space for Levenshtein calculation
                part = part.substring(1);
            } else {
                // add ed+1: space did not exist, had to be inserted
                separatorLength = 1;
            }
            // remove space from part1, add number of removed spaces to topEd
            topEd += part.length();
            // remove space
            part = part.replace(" ", ""); // =System.Text.RegularExpressions.Regex.Replace(part1, @"\s+", "");
            // add number of removed spaces to ed
            topEd -= part.length();
            List<SuggestItem> results = this.lookup(part, SymSpell.Verbosity.Top, maxEditDistance);
            if (results.size() > 0) {
                topResult = results.get(0).term;
                topEd += results.get(0).distance;
                // Naive Bayes Rule:
                // we assume the word probabilities of two words to be independent,
                // therefore the resulting probability of the word combination is the
                // product of the two word probabilities. Instead of computing the product
                // of probabilities we compute the sum of the logarithms of probabilities,
                // because the probabilities of words are about 10^-10 and the product of
                // many such small numbers could underflow the floating-point range and
                // become zero. log(ab) = log(a) + log(b)
                topProbabilityLog = Math.log10((double) results.get(0).count / (double) N);
            } else {
                topResult = part; // default, if word not found
                // otherwise long input text would win as long unknown word (with ed=edmax+1),
                // although there should be many spaces inserted
                topEd += part.length();
                topProbabilityLog = Math.log10(10.0 / (N * Math.pow(10.0, part.length())));
            }
            int destinationIndex = ((i + circularIndex) % arraySize);
            // set values in first loop
            if (j == 0) {
                compositions[destinationIndex].segmentedString = part;
                compositions[destinationIndex].correctedString = topResult;
                compositions[destinationIndex].distanceSum = topEd;
                compositions[destinationIndex].probabilityLogSum = topProbabilityLog;
            } else if ((i == maxSegmentationWordLength)
                    // replace values if better probabilityLogSum, if same edit distance OR one space difference
                    || (((compositions[circularIndex].distanceSum + topEd == compositions[destinationIndex].distanceSum)
                            || (compositions[circularIndex].distanceSum + separatorLength + topEd == compositions[destinationIndex].distanceSum))
                        && (compositions[destinationIndex].probabilityLogSum < compositions[circularIndex].probabilityLogSum + topProbabilityLog))
                    // replace values if smaller edit distance
                    || (compositions[circularIndex].distanceSum + separatorLength + topEd < compositions[destinationIndex].distanceSum)) {
                compositions[destinationIndex].segmentedString = compositions[circularIndex].segmentedString + " " + part;
                compositions[destinationIndex].correctedString = compositions[circularIndex].correctedString + " " + topResult;
                compositions[destinationIndex].distanceSum = compositions[circularIndex].distanceSum + topEd;
                compositions[destinationIndex].probabilityLogSum = compositions[circularIndex].probabilityLogSum + topProbabilityLog;
            }
        }
        circularIndex++;
        if (circularIndex >= arraySize) {
            circularIndex = 0;
        }
    }
    return compositions[circularIndex];
}
java
public void incrementDependencySufficientStatistics(SufficientStatistics gradient,
    SufficientStatistics parameters, List<String> posTags,
    Collection<DependencyStructure> dependencies, double count) {
  int[] puncCounts = CcgParser.computeDistanceCounts(posTags, puncTagSet);
  int[] verbCounts = CcgParser.computeDistanceCounts(posTags, verbTagSet);

  SufficientStatistics dependencyGradient = gradient.coerceToList().getStatisticByName(DEPENDENCY_PARAMETERS);
  SufficientStatistics wordDistanceGradient = gradient.coerceToList().getStatisticByName(WORD_DISTANCE_PARAMETERS);
  SufficientStatistics puncDistanceGradient = gradient.coerceToList().getStatisticByName(PUNC_DISTANCE_PARAMETERS);
  SufficientStatistics verbDistanceGradient = gradient.coerceToList().getStatisticByName(VERB_DISTANCE_PARAMETERS);

  SufficientStatistics dependencyParameters = parameters.coerceToList().getStatisticByName(DEPENDENCY_PARAMETERS);
  SufficientStatistics wordDistanceParameters = parameters.coerceToList().getStatisticByName(WORD_DISTANCE_PARAMETERS);
  SufficientStatistics puncDistanceParameters = parameters.coerceToList().getStatisticByName(PUNC_DISTANCE_PARAMETERS);
  SufficientStatistics verbDistanceParameters = parameters.coerceToList().getStatisticByName(VERB_DISTANCE_PARAMETERS);

  for (DependencyStructure dependency : dependencies) {
    int headWordIndex = dependency.getHeadWordIndex();
    int objectWordIndex = dependency.getObjectWordIndex();

    Assignment predicateAssignment = Assignment.unionAll(
        dependencyHeadVar.outcomeArrayToAssignment(dependency.getHead()),
        dependencySyntaxVar.outcomeArrayToAssignment(dependency.getHeadSyntacticCategory()),
        dependencyArgNumVar.outcomeArrayToAssignment(dependency.getArgIndex()),
        dependencyHeadPosVar.outcomeArrayToAssignment(posTags.get(headWordIndex)),
        dependencyArgPosVar.outcomeArrayToAssignment(posTags.get(objectWordIndex)));
    Assignment assignment = predicateAssignment.union(
        dependencyArgVar.outcomeArrayToAssignment(dependency.getObject()));
    dependencyFamily.incrementSufficientStatisticsFromAssignment(dependencyGradient,
        dependencyParameters, assignment, count);

    // Update distance parameters.
    int wordDistance = CcgParser.computeWordDistance(headWordIndex, objectWordIndex);
    int puncDistance = CcgParser.computeArrayDistance(puncCounts, headWordIndex, objectWordIndex);
    int verbDistance = CcgParser.computeArrayDistance(verbCounts, headWordIndex, objectWordIndex);

    Assignment wordDistanceAssignment = predicateAssignment.union(
        wordDistanceVar.outcomeArrayToAssignment(wordDistance));
    wordDistanceFamily.incrementSufficientStatisticsFromAssignment(wordDistanceGradient,
        wordDistanceParameters, wordDistanceAssignment, count);

    Assignment puncDistanceAssignment = predicateAssignment.union(
        puncDistanceVar.outcomeArrayToAssignment(puncDistance));
    puncDistanceFamily.incrementSufficientStatisticsFromAssignment(puncDistanceGradient,
        puncDistanceParameters, puncDistanceAssignment, count);

    Assignment verbDistanceAssignment = predicateAssignment.union(
        verbDistanceVar.outcomeArrayToAssignment(verbDistance));
    verbDistanceFamily.incrementSufficientStatisticsFromAssignment(verbDistanceGradient,
        verbDistanceParameters, verbDistanceAssignment, count);
  }
}
python
def _get_default_radius(site):
    """
    An internal method to get a "default" covalent/element radius

    Args:
        site: (Site)

    Returns:
        Covalent radius of element on site, or atomic radius if unavailable
    """
    try:
        return CovalentRadius.radius[site.specie.symbol]
    except KeyError:
        return site.specie.atomic_radius
python
def replace_acquaintance_with_swap_network(
        circuit: circuits.Circuit,
        qubit_order: Sequence[ops.Qid],
        acquaintance_size: Optional[int] = 0,
        swap_gate: ops.Gate = ops.SWAP
) -> bool:
    """
    Replace every moment containing acquaintance gates (after rectification)
    with a generalized swap network, with the partition given by the
    acquaintance gates in that moment (and singletons for the free qubits).
    Accounts for the reversing effect of swap networks.

    Args:
        circuit: The acquaintance strategy.
        qubit_order: The qubits, in order, on which the replacing swap
            network gate acts.
        acquaintance_size: The acquaintance size of the new swap network gate.
        swap_gate: The gate used to swap logical indices.

    Returns:
        Whether or not the overall effect of the inserted swap network
        gates is to reverse the order of the qubits, i.e. the parity of the
        number of swap network gates inserted.

    Raises:
        TypeError: circuit is not an acquaintance strategy.
    """
    if not is_acquaintance_strategy(circuit):
        raise TypeError('not is_acquaintance_strategy(circuit)')

    rectify_acquaintance_strategy(circuit)
    reflected = False
    reverse_map = {q: r for q, r in zip(qubit_order, reversed(qubit_order))}
    for moment_index, moment in enumerate(circuit):
        if reflected:
            moment = moment.transform_qubits(reverse_map.__getitem__)
        if all(isinstance(op.gate, AcquaintanceOpportunityGate)
               for op in moment.operations):
            swap_network_gate = SwapNetworkGate.from_operations(
                qubit_order, moment.operations, acquaintance_size, swap_gate)
            swap_network_op = swap_network_gate(*qubit_order)
            moment = ops.Moment([swap_network_op])
            reflected = not reflected
        circuit._moments[moment_index] = moment
    return reflected
python
def show(self):
    """
    Simulates switching the display mode ON; this is achieved by restoring
    the contrast to the level prior to the last time hide() was called.
    """
    if self._prev_contrast is not None:
        self.contrast(self._prev_contrast)
        self._prev_contrast = None
python
def get_hosts(sld, tld):
    '''
    Retrieves DNS host record settings for the requested domain.

    Returns a dictionary of information about the requested domain.

    sld
        SLD of the domain name

    tld
        TLD of the domain name

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' namecheap_domains_dns.get_hosts sld tld
    '''
    opts = salt.utils.namecheap.get_opts('namecheap.domains.dns.gethosts')
    opts['TLD'] = tld
    opts['SLD'] = sld
    response_xml = salt.utils.namecheap.get_request(opts)

    if response_xml is None:
        return {}

    domaindnsgethostsresult = response_xml.getElementsByTagName('DomainDNSGetHostsResult')[0]
    return salt.utils.namecheap.xml_to_dict(domaindnsgethostsresult)
java
public static DateType parseV3(String theV3String) { DateType retVal = new DateType(); retVal.setValueAsV3String(theV3String); return retVal; }
java
public static SanitizedContent fromSafeScriptProto(SafeScriptProto script) { return SanitizedContent.create( SafeScripts.fromProto(script).getSafeScriptString(), ContentKind.JS); }
python
def create_public_room(self, name, **kwargs):
    """
    Create a room with the given name.

    :param name: Room name
    :param kwargs:
        members: The users to add to the channel when it is created.
            Optional; Ex.: ["rocket.cat"], Default: []
        read_only: Set if the channel is read only or not.
            Optional; Ex.: True, Default: False
    :return:
    """
    return CreatePublicRoom(settings=self.settings, **kwargs).call(name=name, **kwargs)
java
public void complete(Session session) { try { session.complete(); sessionCache.put(session.getId(), session); } catch (Exception e) { log.warn("Session failed to complete", e); } }
python
def main():
    'Main function. Handles delegation to other functions.'
    logging.basicConfig()

    type_choices = {'any': constants.PACKAGE_ANY,
                    'extension': constants.PACKAGE_EXTENSION,
                    'theme': constants.PACKAGE_THEME,
                    'dictionary': constants.PACKAGE_DICTIONARY,
                    'languagepack': constants.PACKAGE_LANGPACK,
                    'search': constants.PACKAGE_SEARCHPROV,
                    'multi': constants.PACKAGE_MULTI}

    # Parse the arguments.
    parser = argparse.ArgumentParser(
        description='Run tests on a Mozilla-type addon.')
    parser.add_argument('package',
                        help="The path of the package you're testing")
    parser.add_argument('-t', '--type', default='any',
                        choices=type_choices.keys(),
                        help="Type of addon you assume you're testing",
                        required=False)
    parser.add_argument('-o', '--output', default='text',
                        choices=('text', 'json'),
                        help='The output format that you expect',
                        required=False)
    parser.add_argument('-v', '--verbose', action='store_const', const=True,
                        help="""If the output format supports it, makes
                        the analysis summary include extra info.""")
    parser.add_argument('--boring', action='store_const', const=True,
                        help="""Activating this flag will remove color
                        support from the terminal.""")
    parser.add_argument('--determined', action='store_const', const=True,
                        help="""This flag will continue running tests in
                        successive tests even if a lower tier fails.""")
    parser.add_argument('--selfhosted', action='store_const', const=True,
                        help="""Indicates that the addon will not be hosted
                        on addons.mozilla.org. This allows the
                        <em:updateURL> element to be set.""")
    parser.add_argument('--approved_applications',
                        help="""A JSON file containing acceptable
                        applications and their versions""",
                        required=False)
    parser.add_argument('--target-maxversion',
                        help="""JSON string to override the package's
                        targetapp_maxVersion for validation. The JSON object
                        should be a dict of versions keyed by application
                        GUID. For example, setting a package's max Firefox
                        version to 5.*:
                        {"{ec8030f7-c20a-464f-9b0e-13a3a9e97384}": "5.*"}""")
    parser.add_argument('--target-minversion',
                        help="""JSON string to override the package's
                        targetapp_minVersion for validation. The JSON object
                        should be a dict of versions keyed by application
                        GUID. For example, setting a package's min Firefox
                        version to 5.*:
                        {"{ec8030f7-c20a-464f-9b0e-13a3a9e97384}": "5.*"}""")
    parser.add_argument('--for-appversions',
                        help="""JSON string to run validation tests for
                        compatibility with a specific app/version. The JSON
                        object should be a dict of version lists keyed by
                        application GUID. For example, running Firefox 6.*
                        compatibility tests:
                        {"{ec8030f7-c20a-464f-9b0e-13a3a9e97384}": ["6.*"]}""")
    parser.add_argument('--timeout',
                        help='The amount of time before validation is '
                             'terminated with a timeout exception.',
                        default='60')

    args = parser.parse_args()

    # We want to make sure that the output is expected. Parse out the expected
    # type for the add-on and pass it in for validation.
    if args.type not in type_choices:
        # Fail if the user provided invalid input.
        print 'Given expectation (%s) not valid. See --help for details' % \
            args.type
        sys.exit(1)

    overrides = {}
    if args.target_minversion:
        overrides['targetapp_minVersion'] = json.loads(args.target_minversion)
    if args.target_maxversion:
        overrides['targetapp_maxVersion'] = json.loads(args.target_maxversion)

    for_appversions = None
    if args.for_appversions:
        for_appversions = json.loads(args.for_appversions)

    try:
        timeout = int(args.timeout)
    except ValueError:
        print 'Invalid timeout. Integer expected.'
        sys.exit(1)

    expectation = type_choices[args.type]
    error_bundle = validate(args.package, format=None,
                            approved_applications=args.approved_applications,
                            determined=args.determined,
                            listed=not args.selfhosted,
                            overrides=overrides,
                            for_appversions=for_appversions,
                            expectation=expectation,
                            timeout=timeout)

    # Print the output of the tests based on the requested format.
    if args.output == 'text':
        print error_bundle.print_summary(verbose=args.verbose,
                                         no_color=args.boring).encode('utf-8')
    elif args.output == 'json':
        sys.stdout.write(error_bundle.render_json())

    if error_bundle.failed():
        sys.exit(1)
    else:
        sys.exit(0)
java
@BetaApi public final Operation insertRegionCommitment(String region, Commitment commitmentResource) { InsertRegionCommitmentHttpRequest request = InsertRegionCommitmentHttpRequest.newBuilder() .setRegion(region) .setCommitmentResource(commitmentResource) .build(); return insertRegionCommitment(request); }
python
def _verify_shape_bounds(shape, bounds):
    """Verify that shape corresponds to bounds aspect ratio."""
    if not isinstance(shape, (tuple, list)) or len(shape) != 2:
        raise TypeError(
            "shape must be a tuple or list with two elements: %s" % str(shape)
        )
    if not isinstance(bounds, (tuple, list)) or len(bounds) != 4:
        raise TypeError(
            "bounds must be a tuple or list with four elements: %s" % str(bounds)
        )
    shape = Shape(*shape)
    bounds = Bounds(*bounds)
    shape_ratio = shape.width / shape.height
    bounds_ratio = (bounds.right - bounds.left) / (bounds.top - bounds.bottom)
    if abs(shape_ratio - bounds_ratio) > DELTA:
        min_length = min([
            (bounds.right - bounds.left) / shape.width,
            (bounds.top - bounds.bottom) / shape.height
        ])
        proposed_bounds = Bounds(
            bounds.left,
            bounds.bottom,
            bounds.left + shape.width * min_length,
            bounds.bottom + shape.height * min_length
        )
        raise ValueError(
            "shape ratio (%s) must equal bounds ratio (%s); try %s" % (
                shape_ratio, bounds_ratio, proposed_bounds
            )
        )
java
private void obtainWindowBackground(@StyleRes final int themeResourceId) { TypedArray typedArray = getContext().getTheme().obtainStyledAttributes(themeResourceId, new int[]{R.attr.materialDialogWindowBackground}); int resourceId = typedArray.getResourceId(0, 0); if (resourceId != 0) { setWindowBackground(resourceId); } else { setWindowBackground(R.drawable.material_dialog_background); } }
python
def main(vocab_path: str,
         elmo_config_path: str,
         elmo_weights_path: str,
         output_dir: str,
         batch_size: int,
         device: int,
         use_custom_oov_token: bool = False):
    """
    Creates ELMo word representations from a vocabulary file. These word
    representations are _independent_ - they are the result of running the
    CNN and Highway layers of the ELMo model, but not the Bidirectional LSTM.
    ELMo requires 2 additional tokens: <S> and </S>. The first token in this
    file is assumed to be an unknown token.

    This script produces two artifacts: A new vocabulary file with the <S>
    and </S> tokens inserted and a glove formatted embedding file containing
    word : vector pairs, one per line, with all values separated by a space.
    """
    # Load the vocabulary words and convert to char ids
    with open(vocab_path, 'r') as vocab_file:
        tokens = vocab_file.read().strip().split('\n')

    # Insert the sentence boundary tokens which ELMo uses at positions 1 and 2.
    if tokens[0] != DEFAULT_OOV_TOKEN and not use_custom_oov_token:
        raise ConfigurationError("ELMo embeddings require the use of an OOV token.")
    tokens = [tokens[0]] + ["<S>", "</S>"] + tokens[1:]

    indexer = ELMoTokenCharactersIndexer()
    indices = indexer.tokens_to_indices([Token(token) for token in tokens],
                                        Vocabulary(), "indices")["indices"]
    sentences = []
    for k in range((len(indices) // 50) + 1):
        sentences.append(indexer.pad_token_sequence(indices[(k * 50):((k + 1) * 50)],
                                                    desired_num_tokens=50,
                                                    padding_lengths={}))
    last_batch_remainder = 50 - (len(indices) % 50)

    if device != -1:
        elmo_token_embedder = _ElmoCharacterEncoder(elmo_config_path,
                                                    elmo_weights_path).cuda(device)
    else:
        elmo_token_embedder = _ElmoCharacterEncoder(elmo_config_path,
                                                    elmo_weights_path)

    all_embeddings = []
    for i in range((len(sentences) // batch_size) + 1):
        array = numpy.array(sentences[i * batch_size: (i + 1) * batch_size])
        if device != -1:
            batch = torch.from_numpy(array).cuda(device)
        else:
            batch = torch.from_numpy(array)

        token_embedding = elmo_token_embedder(batch)['token_embedding'].data

        # Reshape back to a list of words of shape (batch_size * 50, encoding_dim)
        # We also need to remove the <S>, </S> tokens appended by the encoder.
        per_word_embeddings = token_embedding[:, 1:-1, :].contiguous().view(-1, token_embedding.size(-1))

        all_embeddings.append(per_word_embeddings)

    # Remove the embeddings associated with padding in the last batch.
    all_embeddings[-1] = all_embeddings[-1][:-last_batch_remainder, :]

    embedding_weight = torch.cat(all_embeddings, 0).cpu().numpy()

    # Write out the embedding in a glove format.
    os.makedirs(output_dir, exist_ok=True)
    with gzip.open(os.path.join(output_dir, "elmo_embeddings.txt.gz"), 'wb') as embeddings_file:
        for i, word in enumerate(tokens):
            string_array = " ".join([str(x) for x in list(embedding_weight[i, :])])
            embeddings_file.write(f"{word} {string_array}\n".encode('utf-8'))

    # Write out the new vocab with the <S> and </S> tokens.
    _, vocab_file_name = os.path.split(vocab_path)
    with open(os.path.join(output_dir, vocab_file_name), "w") as new_vocab_file:
        for word in tokens:
            new_vocab_file.write(f"{word}\n")
java
public void marshall(IpRouteInfo ipRouteInfo, ProtocolMarshaller protocolMarshaller) { if (ipRouteInfo == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(ipRouteInfo.getDirectoryId(), DIRECTORYID_BINDING); protocolMarshaller.marshall(ipRouteInfo.getCidrIp(), CIDRIP_BINDING); protocolMarshaller.marshall(ipRouteInfo.getIpRouteStatusMsg(), IPROUTESTATUSMSG_BINDING); protocolMarshaller.marshall(ipRouteInfo.getAddedDateTime(), ADDEDDATETIME_BINDING); protocolMarshaller.marshall(ipRouteInfo.getIpRouteStatusReason(), IPROUTESTATUSREASON_BINDING); protocolMarshaller.marshall(ipRouteInfo.getDescription(), DESCRIPTION_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } }
java
public Principal getCallerPrincipal(boolean useRealm, String realm, boolean web, boolean isJaspiEnabled) { Subject subject = subjectManager.getCallerSubject(); if (subject == null) { return null; } SubjectHelper subjectHelper = new SubjectHelper(); if (subjectHelper.isUnauthenticated(subject) && web) { return null; } if (isJaspiEnabled) { Principal principal = getPrincipalFromWSCredential(subjectHelper, subject); if (principal != null) { return principal; } } String securityName = getSecurityNameFromWSCredential(subjectHelper, subject); if (securityName == null) { return null; } Principal jsonWebToken = MpJwtHelper.getJsonWebTokenPricipal(subject); if (jsonWebToken != null) { return jsonWebToken; } Set<WSPrincipal> principals = subject.getPrincipals(WSPrincipal.class); if (principals.size() > 1) { multiplePrincipalsError(principals); } WSPrincipal wsPrincipal = null; if (!principals.isEmpty()) { String principalName = createPrincipalName(useRealm, realm, securityName); wsPrincipal = principals.iterator().next(); wsPrincipal = new WSPrincipal(principalName, wsPrincipal.getAccessId(), wsPrincipal.getAuthenticationMethod()); } return wsPrincipal; }
python
def run(self, postfunc=lambda: None):
    """Run the jobs.

    postfunc() will be invoked after the jobs have run. It will be invoked
    even if the jobs are interrupted by a keyboard interrupt (well, in fact
    by a signal such as either SIGINT, SIGTERM or SIGHUP). The execution of
    postfunc() is protected against keyboard interrupts and is guaranteed
    to run to completion."""
    self._setup_sig_handler()
    try:
        self.job.start()
    finally:
        postfunc()
        self._reset_sig_handler()
python
def fetch_build_eggs(self, requires):
    """Resolve pre-setup requirements"""
    resolved_dists = pkg_resources.working_set.resolve(
        pkg_resources.parse_requirements(requires),
        installer=self.fetch_build_egg,
        replace_conflicting=True,
    )
    for dist in resolved_dists:
        pkg_resources.working_set.add(dist, replace=True)
    return resolved_dists
java
public DeletePresetResponse deletePreset(DeletePresetRequest request) { checkNotNull(request, "The parameter request should NOT be null."); checkStringNotEmpty(request.getName(), "The parameter name should NOT be null or empty string."); InternalRequest internalRequest = createRequest(HttpMethodName.DELETE, request, LIVE_PRESET, request.getName()); return invokeHttpClient(internalRequest, DeletePresetResponse.class); }
python
def get_peer(self, name, peer_type="REPLICATION"):
    """
    Retrieve a replication peer by name.

    @param name: The name of the peer.
    @param peer_type: Added in v11. The type of the peer. Defaults to 'REPLICATION'.
    @return: The peer.
    @since: API v3
    """
    params = self._get_peer_type_param(peer_type)
    return self._get("peers/" + name, ApiCmPeer, params=params, api_version=3)
python
def _run_default_moderator(comment, content_object, request):
    """
    Run the default moderator
    """
    # The default moderator will likely not check things like "auto close".
    # It can still provide akismet and bad word checking.
    if not default_moderator.allow(comment, content_object, request):
        # Comment will be disallowed outright (HTTP 403 response)
        return False

    if default_moderator.moderate(comment, content_object, request):
        comment.is_public = False
java
private void initializeExtension(InstalledExtension installedExtension, String namespaceToLoad, Map<String, Set<InstalledExtension>> initializedExtensions) throws ExtensionException { if (installedExtension.getNamespaces() != null) { if (namespaceToLoad == null) { for (String namespace : installedExtension.getNamespaces()) { initializeExtensionInNamespace(installedExtension, namespace, initializedExtensions); } } else if (installedExtension.getNamespaces().contains(namespaceToLoad)) { initializeExtensionInNamespace(installedExtension, namespaceToLoad, initializedExtensions); } } else if (namespaceToLoad == null) { initializeExtensionInNamespace(installedExtension, null, initializedExtensions); } }
python
def remove_nio(self, nio):
    """
    Removes the specified NIO as a member of this bridge.

    :param nio: NIO instance to remove
    """
    if self._hypervisor:
        yield from self._hypervisor.send('nio_bridge remove_nio "{name}" {nio}'.format(name=self._name, nio=nio))
    self._nios.remove(nio)
python
def calculate_hmac(cls, params):
    """
    Calculate the HMAC of the given parameters in line with Shopify's rules
    for OAuth authentication.
    See http://docs.shopify.com/api/authentication/oauth#verification.
    """
    encoded_params = cls.__encoded_params_for_signature(params)
    # Generate the hex digest for the sorted parameters using the secret.
    return hmac.new(cls.secret.encode(), encoded_params.encode(), sha256).hexdigest()
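A self-contained sketch of the same hex-digest computation with only the standard library; the secret and the pre-encoded, sorted parameter string are invented placeholders:

import hmac
from hashlib import sha256

secret = b"shhh"  # hypothetical shared secret
encoded_params = "code=abc&shop=example.myshopify.com"  # already sorted/encoded
digest = hmac.new(secret, encoded_params.encode(), sha256).hexdigest()
# Verification would compare against the received hmac parameter with a
# constant-time check: hmac.compare_digest(digest, received_hmac)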
java
private boolean isGZip(byte[] bytes) {
    if (bytes == null || bytes.length == 0) {
        return false;
    }
    // refer to http://www.gzip.org/zlib/rfc-gzip.html#file-format for the magic numbers
    if (bytes[0] == 31 && (bytes[1] == 0x8b || bytes[1] == -117)) {
        return true;
    }
    return false;
}
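Note that Java bytes are signed, so the gzip magic byte 0x8b is read back as -117; comparing a signed byte against the int literal 0x8b (139) can never be true, which is why the -117 alternative is the branch that actually fires. In Python, where indexing a bytes object yields unsigned ints 0-255, the equivalent check is a sketch like:

def is_gzip(data: bytes) -> bool:
    # RFC 1952: a gzip stream starts with the magic bytes 0x1f 0x8b
    # (31 and 139 unsigned; 139 is -117 as a signed Java byte).
    return len(data) >= 2 and data[0] == 0x1F and data[1] == 0x8B

assert is_gzip(b"\x1f\x8b\x08\x00")
assert not is_gzip(b"PK\x03\x04")  # zip, not gzip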
java
private CloseableDataStore asCloseableDataStore(final DataStore dataStore, final Optional<Runnable> onClose) { return (CloseableDataStore) Proxy.newProxyInstance( DataStore.class.getClassLoader(), new Class[] { CloseableDataStore.class }, new AbstractInvocationHandler() { @Override protected Object handleInvocation(Object proxy, Method method, Object[] args) throws Throwable { if ("close".equals(method.getName())) { if (onClose.isPresent()) { onClose.get().run(); } return null; } else { return method.invoke(dataStore, args); } } }); }
python
def _indent_decor(lbl):
    """
    does the actual work of indent_func
    """
    def closure_indent(func):
        if util_arg.TRACE:
            @ignores_exc_tb(outer_wrapper=False)
            #@wraps(func)
            def wrp_indent(*args, **kwargs):
                with util_print.Indenter(lbl):
                    print(' ...trace[in]')
                    ret = func(*args, **kwargs)
                    print(' ...trace[out]')
                    return ret
        else:
            @ignores_exc_tb(outer_wrapper=False)
            #@wraps(func)
            def wrp_indent(*args, **kwargs):
                with util_print.Indenter(lbl):
                    ret = func(*args, **kwargs)
                    return ret
        wrp_indent_ = ignores_exc_tb(wrp_indent)
        wrp_indent_ = preserve_sig(wrp_indent, func)
        return wrp_indent_
    return closure_indent
python
def deploy_war(war,
               context,
               force='no',
               url='http://localhost:8080/manager',
               saltenv='base',
               timeout=180,
               temp_war_location=None,
               version=True):
    '''
    Deploy a WAR file

    war
        absolute path to WAR file (should be accessible by the user running
        tomcat) or a path supported by the salt.modules.cp.get_file function
    context
        the context path to deploy
    force : False
        set True to deploy the webapp even if one is already deployed in
        the context
    url : http://localhost:8080/manager
        the URL of the server manager webapp
    saltenv : base
        the environment of the WAR file, used by the
        salt.modules.cp.get_url function
    timeout : 180
        timeout for HTTP request
    temp_war_location : None
        use another location to temporarily copy the WAR file to;
        by default the system's temp directory is used
    version : ''
        Specify the war version. If this argument is provided, it overrides
        the version encoded in the war file name, if one is present.

        Examples:

        .. code-block:: bash

            salt '*' tomcat.deploy_war salt://salt-2015.8.6.war version=2015.08.r6

        .. versionadded:: 2015.8.6

    CLI Examples:

    cp module

    .. code-block:: bash

        salt '*' tomcat.deploy_war salt://application.war /api
        salt '*' tomcat.deploy_war salt://application.war /api no
        salt '*' tomcat.deploy_war salt://application.war /api yes http://localhost:8080/manager

    minion local file system

    .. code-block:: bash

        salt '*' tomcat.deploy_war /tmp/application.war /api
        salt '*' tomcat.deploy_war /tmp/application.war /api no
        salt '*' tomcat.deploy_war /tmp/application.war /api yes http://localhost:8080/manager
    '''
    # Decide the location to copy the war for the deployment
    tfile = 'salt.{0}'.format(os.path.basename(war))
    if temp_war_location is not None:
        if not os.path.isdir(temp_war_location):
            return 'Error - "{0}" is not a directory'.format(temp_war_location)
        tfile = os.path.join(temp_war_location, tfile)
    else:
        tfile = os.path.join(tempfile.gettempdir(), tfile)

    # Copy the WAR file if needed
    cache = False
    if not os.path.isfile(war):
        cache = True
        cached = __salt__['cp.get_url'](war, tfile, saltenv)
        if not cached:
            return 'FAIL - could not cache the WAR file'
        try:
            __salt__['file.set_mode'](cached, '0644')
        except KeyError:
            pass
    else:
        tfile = war

    # Prepare options
    opts = {
        'war': 'file:{0}'.format(tfile),
        'path': context,
    }

    # If parallel versions are desired or not disabled
    if version:
        # Set it to the defined version or attempt extraction
        version = extract_war_version(war) if version is True else version
        if isinstance(version, _string_types):
            # Only pass version to Tomcat if not undefined
            opts['version'] = version

    if force == 'yes':
        opts['update'] = 'true'

    # Deploy
    deployed = _wget('deploy', opts, url, timeout=timeout)
    res = '\n'.join(deployed['msg'])

    # Cleanup
    if cache:
        __salt__['file.remove'](tfile)

    return res
python
def key_string_to_lens_path(key_string):
    """
    Converts a key string like 'foo.bar.0.wopper' to ['foo', 'bar', 0, 'wopper']
    :param {String} keyString The dot-separated key string
    :return {[String]} The lens array containing strings or integers
    """
    return map(
        if_else(
            # The original passed isinstance(int) here, which raises a
            # TypeError when called; a digit test is what the docstring implies.
            lambda s: s.isdigit(),
            # convert to int
            lambda s: int(s),
            # Leave the string alone
            identity
        ),
        key_string.split('.')
    )
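For illustration, a plain-Python equivalent of the function above without the functional helpers, showing the same digit-segment-to-int behaviour:

def lens_path(key_string):
    # Digit segments become ints so they can index into lists.
    return [int(seg) if seg.isdigit() else seg
            for seg in key_string.split('.')]

assert lens_path('foo.bar.0.wopper') == ['foo', 'bar', 0, 'wopper']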
java
public void runPostCrawlingPlugins(CrawlSession session, ExitStatus exitReason) { LOGGER.debug("Running PostCrawlingPlugins..."); counters.get(PostCrawlingPlugin.class).inc(); for (Plugin plugin : plugins.get(PostCrawlingPlugin.class)) { if (plugin instanceof PostCrawlingPlugin) { try { LOGGER.debug("Calling plugin {}", plugin); ((PostCrawlingPlugin) plugin).postCrawling(session, exitReason); } catch (RuntimeException e) { reportFailingPlugin(plugin, e); } } } }
python
def load_training_vector(response_shapes, explanatory_rasters, response_field, metric='mean'):
    """
    Parameters
    ----------
    response_shapes : Source of vector features for raster_stats;
                      can be OGR file path or iterable of geojson-like features
    response_field : Field name containing the known response category (must be numeric)
    explanatory_rasters : List of paths to GDAL rasters containing explanatory variables
    metric : Statistic to aggregate explanatory data across line and polygon
             vector features. Defaults to 'mean' (optional)

    Returns
    -------
    train_xs : Array of explanatory variables
    train_y : 1xN array of known responses
    """
    from rasterstats import zonal_stats
    all_means = []
    all_zones = None

    for i, raster in enumerate(explanatory_rasters):
        logger.debug("Raster stats on %s" % raster)

        stats = zonal_stats(response_shapes, raster, stats=metric,
                            prefix="pyimpute_", geojson_out=True)

        zones = [x['properties'][response_field] for x in stats]
        if all_zones:
            assert zones == all_zones
        else:
            all_zones = zones

        means = [x['properties']['pyimpute_' + metric] for x in stats]
        all_means.append(means)

    train_y = np.array(all_zones)
    train_xs = np.array(all_means).T

    return train_xs, train_y
python
def service_info(self, short_name):
    """Get static information about a service.

    Args:
        short_name (string): The short name of the service to query

    Returns:
        dict: A dictionary with the long_name and preregistered info on
            this service.
    """
    if short_name not in self.services:
        raise ArgumentError("Unknown service name", short_name=short_name)

    info = {}
    info['short_name'] = short_name
    info['long_name'] = self.services[short_name]['state'].long_name
    info['preregistered'] = self.services[short_name]['state'].preregistered
    return info
python
def add_intercept_term(self, x):
    """
    Adds a column of ones to estimate the intercept term for
    the separation boundary
    """
    nr_x, nr_f = x.shape
    intercept = np.ones([nr_x, 1])
    x = np.hstack((intercept, x))
    return x
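A quick numpy illustration of the same intercept-column trick, with an invented 2x2 design matrix:

import numpy as np

x = np.array([[2.0, 3.0], [4.0, 5.0]])    # 2 samples, 2 features
ones = np.ones([x.shape[0], 1])
x_aug = np.hstack((ones, x))              # intercept column is prepended
assert x_aug.shape == (2, 3) and (x_aug[:, 0] == 1.0).all()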
python
def __stop(self):  # pragma: no cover
    """Stops the background engine."""
    if not self.dispatcher_thread:
        return

    logger.info('Stopping dispatcher')
    self.running = False
    # graceful shutdown
    self.dispatcher_thread.join()
    self.dispatcher_thread = None
java
public void addInjectionTarget(Member member) throws InjectionException {
    final boolean isTraceOn = TraceComponent.isAnyTracingEnabled();
    if (isTraceOn && tc.isEntryEnabled())
        Tr.entry(tc, "addInjectionTarget: " + member);

    // -----------------------------------------------------------------------
    // First, determine if the target being added is already in the list.
    // This may occur if the target is specified in both XML and annotations
    // (an override) or if multiple interceptors inherit from a base class
    // where the base class contains an injection annotation.          d457733
    //
    // Next, determine if the customer has incorrectly attempted to inject
    // into both the field and corresponding java beans property method.
    // This is not allowed per the JavaEE, EJB 3.0 Specifications.   d447011.2
    //
    // Finally, note that if the XML target resulted in the method, but the
    // annotation is on the field, then the XML should really be considered
    // to be overriding the field injection, so don't throw an exception
    // and instead just remove the previous target from XML.           d510950
    // -----------------------------------------------------------------------
    boolean containsTarget = false;
    String existingMemberName = null;
    String toAddMemberName = null;
    InjectionTarget injectionTarget = null;

    if (ivInjectionTargets != null) {
        for (InjectionTarget target : ivInjectionTargets) {
            Member targetMember = target.getMember();
            if (targetMember.equals(member)) {
                // Reset 'fromXML' since it also matches an annotation. Resetting
                // this allows us to detect when they have annotations on both
                // the field and set method.                            PK92087
                target.ivFromXML = false;

                // Already in list, break out and check for error below, or ignore
                if (isTraceOn && tc.isDebugEnabled())
                    Tr.debug(tc, "found: " + target);
                injectionTarget = target; // save for trace             d643444
                containsTarget = true;
                break;
            }

            // If both are from the same class, then check for 'double' injection
            // into both the field and method for a 'property'.         d510950
            if (targetMember.getDeclaringClass() == member.getDeclaringClass()) {
                // Obtain the 'property' method name from the existing target
                if (targetMember instanceof Method) {
                    existingMemberName = targetMember.getName();
                } else {
                    existingMemberName = getMethodFromProperty(targetMember.getName());
                }

                // Obtain the 'property' method name from the target being added
                if (member instanceof Method) {
                    toAddMemberName = member.getName();
                } else {
                    toAddMemberName = getMethodFromProperty(member.getName());
                }

                // When equal, injection has been specified for both field and method.
                if (existingMemberName.equals(toAddMemberName)) {
                    if (target.ivFromXML) {
                        // If the existing one came from XML, then it must be the
                        // method, but is really intended to be an override of the
                        // field annotation... so just remove the method target
                        // and let the field target be added below.     d510950
                        if (isTraceOn && tc.isDebugEnabled())
                            Tr.debug(tc, "removing: " + targetMember.getName());
                        ivInjectionTargets.remove(target);
                        break;
                    }

                    // Annotation present on both the field and method... error.
                    Tr.error(tc, "INJECTION_DECLARED_IN_BOTH_THE_FIELD_AND_METHOD_OF_A_BEAN_CWNEN0056E",
                             ivJndiName,
                             member.getDeclaringClass().getName(),
                             ivNameSpaceConfig.getModuleName(),
                             ivNameSpaceConfig.getApplicationName());
                    throw new InjectionConfigurationException("Injection of the " + ivJndiName
                            + " resource was specified for both a property instance"
                            + " variable and its corresponding set method on the "
                            + member.getDeclaringClass().getName() + " class in the "
                            + ivNameSpaceConfig.getModuleName() + " module of the "
                            + ivNameSpaceConfig.getApplicationName() + " application.");
                }
            }
        }
    }

    // -----------------------------------------------------------------------
    // The following is stated in the EJB Specification overriding rules:
    //
    //   The injection target, if specified, must name exactly the annotated
    //   field or property method.
    //
    // Previously (EJB 3.0 Feature Pack and WAS 7.0) this was interpreted to
    // mean that if any targets were specified in XML, then the XML must
    // contain targets for every annotation. So, if the target being added
    // due to an annotation is NOT present in the list, then it was not
    // specified in XML, which would have been an error. WAS did support
    // adding additional targets from XML, but there had to also be a target
    // for every annotation.
    //
    // Since that time, it has been realized that this can be quite annoying
    // to customers, as they must duplicate annotation information just to
    // specify an extra injection target. Also, this could be quite difficult
    // to enforce for EJBs in WAR modules, where targets and EJB references
    // can now be defined in many different locations.
    //
    // Beginning in WAS 8.0, the above rule is now being interpreted to mean
    // only that if a target in XML matches an annotation, then that target
    // is considered an override of that annotation and has really very
    // little effect. Additional targets in XML are not considered overrides
    // of any annotations, and are just additional targets. Thus, targets
    // from XML do not globally replace all targets from annotations, but
    // just add to them.                                                d643444
    //
    // Also note that the rule indicates it is an override when they are an
    // exact match, so if the target was in XML, but was for a set method,
    // and the annotation was for the field... then it is assumed the XML
    // target was really the field (there is no way to indicate field in XML),
    // so the above code would have removed the method target, and will
    // expect the code below to add the field target; this is not an error.
    // To not assume these are an exact match would otherwise always result
    // in an error, since injection may not occur into both the field and
    // corresponding set method.
    //
    // Otherwise, if the XML specified no targets, then this is just an add
    // due to an annotation. As long as the target is not already in the
    // list, then just add it (i.e. no duplicates).
    //
    // In this scenario, the target may already be in the list if multiple
    // classes being injected both inherit from the same base class. The
    // target will only be added once, and later will be 'collected' into
    // the InjectionTarget arrays for both subclasses.                  d457733
    // -----------------------------------------------------------------------
    if (!containsTarget) {
        if (member instanceof Field) {
            injectionTarget = new InjectionTargetField((Field) member, this);
        } else {
            injectionTarget = createInjectionTarget((Method) member, this);
        }
        injectionTarget.setInjectionBinding(this);
        addInjectionTarget(injectionTarget);
    }

    if (isTraceOn && tc.isEntryEnabled())
        Tr.exit(tc, "addInjectionTarget : " + ((containsTarget) ? "(duplicate) " : "") + injectionTarget);
}
java
@Override public CallSequence get(int index) { if (indics == null) { size(); } Pair<Integer, Integer> v = indics.get(index); int r = v.getFirst(); if (r == 0) { CallSequence seq = getVars(); CallStatement skip = new CallStatement() { public Object execute() { return Utils.VOID_VALUE; } @Override public String toString() { return "skip"; } }; seq.add(skip); return seq; } else { TestSequence rtests = repeat.getTests(); int count = rtests.size(); int[] c = new int[r]; for (int i = 0; i < r; i++) { c[i] = count; } Permutor p = new Permutor(c); int[] select = null; for (int i = 0; i < v.getSecond(); i++) { select = p.next(); } CallSequence seq = getVars(); for (int i = 0; i < r; i++) { seq.addAll(rtests.get(select[i])); } return seq; } }
python
def add_icon_widget(self, ref, x=1, y=1, name="heart"):
    """ Add Icon Widget """
    if ref not in self.widgets:
        widget = IconWidget(screen=self, ref=ref, x=x, y=y, name=name)
        self.widgets[ref] = widget
    return self.widgets[ref]
python
def scores(factors):
    """ Computes the score of temperaments and elements. """
    temperaments = {
        const.CHOLERIC: 0,
        const.MELANCHOLIC: 0,
        const.SANGUINE: 0,
        const.PHLEGMATIC: 0
    }

    qualities = {
        const.HOT: 0,
        const.COLD: 0,
        const.DRY: 0,
        const.HUMID: 0
    }

    for factor in factors:
        element = factor['element']
        # Score temperament
        temperament = props.base.elementTemperament[element]
        temperaments[temperament] += 1
        # Score qualities
        tqualities = props.base.temperamentQuality[temperament]
        qualities[tqualities[0]] += 1
        qualities[tqualities[1]] += 1

    return {
        'temperaments': temperaments,
        'qualities': qualities
    }
python
def pprint_tree_differences(self, missing_pys, missing_docs):
    """Pprint the missing files of each given set.

    :param set missing_pys: The set of missing Python files.
    :param set missing_docs: The set of missing documentation files.
    :rtype: None
    """
    if missing_pys:
        print('The following Python files appear to be missing:')
        for pyfile in missing_pys:
            print(pyfile)
        print('\n')

    if missing_docs:
        print('The following documentation files appear to be missing:')
        for docfile in missing_docs:
            print(docfile)
        print('\n')
python
def get_calculated_aes(aesthetics):
    """
    Return a list of the aesthetics that are calculated
    """
    calculated_aesthetics = []
    for name, value in aesthetics.items():
        if is_calculated_aes(value):
            calculated_aesthetics.append(name)
    return calculated_aesthetics
python
def complexity_entropy_multiscale(signal, max_scale_factor=20, m=2, r="default"):
    """
    Computes the Multiscale Entropy. Uses sample entropy with 'chebychev' distance.

    Parameters
    ----------
    signal : list or array
        List or array of values.
    max_scale_factor: int
        Max scale factor (*tau*). The max length of coarse-grained time
        series analyzed. Will analyze scales for all integers from
        1:max_scale_factor. See Costa (2005).
    m : int
        The embedding dimension (*m*, the length of vectors to compare).
    r : float
        Similarity factor *r*. Distance threshold for two template vectors
        to be considered equal. Default is 0.15*std(signal).

    Returns
    ----------
    mse: dict
        A dict containing "MSE_Parameters" (a dict with the actual
        max_scale_factor, m and r), "MSE_Values" (an array with the sample
        entropy for each scale_factor up to the max_scale_factor),
        "MSE_AUC" (a float: the area under the MSE_Values curve, a
        point-estimate of mse) and "MSE_Sum" (a float: the sum of the
        MSE_Values curve, another point-estimate of mse; Norris, 2008).

    Example
    ----------
    >>> import neurokit as nk
    >>>
    >>> signal = np.sin(np.log(np.random.sample(666)))
    >>> mse = nk.complexity_entropy_multiscale(signal)
    >>> mse_values = mse["MSE_Values"]

    Notes
    ----------
    *Details*

    - **multiscale entropy**: Entropy is a measure of unpredictability of
      the state, or equivalently, of its average information content.
      Multiscale entropy (MSE) analysis is a new method of measuring the
      complexity of coarse grained versions of the original data, where
      coarse graining is at all scale factors from 1:max_scale_factor.

    *Authors*

    - tjugo (https://github.com/nikdon)
    - Dominique Makowski (https://github.com/DominiqueMakowski)
    - Anthony Gatti (https://github.com/gattia)

    *Dependencies*

    - numpy
    - nolds

    *See Also*

    - pyEntropy package: https://github.com/nikdon/pyEntropy

    References
    -----------
    - Richman, J. S., & Moorman, J. R. (2000). Physiological time-series
      analysis using approximate entropy and sample entropy. American
      Journal of Physiology-Heart and Circulatory Physiology, 278(6),
      H2039-H2049.
    - Costa, M., Goldberger, A. L., & Peng, C. K. (2005). Multiscale entropy
      analysis of biological signals. Physical review E, 71(2), 021906.
    - Gow, B. J., Peng, C. K., Wayne, P. M., & Ahn, A. C. (2015). Multiscale
      entropy analysis of center-of-pressure dynamics in human postural
      control: methodological considerations. Entropy, 17(12), 7926-7947.
    - Norris, P. R., Anderson, S. M., Jenkins, J. M., Williams, A. E., &
      Morris Jr, J. A. (2008). Heart rate multiscale entropy at three hours
      predicts hospital mortality in 3,154 trauma patients. Shock, 30(1),
      17-22.
    """
    if r == "default":
        r = 0.15*np.std(signal)

    n = len(signal)
    per_scale_entropy_values = np.zeros(max_scale_factor)

    # Compute SampEn for all scale factors
    for i in range(max_scale_factor):
        b = int(np.fix(n / (i + 1)))
        temp_ts = [0] * int(b)
        for j in range(b):
            num = sum(signal[j * (i + 1): (j + 1) * (i + 1)])
            den = i + 1
            temp_ts[j] = float(num) / float(den)

        se = nolds.sampen(temp_ts, m, r, nolds.measures.rowwise_chebyshev,
                          debug_plot=False, plot_file=None)

        if np.isinf(se):
            print("NeuroKit warning: complexity_entropy_multiscale(): Signal "
                  "might be too short to compute SampEn for scale factors > "
                  + str(i) + ". Setting max_scale_factor to " + str(i) + ".")
            max_scale_factor = i
            break
        else:
            per_scale_entropy_values[i] = se

    all_entropy_values = per_scale_entropy_values[0:max_scale_factor]

    # Compute final indices
    parameters = {"max_scale_factor": max_scale_factor,
                  "r": r,
                  "m": m}
    mse = {"MSE_Parameters": parameters,
           "MSE_Values": all_entropy_values,
           "MSE_AUC": np.trapz(all_entropy_values),
           "MSE_Sum": np.sum(all_entropy_values)}
    return mse
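The inner loop above is the coarse-graining step: at scale tau it averages non-overlapping windows of length tau before sample entropy is computed on the shortened series. A standalone numpy sketch of just that step:

import numpy as np

def coarse_grain(signal, scale):
    """Average non-overlapping windows of length `scale`
    (the per-scale series that SampEn is then computed on)."""
    n = len(signal) // scale
    return np.asarray(signal[:n * scale]).reshape(n, scale).mean(axis=1)

sig = np.arange(12, dtype=float)
assert list(coarse_grain(sig, 3)) == [1.0, 4.0, 7.0, 10.0]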
java
public BoardingPassBuilder addBoardingPass(String passengerName, String pnrNumber, String logoImageUrl, String aboveBarCodeImageUrl) { return new BoardingPassBuilder(this, passengerName, pnrNumber, logoImageUrl, aboveBarCodeImageUrl); }
python
def data_filler_simple_registration(self, number_of_rows, conn):
    '''creates and fills the table with simple registration information'''
    cursor = conn.cursor()
    cursor.execute('''
        CREATE TABLE simple_registration(id TEXT PRIMARY KEY, email TEXT, password TEXT)
    ''')
    conn.commit()

    multi_lines = []
    try:
        for i in range(0, number_of_rows):
            multi_lines.append((rnd_id_generator(self),
                                self.faker.safe_email(),
                                self.faker.md5(raw_output=False)))
        cursor.executemany('insert into simple_registration values(?,?,?)', multi_lines)
        conn.commit()
        logger.warning('simple_registration commits are successful after write job!', extra=d)
    except Exception as e:
        logger.error(e, extra=d)
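A minimal standalone sketch of the same executemany pattern against an in-memory SQLite database, with invented rows in place of the faker-generated ones:

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE simple_registration(id TEXT PRIMARY KEY, email TEXT, password TEXT)")
rows = [("u1", "a@example.com", "x" * 32),
        ("u2", "b@example.com", "y" * 32)]
# One round-trip inserts all rows with bound parameters.
cur.executemany("INSERT INTO simple_registration VALUES (?, ?, ?)", rows)
conn.commit()
assert cur.execute("SELECT COUNT(*) FROM simple_registration").fetchone()[0] == 2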
python
def populateFromFile(self, dataUrl, indexFile=None):
    """
    Populates the instance variables of this ReadGroupSet from the
    specified dataUrl and indexFile. If indexFile is not specified,
    guess the usual form.
    """
    self._dataUrl = dataUrl
    self._indexFile = indexFile
    if indexFile is None:
        self._indexFile = dataUrl + ".bai"
    samFile = self.getFileHandle(self._dataUrl)
    self._setHeaderFields(samFile)
    if 'RG' not in samFile.header or len(samFile.header['RG']) == 0:
        readGroup = HtslibReadGroup(self, self.defaultReadGroupName)
        self.addReadGroup(readGroup)
    else:
        for readGroupHeader in samFile.header['RG']:
            readGroup = HtslibReadGroup(self, readGroupHeader['ID'])
            readGroup.populateFromHeader(readGroupHeader)
            self.addReadGroup(readGroup)
    self._bamHeaderReferenceSetName = None
    for referenceInfo in samFile.header['SQ']:
        if 'AS' not in referenceInfo:
            infoDict = parseMalformedBamHeader(referenceInfo)
        else:
            infoDict = referenceInfo
        name = infoDict.get('AS', references.DEFAULT_REFERENCESET_NAME)
        if self._bamHeaderReferenceSetName is None:
            self._bamHeaderReferenceSetName = name
        elif self._bamHeaderReferenceSetName != name:
            raise exceptions.MultipleReferenceSetsInReadGroupSet(
                self._dataUrl, name, self._bamFileReferenceName)
    self._numAlignedReads = samFile.mapped
    self._numUnalignedReads = samFile.unmapped
python
def register(self, key_or_tag, f_val):
    """Register a custom transit tag and decoder/parser function for use
    during reads.
    """
    self.reader.decoder.register(key_or_tag, f_val)
java
public java.util.List<VpcEndpointConnection> getVpcEndpointConnections() { if (vpcEndpointConnections == null) { vpcEndpointConnections = new com.amazonaws.internal.SdkInternalList<VpcEndpointConnection>(); } return vpcEndpointConnections; }
java
public List<String> selectJars(String targetPath) { List<String> jarPaths = new ArrayList<String>(); if (targetPath == null) { return jarPaths; } File targetDir = new File(targetPath); if (!UtilImpl_FileUtils.exists(targetDir)) { return jarPaths; } File[] targetFiles = UtilImpl_FileUtils.listFiles(targetDir); if (targetFiles != null) { for (File nextTargetFile : targetFiles) { String nextTargetName = nextTargetFile.getName(); if (nextTargetName.toUpperCase().endsWith(".JAR")) { String nextTargetPath = targetPath + "/" + nextTargetName; jarPaths.add(nextTargetPath); } } } return jarPaths; }
python
def clicked(self, px, py):
    '''see if the image has been clicked on'''
    if self.hidden:
        return None
    if (abs(px - self.posx) > self.width/2 or
            abs(py - self.posy) > self.height/2):
        return None
    return math.sqrt((px-self.posx)**2 + (py-self.posy)**2)
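The method above is a rectangular hit test followed by a distance-from-centre computation. The same logic with invented widget geometry:

import math

posx, posy, width, height = 100, 100, 40, 20   # hypothetical widget centre/size
px, py = 110, 105                              # click position

inside = abs(px - posx) <= width / 2 and abs(py - posy) <= height / 2
distance = math.sqrt((px - posx) ** 2 + (py - posy) ** 2)
assert inside and round(distance, 2) == 11.18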
java
public boolean renameFile(@NotNull final Transaction txn, @NotNull final File origin, @NotNull final String newPath) { final ArrayByteIterable key = StringBinding.stringToEntry(newPath); final ByteIterable value = pathnames.get(txn, key); if (value != null) { return false; } final File newFile = new File(newPath, origin.getDescriptor(), origin.getCreated(), System.currentTimeMillis()); pathnames.put(txn, key, newFile.toByteIterable()); pathnames.delete(txn, StringBinding.stringToEntry(origin.getPath())); return true; }
python
def split_field_path(path):
    """Split a field path into valid elements (without dots).

    Args:
        path (str): field path to be lexed.

    Returns:
        List(str): tokens

    Raises:
        ValueError: if the path does not match the elements-interspersed-
        with-dots pattern.
    """
    if not path:
        return []

    elements = []
    want_dot = False

    for element in _tokenize_field_path(path):
        if want_dot:
            if element != ".":
                raise ValueError("Invalid path: {}".format(path))
            else:
                want_dot = False
        else:
            if element == ".":
                raise ValueError("Invalid path: {}".format(path))
            elements.append(element)
            want_dot = True

    if not want_dot or not elements:
        raise ValueError("Invalid path: {}".format(path))

    return elements
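Assuming the module's _tokenize_field_path yields elements and dots in order, the state machine above accepts well-formed dotted paths and rejects leading, trailing, or doubled dots. A self-contained sketch with an invented stand-in tokenizer:

import re

def _tokenize(path):
    # Hypothetical stand-in for the module's _tokenize_field_path:
    # emit dots and non-dot runs as separate tokens.
    return re.findall(r"[^.]+|\.", path)

assert _tokenize("a.b.c") == ["a", ".", "b", ".", "c"]
# Fed through the element/dot state machine above:
#   "a.b.c"             -> ["a", "b", "c"]
#   ".a", "a.", "a..b"  -> ValueError (dot arrives in the wrong state)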
python
def _hash_html_blocks(self, text, raw=False):
    """Hashify HTML blocks

    We only want to do this for block-level HTML tags, such as headers,
    lists, and tables. That's because we still want to wrap <p>s around
    "paragraphs" that are wrapped in non-block-level tags, such as anchors,
    phrase emphasis, and spans. The list of tags we're looking for is
    hard-coded.

    @param raw {boolean} indicates if these are raw HTML blocks in
        the original source. It makes a difference in "safe" mode.
    """
    if '<' not in text:
        return text

    # Pass `raw` value into our calls to self._hash_html_block_sub.
    hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)

    # First, look for nested blocks, e.g.:
    #   <div>
    #       <div>
    #       tags for inner block must be indented.
    #       </div>
    #   </div>
    #
    # The outermost tags must start at the left margin for this to match,
    # and the inner nested divs must be indented.
    # We need to do this before the next, more liberal match, because the
    # next match will start at the first `<div>` and stop at the first
    # `</div>`.
    text = self._strict_tag_block_re.sub(hash_html_block_sub, text)

    # Now match more liberally, simply from `\n<tag>` to `</tag>\n`
    text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)

    # Special case just for <hr />. It was easier to make a special
    # case than to make the other regex more complicated.
    if "<hr" in text:
        _hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
        text = _hr_tag_re.sub(hash_html_block_sub, text)

    # Special case for standalone HTML comments:
    if "<!--" in text:
        start = 0
        while True:
            # Delimiters for next comment block.
            try:
                start_idx = text.index("<!--", start)
            except ValueError:
                break
            try:
                end_idx = text.index("-->", start_idx) + 3
            except ValueError:
                break

            # Start position for next comment block search.
            start = end_idx

            # Validate whitespace before comment.
            if start_idx:
                # - Up to `tab_width - 1` spaces before start_idx.
                for i in range(self.tab_width - 1):
                    if text[start_idx - 1] != ' ':
                        break
                    start_idx -= 1
                    if start_idx == 0:
                        break
                # - Must be preceded by 2 newlines or hit the start of
                #   the document.
                if start_idx == 0:
                    pass
                elif start_idx == 1 and text[0] == '\n':
                    start_idx = 0  # to match minute detail of Markdown.pl regex
                elif text[start_idx-2:start_idx] == '\n\n':
                    pass
                else:
                    break

            # Validate whitespace after comment.
            # - Any number of spaces and tabs.
            while end_idx < len(text):
                if text[end_idx] not in ' \t':
                    break
                end_idx += 1
            # - Must be followed by 2 newlines or hit end of text.
            if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
                continue

            # Escape and hash (must match `_hash_html_block_sub`).
            html = text[start_idx:end_idx]
            if raw and self.safe_mode:
                html = self._sanitize_html(html)
            key = _hash_text(html)
            self.html_blocks[key] = html
            text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]

    if "xml" in self.extras:
        # Treat XML processing instructions and namespaced one-liner
        # tags as if they were block HTML tags. E.g., if standalone
        # (i.e. are their own paragraph), the following do not get
        # wrapped in a <p> tag:
        #    <?foo bar?>
        #
        #    <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/>
        _xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width)
        text = _xml_oneliner_re.sub(hash_html_block_sub, text)

    return text
java
public CloseableResource<FileSystemMasterClient> acquireMasterClientResource() { return new CloseableResource<FileSystemMasterClient>(mFileSystemMasterClientPool.acquire()) { @Override public void close() { mFileSystemMasterClientPool.release(get()); } }; }
python
def get_all_handleable_roots(self):
    """
    Get list of all handleable devices, return only those that represent
    root nodes within the filtered device tree.
    """
    nodes = self.get_device_tree()
    return [node.device
            for node in sorted(nodes.values(), key=DevNode._sort_key)
            if not node.ignored and node.device
            and (node.root == '/' or nodes[node.root].ignored)]
python
def triplify(binding):
    """
    Recursively generate RDF statement triples from the data and schema
    supplied to the application.
    """
    triples = []
    if binding.data is None:
        return None, triples

    if binding.is_object:
        return triplify_object(binding)
    elif binding.is_array:
        for item in binding.items:
            _, item_triples = triplify(item)
            triples.extend(item_triples)
        return None, triples
    else:
        subject = binding.parent.subject
        triples.append((subject, binding.predicate, binding.object))
        if binding.reverse is not None:
            triples.append((binding.object, binding.reverse, subject))
        return subject, triples
java
@Override
public void connectToResourceManager(@Nonnull ResourceManagerGateway resourceManagerGateway) {
    this.resourceManagerGateway = checkNotNull(resourceManagerGateway);

    // work on all slots waiting for this connection
    for (PendingRequest pendingRequest : waitingForResourceManager.values()) {
        requestSlotFromResourceManager(resourceManagerGateway, pendingRequest);
    }

    // all sent off
    waitingForResourceManager.clear();
}
java
public GroovyExpression generateArithmeticExpression(GroovyExpression left, String operator, GroovyExpression right) throws AtlasException { ArithmeticOperator op = ArithmeticOperator.lookup(operator); return new ArithmeticExpression(left, op, right); }
python
def save_params(
        self, f=None, f_params=None, f_optimizer=None, f_history=None):
    """Saves the module's parameters, history, and optimizer,
    not the whole object.

    To save the whole object, use pickle.

    ``f_params`` and ``f_optimizer`` use PyTorch's
    :func:`~torch.save`.

    Parameters
    ----------
    f_params : file-like object, str, None (default=None)
      Path of module parameters. Pass ``None`` to not save

    f_optimizer : file-like object, str, None (default=None)
      Path of optimizer. Pass ``None`` to not save

    f_history : file-like object, str, None (default=None)
      Path to history. Pass ``None`` to not save

    f : deprecated

    Examples
    --------
    >>> before = NeuralNetClassifier(mymodule)
    >>> before.save_params(f_params='model.pkl',
    ...     f_optimizer='optimizer.pkl', f_history='history.json')
    >>> after = NeuralNetClassifier(mymodule).initialize()
    >>> after.load_params(f_params='model.pkl',
    ...     f_optimizer='optimizer.pkl', f_history='history.json')
    """
    # TODO: Remove warning in a future release
    if f is not None:
        warnings.warn(
            "f argument was renamed to f_params and will be removed "
            "in the next release. To make your code future-proof it is "
            "recommended to explicitly specify keyword arguments' names "
            "instead of relying on positional order.",
            DeprecationWarning)
        f_params = f

    if f_params is not None:
        if not hasattr(self, 'module_'):
            raise NotInitializedError(
                "Cannot save parameters of an un-initialized model. "
                "Please initialize first by calling .initialize() "
                "or by fitting the model with .fit(...).")
        torch.save(self.module_.state_dict(), f_params)

    if f_optimizer is not None:
        if not hasattr(self, 'optimizer_'):
            raise NotInitializedError(
                "Cannot save state of an un-initialized optimizer. "
                "Please initialize first by calling .initialize() "
                "or by fitting the model with .fit(...).")
        torch.save(self.optimizer_.state_dict(), f_optimizer)

    if f_history is not None:
        self.history.to_file(f_history)
java
@Override public SendTaskFailureResult sendTaskFailure(SendTaskFailureRequest request) { request = beforeClientExecution(request); return executeSendTaskFailure(request); }
java
@NonNull /*package*/ <T extends View> Parcelable saveInstanceState(@NonNull T target, @Nullable Parcelable state) { Injector.View<T> injector = safeGet(target, Injector.View.DEFAULT); return injector.save(target, state); }
java
public static String getIsolationLevelString(int level) { switch (level) { case Connection.TRANSACTION_NONE: return "NONE (" + level + ')'; case Connection.TRANSACTION_READ_UNCOMMITTED: return "READ UNCOMMITTED (" + level + ')'; case Connection.TRANSACTION_READ_COMMITTED: return "READ COMMITTED (" + level + ')'; case Connection.TRANSACTION_REPEATABLE_READ: return "REPEATABLE READ (" + level + ')'; case Connection.TRANSACTION_SERIALIZABLE: return "SERIALIZABLE (" + level + ')'; case TRANSACTION_SNAPSHOT: case TRANSACTION_SS_SNAPSHOT: return "SNAPSHOT (" + level + ')'; } return "UNKNOWN ISOLATION LEVEL CONSTANT (" + level + ')'; }
python
def make_encoder(self, formula_dict, inter_list, param_dict):
    """ make the encoder function """
    X_dict = {}
    Xcol_dict = {}
    encoder_dict = {}

    # first, replace param_dict[key] = values, with param_dict[key] = dmatrix
    for key in formula_dict:
        encoding, arg = formula_dict[key]
        if 'Dev' in encoding:
            # make deviation encoded design matrix
            drop_name = arg
            # encode
            deviation_encoder, X_sub, colnames_sub = _dev_encode(param_dict, drop_name, key)
            # additionally, store in dictionary for use by interactions
            X_dict[key] = X_sub
            Xcol_dict[key] = colnames_sub
            # store dictionary of encoder functions to keep for prediction
            encoder_dict[key] = deviation_encoder
        elif 'Dum' in encoding:
            # make dummy variable encoding design mat
            ref_name = arg
            dummy_encoder, X_sub, colnames_sub = _dum_encode(param_dict, ref_name, key)
            # additionally, store in dictionary for use by interactions
            X_dict[key] = X_sub
            Xcol_dict[key] = colnames_sub
            # store dictionary of encoder functions to keep for prediction
            encoder_dict[key] = dummy_encoder
        elif 'Poly' in encoding:
            # make polynomial encoding design mat
            degree = arg
            polynomial_encoder, X_sub, colnames_sub = _poly_encode(param_dict, degree, key)
            # additionally, store in dictionary for use by interactions
            X_dict[key] = X_sub
            Xcol_dict[key] = colnames_sub
            # store dictionary of encoder functions to keep for prediction
            encoder_dict[key] = polynomial_encoder
        else:
            print(encoding)
            raise Exception("Encoding name error")

    # now compute interaction design matrices
    for interaction in inter_list:
        if len(interaction) > 3:
            raise Exception("Doesn't allow 4-way or higher interaction terms")
        elif len(interaction) == 3:
            param_name1 = interaction[0]
            param_name2 = interaction[1]
            param_name3 = interaction[2]
            col_names1 = Xcol_dict[param_name1]
            col_names2 = Xcol_dict[param_name2]
            col_names3 = Xcol_dict[param_name3]

            # make 3-way encoder function
            def threeway_encoder(param_name1, param_name2, param_name3,
                                 col_names1, col_names2, col_names3, X_dict):
                """ needs the three names of the parameters to be encoded,
                as well as a dictionary containing the already encoded
                single parameter design matrices, keyed by name """
                X1 = X_dict[param_name1]
                X2 = X_dict[param_name2]
                X3 = X_dict[param_name3]
                X_int = []
                names_int = []
                for i in np.arange(0, X1.shape[1]):
                    for j in np.arange(0, X2.shape[1]):
                        for k in np.arange(0, X3.shape[1]):
                            X_int.append(X1[:, i] * X2[:, j] * X3[:, k])
                            names_int.append(col_names1[i] + "*" +
                                             col_names2[j] + "*" + col_names3[k])
                # make X_int from lists to np array
                X_int = np.array(X_int).T
                return X_int, names_int

            encoder_dict['threeway'] = threeway_encoder
        elif len(interaction) == 2:
            # there are two interaction terms (A*B)
            param_name1 = interaction[0]
            param_name2 = interaction[1]
            col_names1 = Xcol_dict[param_name1]
            col_names2 = Xcol_dict[param_name2]

            # make twoway_encoder function
            def twoway_encoder(param_name1, param_name2,
                               col_names1, col_names2, X_dict):
                X1 = X_dict[param_name1]
                X2 = X_dict[param_name2]
                X_int = []
                names_int = []
                for i in np.arange(0, X1.shape[1]):
                    for j in np.arange(0, X2.shape[1]):
                        X_int.append(X1[:, i] * X2[:, j])
                        names_int.append(col_names1[i] + "*" + col_names2[j])
                X_int = np.array(X_int).T
                return X_int, names_int

            encoder_dict['twoway'] = twoway_encoder
        else:
            raise Exception("Error while evaluating meaning of interaction term")

    # make key in encoder to specify which columns are active
    encoder_dict['trimmed_columns'] = self._trimmed_columns
    return encoder_dict
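The `_dev_encode`, `_dum_encode`, and `_poly_encode` helpers referenced above aren't shown here. For orientation: dummy coding marks level membership with 0/1 columns and omits a reference level, while deviation (sum-to-zero) coding additionally assigns -1 across all columns to the dropped level. A minimal numpy sketch of the two codings (function names and return shapes are illustrative, not the module's actual helpers):

import numpy as np

def dummy_encode(values, ref):
    """0/1 columns for every level except the reference level."""
    levels = [v for v in sorted(set(values)) if v != ref]
    X = np.array([[1.0 if v == lev else 0.0 for lev in levels] for v in values])
    return X, levels

def deviation_encode(values, drop):
    """Like dummy coding, but the dropped level gets -1 in every column."""
    X, levels = dummy_encode(values, drop)
    X[np.array(values) == drop] = -1.0
    return X, levels

vals = ['a', 'b', 'c', 'a']
print(dummy_encode(vals, 'a')[0])      # rows: [0,0], [1,0], [0,1], [0,0]
print(deviation_encode(vals, 'a')[0])  # rows: [-1,-1], [1,0], [0,1], [-1,-1]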
java
public static int cudaMemcpyArrayToArray(cudaArray dst, long wOffsetDst, long hOffsetDst, cudaArray src, long wOffsetSrc, long hOffsetSrc, long count, int cudaMemcpyKind_kind) { return checkResult(cudaMemcpyArrayToArrayNative(dst, wOffsetDst, hOffsetDst, src, wOffsetSrc, hOffsetSrc, count, cudaMemcpyKind_kind)); }
java
public static DateTimeField getInstance(DateTimeField field) { if (field == null) { return null; } if (field instanceof LenientDateTimeField) { field = ((LenientDateTimeField)field).getWrappedField(); } if (!field.isLenient()) { return field; } return new StrictDateTimeField(field); }
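The unwrap-then-wrap idiom here is worth calling out: any lenient decorator is removed before checking, so a strict wrapper never stacks on top of a lenient one, and fields that are already strict are returned untouched. A toy Python rendering of the same idiom (class names hypothetical, not Joda-Time's API):

class Field:
    lenient = False

class LenientField(Field):
    lenient = True
    def __init__(self, wrapped):
        self.wrapped = wrapped

class StrictField(Field):
    def __init__(self, wrapped):
        self.wrapped = wrapped

def strict(field):
    if field is None:
        return None
    if isinstance(field, LenientField):
        field = field.wrapped   # unwrap first: never stack strict over lenient
    if not field.lenient:
        return field            # already strict: no extra wrapper needed
    return StrictField(field)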
java
public static <T extends Enum<?>> T enumFromString(final String value, final T[] values) { if (value == null) { return null; } for (T v : values) { if (v.toString().equalsIgnoreCase(value)) { return v; } } return null; }
java
private List<TimeStep<State, Observation, Path>> createTimeSteps(
        List<Observation> filteredGPXEntries,
        List<Collection<QueryResult>> queriesPerEntry,
        QueryGraph queryGraph) {
    final int n = filteredGPXEntries.size();
    if (queriesPerEntry.size() != n) {
        throw new IllegalArgumentException(
                "filteredGPXEntries and queriesPerEntry must have same size.");
    }

    final List<TimeStep<State, Observation, Path>> timeSteps = new ArrayList<>();
    for (int i = 0; i < n; i++) {
        Observation gpxEntry = filteredGPXEntries.get(i);
        final Collection<QueryResult> queryResults = queriesPerEntry.get(i);

        List<State> candidates = new ArrayList<>();
        for (QueryResult qr : queryResults) {
            int closestNode = qr.getClosestNode();
            if (queryGraph.isVirtualNode(closestNode)) {
                // get virtual edges:
                List<VirtualEdgeIteratorState> virtualEdges = new ArrayList<>();
                EdgeIterator iter = queryGraph.createEdgeExplorer().setBaseNode(closestNode);
                while (iter.next()) {
                    if (!queryGraph.isVirtualEdge(iter.getEdge())) {
                        throw new RuntimeException("Virtual nodes must only have virtual edges "
                                + "to adjacent nodes.");
                    }
                    virtualEdges.add((VirtualEdgeIteratorState)
                            queryGraph.getEdgeIteratorState(iter.getEdge(), iter.getAdjNode()));
                }
                if (virtualEdges.size() != 2) {
                    throw new RuntimeException("Each virtual node must have exactly 2 "
                            + "virtual edges (reverse virtual edges are not returned by the "
                            + "EdgeIterator).");
                }

                // Create a directed candidate for each of the two possible directions through
                // the virtual node. This is needed to penalize U-turns at virtual nodes
                // (see also #51). We need to add candidates for both directions because
                // we don't know yet which is the correct one. This will be figured
                // out by the Viterbi algorithm.
                //
                // Adding further candidates to explicitly allow U-turns through setting
                // incomingVirtualEdge==outgoingVirtualEdge doesn't make sense because this
                // would actually allow performing a U-turn without a penalty by going to and
                // from the virtual node through the other virtual edge or its reverse edge.
                VirtualEdgeIteratorState e1 = virtualEdges.get(0);
                VirtualEdgeIteratorState e2 = virtualEdges.get(1);
                for (int j = 0; j < 2; j++) {
                    // get favored/unfavored edges:
                    VirtualEdgeIteratorState incomingVirtualEdge = j == 0 ? e1 : e2;
                    VirtualEdgeIteratorState outgoingVirtualEdge = j == 0 ? e2 : e1;
                    // create candidate
                    QueryResult vqr = new QueryResult(qr.getQueryPoint().lat, qr.getQueryPoint().lon);
                    vqr.setQueryDistance(qr.getQueryDistance());
                    vqr.setClosestNode(qr.getClosestNode());
                    vqr.setWayIndex(qr.getWayIndex());
                    vqr.setSnappedPosition(qr.getSnappedPosition());
                    vqr.setClosestEdge(qr.getClosestEdge());
                    vqr.calcSnappedPoint(distanceCalc);
                    State candidate = new State(gpxEntry, vqr, incomingVirtualEdge,
                            outgoingVirtualEdge);
                    candidates.add(candidate);
                }
            } else {
                // Create an undirected candidate for the real node.
                State candidate = new State(gpxEntry, qr);
                candidates.add(candidate);
            }
        }

        final TimeStep<State, Observation, Path> timeStep = new TimeStep<>(gpxEntry, candidates);
        timeSteps.add(timeStep);
    }
    return timeSteps;
}
python
def preflight(self, program, start, stop, resolution=None, max_delay=None): """Preflight the given SignalFlow program and stream the output back.""" params = self._get_params(start=start, stop=stop, resolution=resolution, maxDelay=max_delay) def exec_fn(since=None): if since: params['start'] = since return self._transport.preflight(program, params) c = computation.Computation(exec_fn) self._computations.add(c) return c
python
def Parse(self, conditions, host_data): """Runs methods that evaluate whether collected host_data has an issue. Args: conditions: A list of conditions to determine which Methods to trigger. host_data: A map of artifacts and rdf data. Returns: A CheckResult populated with Anomalies if an issue exists. """ result = CheckResult(check_id=self.check_id) methods = self.SelectChecks(conditions) result.ExtendAnomalies([m.Parse(conditions, host_data) for m in methods]) return result
java
public OvhSecondaryDNS serviceName_secondaryDnsDomains_domain_GET(String serviceName, String domain) throws IOException { String qPath = "/vps/{serviceName}/secondaryDnsDomains/{domain}"; StringBuilder sb = path(qPath, serviceName, domain); String resp = exec(qPath, "GET", sb.toString(), null); return convertTo(resp, OvhSecondaryDNS.class); }
java
@CheckReturnValue @BackpressureSupport(BackpressureKind.FULL) @SchedulerSupport(SchedulerSupport.NONE) public final <U, V> Flowable<V> flatMapIterable(final Function<? super T, ? extends Iterable<? extends U>> mapper, final BiFunction<? super T, ? super U, ? extends V> resultSelector) { ObjectHelper.requireNonNull(mapper, "mapper is null"); ObjectHelper.requireNonNull(resultSelector, "resultSelector is null"); return flatMap(FlowableInternalHelper.flatMapIntoIterable(mapper), resultSelector, false, bufferSize(), bufferSize()); }
python
def iter_sources(self):
    """Iterates over all source names and IDs."""
    for src_id in range(self.get_source_count()):
        yield src_id, self.get_source_name(src_id)
python
def _search_in_bases(type_):
    """Implementation detail."""
    found = False
    for base_type in type_.declaration.bases:
        try:
            found = internal_type_traits.get_by_name(
                base_type.related_class, "element_type")
        except runtime_errors.declaration_not_found_t:
            pass
        if found:
            return found
    raise RuntimeError(
        "Unable to find 'element_type' declaration in type '%s'."
        % type_.decl_string)
python
def read_file(path): """ Read file to string. Arguments: path (str): Source. """ with open(must_exist(path)) as infile: r = infile.read() return r
java
private Pair<List<CloseableIterator<Entry<KeyType>>>, List<Future>> buildCombineTree(
    List<? extends CloseableIterator<Entry<KeyType>>> childIterators,
    Supplier<ByteBuffer> bufferSupplier,
    AggregatorFactory[] combiningFactories,
    int combineDegree,
    List<String> dictionary
)
{
  final int numChildLevelIterators = childIterators.size();
  final List<CloseableIterator<Entry<KeyType>>> childIteratorsOfNextLevel = new ArrayList<>();
  final List<Future> combineFutures = new ArrayList<>();

  // The below algorithm creates the combining nodes of the current level. It first checks whether the number of
  // children to be combined together is 1. If it is, the intermediate combining node for that child is not needed;
  // instead, the child can be directly connected to a node of the parent level. Here is an example of the generated
  // tree when numLeafNodes = 6 and leafCombineDegree = intermediateCombineDegree = 2. See the description of
  // MINIMUM_LEAF_COMBINE_DEGREE for more details about leafCombineDegree and intermediateCombineDegree.
  //
  //        o
  //       / \
  //      o   \
  //     / \   \
  //    o   o   o
  //   / \ / \ / \
  //   o o o o o o
  //
  // We can expect that the aggregates can be combined as early as possible because the tree is built in a bottom-up
  // manner.

  for (int i = 0; i < numChildLevelIterators; i += combineDegree) {
    if (i < numChildLevelIterators - 1) {
      final List<? extends CloseableIterator<Entry<KeyType>>> subIterators = childIterators.subList(
          i,
          Math.min(i + combineDegree, numChildLevelIterators)
      );
      final Pair<CloseableIterator<Entry<KeyType>>, Future> iteratorAndFuture = runCombiner(
          subIterators,
          bufferSupplier.get(),
          combiningFactories,
          dictionary
      );

      childIteratorsOfNextLevel.add(iteratorAndFuture.lhs);
      combineFutures.add(iteratorAndFuture.rhs);
    } else {
      // If there remains one child, it can be directly connected to a node of the parent level.
      childIteratorsOfNextLevel.add(childIterators.get(i));
    }
  }

  if (childIteratorsOfNextLevel.size() == 1) {
    // This is the root
    return Pair.of(childIteratorsOfNextLevel, combineFutures);
  } else {
    // Build the parent level iterators
    final Pair<List<CloseableIterator<Entry<KeyType>>>, List<Future>> parentIteratorsAndFutures =
        buildCombineTree(
            childIteratorsOfNextLevel,
            bufferSupplier,
            combiningFactories,
            intermediateCombineDegree,
            dictionary
        );
    combineFutures.addAll(parentIteratorsAndFutures.rhs);
    return Pair.of(parentIteratorsAndFutures.lhs, combineFutures);
  }
}
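Since each loop iteration emits exactly one next-level node, whether it combines a full group or passes a lone trailing child through, every level has ceil(n / degree) nodes. A small Python sketch of just that level-size arithmetic, matching the ASCII diagram above:

import math

def combine_tree_levels(num_leaves, leaf_degree, intermediate_degree):
    """Sizes of each level, bottom-up, following the grouping rule above."""
    levels = [num_leaves]
    degree = leaf_degree
    while levels[-1] > 1:
        # One parent per group of `degree` children; a lone trailing
        # child is passed through, which still yields one node.
        levels.append(math.ceil(levels[-1] / degree))
        degree = intermediate_degree
    return levels

print(combine_tree_levels(6, 2, 2))  # -> [6, 3, 2, 1], matching the diagram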
java
@Override public <OUTPUT extends GVRHybridObject, INTER> void registerCallback( GVRContext gvrContext, Class<OUTPUT> outClass, CancelableCallback<OUTPUT> callback, GVRAndroidResource request, int priority) { requests.registerCallback(gvrContext, outClass, callback, request, priority); }
python
def getLoader(user, repo, sha=None, prov=None): """Build a fileLoader (LocalLoader or GithubLoader) for the given repository.""" if user is None and repo is None: loader = LocalLoader() else: loader = GithubLoader(user, repo, sha, prov) return loader
python
def determine_name(func): """ Given a function, returns the name of the function. Ex:: from random import choice determine_name(choice) # Returns 'choice' :param func: The callable :type func: function :returns: Name string """ if hasattr(func, '__name__'): return func.__name__ elif hasattr(func, '__class__'): return func.__class__.__name__ # This shouldn't be possible, but blow up if so. raise AttributeError("Provided callable '{}' has no name.".format( func ))
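Usage, as the docstring suggests: plain functions report their __name__, while callable instances (which lack one) fall back to the class name (assuming determine_name above is in scope):

from random import choice

class Greeter:
    def __call__(self):
        return 'hi'

print(determine_name(choice))     # 'choice'
print(determine_name(Greeter()))  # 'Greeter'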
python
def build_tree_file_pathname(filename, directory_depth=8, pathname_separator_character=os.sep): """ Return a file pathname which pathname is built of the specified number of sub-directories, and where each directory is named after the nth letter of the filename corresponding to the directory depth. Examples:: >>> build_tree_file_pathname('foo.txt', 2, '/') 'f/o/foo.txt' >>> build_tree_file_pathname('0123456789abcdef') '0/1/2/3/4/5/6/7/0123456789abcdef' @param filename: name of a file, with or without extension. @param directory_depth: number of sub-directories to be generated. @param pathname_separator_character: character to be used to separate pathname components, such as '/' for POSIX and '\\' for Windows. If not defined, the default is the character used by the operating system ``os.sep``. @return: a file pathname. """ return build_tree_pathname(filename, directory_depth, pathname_separator_character) + filename
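The build_tree_pathname helper it delegates to is not shown here. A plausible sketch consistent with the doctest output (one single-character directory per depth level, capped at the filename length) might be the following; this is an assumption about the helper, not its actual source:

import os

def build_tree_pathname(filename, directory_depth=8,
                        pathname_separator_character=os.sep):
    # 'foo.txt', depth 2, '/'  ->  'f/o/'
    depth = min(directory_depth, len(filename))
    return ''.join(c + pathname_separator_character
                   for c in filename[:depth])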
python
def new_program(self, _id, series, title, subtitle, description, mpaaRating, starRating, runTime, year, showType, colorCode, originalAirDate, syndicatedEpisodeNumber, advisories): """Callback run for each new program entry""" raise NotImplementedError()
java
public static MappedByteBuffer map(File file) { N.checkArgNotNull(file); return map(file, MapMode.READ_ONLY); }
java
public void setEscapeHtml(String value) { if (value != null) { m_escapeHtml = Boolean.valueOf(value.trim()).booleanValue(); } }
python
def _meet(intervals_hier, labels_hier, frame_size): '''Compute the (sparse) least-common-ancestor (LCA) matrix for a hierarchical segmentation. For any pair of frames ``(s, t)``, the LCA is the deepest level in the hierarchy such that ``(s, t)`` are contained within a single segment at that level. Parameters ---------- intervals_hier : list of ndarray An ordered list of segment interval arrays. The list is assumed to be ordered by increasing specificity (depth). labels_hier : list of list of str ``labels_hier[i]`` contains the segment labels for the ``i``th layer of the annotations frame_size : number The length of the sample frames (in seconds) Returns ------- meet_matrix : scipy.sparse.csr_matrix A sparse matrix such that ``meet_matrix[i, j]`` contains the depth of the deepest segment label containing both ``i`` and ``j``. ''' frame_size = float(frame_size) # Figure out how many frames we need n_start, n_end = _hierarchy_bounds(intervals_hier) n = int((_round(n_end, frame_size) - _round(n_start, frame_size)) / frame_size) # Initialize the meet matrix meet_matrix = scipy.sparse.lil_matrix((n, n), dtype=np.uint8) for level, (intervals, labels) in enumerate(zip(intervals_hier, labels_hier), 1): # Encode the labels at this level lab_enc = util.index_labels(labels)[0] # Find unique agreements int_agree = np.triu(np.equal.outer(lab_enc, lab_enc)) # Map intervals to frame indices int_frames = (_round(intervals, frame_size) / frame_size).astype(int) # For each intervals i, j where labels agree, update the meet matrix for (seg_i, seg_j) in zip(*np.where(int_agree)): idx_i = slice(*list(int_frames[seg_i])) idx_j = slice(*list(int_frames[seg_j])) meet_matrix[idx_i, idx_j] = level if seg_i != seg_j: meet_matrix[idx_j, idx_i] = level return scipy.sparse.csr_matrix(meet_matrix)
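The construction is easier to see densely: each level overwrites the meet matrix wherever two frames share a label, so deeper (later) levels win and every cell ends up holding the deepest level at which its two frames co-occur in a segment. A toy numpy sketch over 4 frames and 2 levels, ignoring the sparse-matrix and interval-to-frame bookkeeping of the real implementation:

import numpy as np

levels = [
    ['A', 'A', 'B', 'B'],   # coarse layer (depth 1)
    ['a', 'a', 'b', 'c'],   # fine layer (depth 2)
]

n = 4
meet = np.zeros((n, n), dtype=np.uint8)
for depth, labels in enumerate(levels, 1):
    # Integer-encode labels so np.equal.outer is safe on any dtype.
    uniq = {lab: i for i, lab in enumerate(dict.fromkeys(labels))}
    enc = np.array([uniq[lab] for lab in labels])
    agree = np.equal.outer(enc, enc)   # frames sharing a label at this depth
    meet[agree] = depth                # deeper layers overwrite shallower ones

print(meet)
# [[2 2 0 0]
#  [2 2 0 0]
#  [0 0 2 1]
#  [0 0 1 2]]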
java
protected boolean isSuppressed() { if (_suppressed == null) { // we haven't called this method before, so determine the suppressed // value and cache it for later calls to this method. if (isFacet()) { // facets are always rendered by their parents --> suppressed _suppressed = Boolean.TRUE; return true; } UIComponent component = getComponentInstance(); // Does any parent render its children? // (We must determine this first, before calling any isRendered method // because rendered properties might reference a data var of a nesting UIData, // which is not set at this time, and would cause a VariableResolver error!) UIComponent parent = component.getParent(); while (parent != null) { if (parent.getRendersChildren()) { // Yes, parent found, that renders children --> suppressed _suppressed = Boolean.TRUE; return true; } parent = parent.getParent(); } // does component or any parent has a false rendered attribute? while (component != null) { if (!component.isRendered()) { // Yes, component or any parent must not be rendered --> suppressed _suppressed = Boolean.TRUE; return true; } component = component.getParent(); } // else --> not suppressed _suppressed = Boolean.FALSE; } return _suppressed.booleanValue(); }
java
public static Duration fromMillis(long millis) { long seconds = millis / MILLIS_PER_SECOND; int nanos = (int) (millis % MILLIS_PER_SECOND * NANOS_PER_MILLI); return Duration.create(seconds, nanos); }
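The split relies on Java's truncating division, so negative inputs yield non-positive seconds and nanos together (e.g. -1500 ms gives seconds = -1, nanos = -500,000,000); any further normalization is presumably handled inside Duration.create. A quick Python check of the arithmetic, emulating Java's truncation toward zero since Python's divmod floors instead:

MILLIS_PER_SECOND = 1000
NANOS_PER_MILLI = 1_000_000

def from_millis(millis):
    # Java's / and % on longs truncate toward zero; adjust floor division.
    q, r = divmod(millis, MILLIS_PER_SECOND)
    if r != 0 and millis < 0:
        q, r = q + 1, r - MILLIS_PER_SECOND
    return q, r * NANOS_PER_MILLI

print(from_millis(1500))   # (1, 500000000)
print(from_millis(-1500))  # (-1, -500000000)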
java
public void setName(String name) { put(PdfName.NAME, new PdfString(name, PdfObject.TEXT_UNICODE)); }
python
def add_epoch(self, epoch_name, start_frame, end_frame): '''This function adds an epoch to your recording extractor that tracks a certain time period in your recording. It is stored in an internal dictionary of start and end frame tuples. Parameters ---------- epoch_name: str The name of the epoch to be added start_frame: int The start frame of the epoch to be added (inclusive) end_frame: int The end frame of the epoch to be added (exclusive) ''' # Default implementation only allows for frame info. Can override to put more info if isinstance(epoch_name, str): self._epochs[epoch_name] = {'start_frame': int(start_frame), 'end_frame': int(end_frame)} else: raise ValueError("epoch_name must be a string")
python
def gen_sponsor_schedule(user, sponsor=None, num_blocks=6, surrounding_blocks=None, given_date=None): r"""Return a list of :class:`EighthScheduledActivity`\s in which the given user is sponsoring. Returns: Dictionary with: activities no_attendance_today num_acts """ no_attendance_today = None acts = [] if sponsor is None: sponsor = user.get_eighth_sponsor() if surrounding_blocks is None: surrounding_blocks = EighthBlock.objects.get_upcoming_blocks(num_blocks) activities_sponsoring = (EighthScheduledActivity.objects.for_sponsor(sponsor).select_related("block").filter(block__in=surrounding_blocks)) sponsoring_block_map = {} for sa in activities_sponsoring: bid = sa.block.id if bid in sponsoring_block_map: sponsoring_block_map[bid] += [sa] else: sponsoring_block_map[bid] = [sa] num_acts = 0 for b in surrounding_blocks: num_added = 0 sponsored_for_block = sponsoring_block_map.get(b.id, []) for schact in sponsored_for_block: acts.append(schact) if schact.block.is_today(): if not schact.attendance_taken and schact.block.locked: no_attendance_today = True num_added += 1 if num_added == 0: # fake an entry for a block where there is no sponsorship acts.append({"block": b, "id": None, "fake": True}) else: num_acts += 1 logger.debug(acts) cur_date = surrounding_blocks[0].date if acts else given_date if given_date else datetime.now().date() last_block = surrounding_blocks[len(surrounding_blocks) - 1] if surrounding_blocks else None last_block_date = last_block.date + timedelta(days=1) if last_block else cur_date next_blocks = list(last_block.next_blocks(1)) if last_block else None next_date = next_blocks[0].date if next_blocks else last_block_date first_block = surrounding_blocks[0] if surrounding_blocks else None if cur_date and not first_block: first_block = EighthBlock.objects.filter(date__lte=cur_date).last() first_block_date = first_block.date + timedelta(days=-7) if first_block else cur_date prev_blocks = list(first_block.previous_blocks(num_blocks - 1)) if first_block else None prev_date = prev_blocks[0].date if prev_blocks else first_block_date return { "sponsor_schedule": acts, "no_attendance_today": no_attendance_today, "num_attendance_acts": num_acts, "sponsor_schedule_cur_date": cur_date, "sponsor_schedule_next_date": next_date, "sponsor_schedule_prev_date": prev_date }
java
public static boolean isUuid(final String uuid) { return uuid != null && (uuid.length() == 36 || uuid.length() == 32) && UUID_PATTERN.matcher(uuid).matches(); }
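UUID_PATTERN itself is defined elsewhere; since the method accepts both the 36-character hyphenated and the 32-character compact form, the pattern presumably makes each hyphen optional and relies on the length guard to reject mixed forms. A hedged Python sketch of an equivalent check (this regex is an assumption, not the class's actual constant):

import re

# Hyphens optional; combined with the 32-or-36 length check, a 36-char
# input must carry all four hyphens and a 32-char input must carry none.
UUID_PATTERN = re.compile(
    r'^[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}'
    r'-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$'
)

def is_uuid(uuid):
    return (uuid is not None
            and len(uuid) in (32, 36)
            and UUID_PATTERN.match(uuid) is not None)

print(is_uuid('123e4567-e89b-12d3-a456-426614174000'))  # True (36 chars)
print(is_uuid('123e4567e89b12d3a456426614174000'))      # True (32 chars)
print(is_uuid('not-a-uuid'))                            # False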
java
private void addTypesToFunctions( Node objLit, String thisType, PolymerClassDefinition.DefinitionType defType) { checkState(objLit.isObjectLit()); for (Node keyNode : objLit.children()) { Node value = keyNode.getLastChild(); if (value != null && value.isFunction()) { JSDocInfoBuilder fnDoc = JSDocInfoBuilder.maybeCopyFrom(keyNode.getJSDocInfo()); fnDoc.recordThisType( new JSTypeExpression(new Node(Token.BANG, IR.string(thisType)), VIRTUAL_FILE)); keyNode.setJSDocInfo(fnDoc.build()); } } // Add @this and @return to default property values. for (MemberDefinition property : PolymerPassStaticUtils.extractProperties( objLit, defType, compiler, /** constructor= */ null)) { if (!property.value.isObjectLit()) { continue; } Node defaultValue = NodeUtil.getFirstPropMatchingKey(property.value, "value"); if (defaultValue == null || !defaultValue.isFunction()) { continue; } Node defaultValueKey = defaultValue.getParent(); JSDocInfoBuilder fnDoc = JSDocInfoBuilder.maybeCopyFrom(defaultValueKey.getJSDocInfo()); fnDoc.recordThisType( new JSTypeExpression(new Node(Token.BANG, IR.string(thisType)), VIRTUAL_FILE)); fnDoc.recordReturnType(PolymerPassStaticUtils.getTypeFromProperty(property, compiler)); defaultValueKey.setJSDocInfo(fnDoc.build()); } }
python
def do_edit(self, args): """Edit a command with $EDITOR.""" if 'EDITOR' not in os.environ: print('*** $EDITOR not set') else: path = os.path.join(utils.CONFIG_DIR, 'sql') cmd = os.environ['EDITOR'] try: os.system(cmd + ' ' + path) if os.path.exists(path): with open(path, 'r') as f: sql = f.read() if sql: self.default(sql) finally: if os.path.exists(path): os.remove(path)
python
def p_changepassword(self):
    '''
    Changing password.
    '''
    post_data = self.get_post_data()

    usercheck = MUser.check_user(self.userinfo.uid, post_data['rawpass'])

    if usercheck == 1:
        MUser.update_pass(self.userinfo.uid, post_data['user_pass'])
        output = {'changepass': usercheck}
    else:
        output = {'changepass': 0}
    return json.dump(output, self)
java
public static Expression fields(String operator, Val<Expression>[] args, QueryExprMeta parent) { if (args.length < 1) { throw new QuerySyntaxException(Messages.get("dsl.arguments.error2", operator, 0)); } if (parent == null) { throw new QuerySyntaxException(Messages.get("dsl.arguments.error5", operator)); } else if (!parent.operator().equals("option")) { throw new QuerySyntaxException(Messages.get("dsl.arguments.error6", operator, "option")); } return TextFieldsExpression.of(args); }
java
public void setBrokerInstances(java.util.Collection<BrokerInstance> brokerInstances) { if (brokerInstances == null) { this.brokerInstances = null; return; } this.brokerInstances = new java.util.ArrayList<BrokerInstance>(brokerInstances); }
java
@TargetApi(Build.VERSION_CODES.HONEYCOMB_MR2) public static ViewTreeObserver.OnGlobalLayoutListener attach(final Activity activity, IPanelHeightTarget target, /* Nullable */ OnKeyboardShowingListener lis) { final ViewGroup contentView = activity.findViewById(android.R.id.content); final boolean isFullScreen = ViewUtil.isFullScreen(activity); final boolean isTranslucentStatus = ViewUtil.isTranslucentStatus(activity); final boolean isFitSystemWindows = ViewUtil.isFitsSystemWindows(activity); // get the screen height. final Display display = activity.getWindowManager().getDefaultDisplay(); final int screenHeight; if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB_MR2) { final Point screenSize = new Point(); display.getSize(screenSize); screenHeight = screenSize.y; } else { //noinspection deprecation screenHeight = display.getHeight(); } ViewTreeObserver.OnGlobalLayoutListener globalLayoutListener = new KeyboardStatusListener( isFullScreen, isTranslucentStatus, isFitSystemWindows, contentView, target, lis, screenHeight); contentView.getViewTreeObserver().addOnGlobalLayoutListener(globalLayoutListener); return globalLayoutListener; }