Dataset columns:
language: string (2 classes: java, python)
func_code_string: string (length 63 to 466k characters)
java
private Map<Set<ServerIdentity>, ModelNode> getServerSystemPropertyOperations(ModelNode operation, PathAddress address, Level level,
        ModelNode domain, String affectedGroup, ModelNode host) {

    Map<Set<ServerIdentity>, ModelNode> result = null;

    if (isServerAffectingSystemPropertyOperation(operation)) {
        String propName = address.getLastElement().getValue();

        boolean overridden = false;
        Set<String> groups = null;
        if (level == Level.DOMAIN || level == Level.SERVER_GROUP) {
            if (hasSystemProperty(host, propName)) {
                // host level value takes precedence
                overridden = true;
            } else if (affectedGroup != null) {
                groups = Collections.singleton(affectedGroup);
            } else if (domain.hasDefined(SERVER_GROUP)) {
                // Top level domain update applies to all groups where it was not overridden
                groups = new HashSet<String>();
                for (Property groupProp : domain.get(SERVER_GROUP).asPropertyList()) {
                    String groupName = groupProp.getName();
                    if (!hasSystemProperty(groupProp.getValue(), propName)) {
                        groups.add(groupName);
                    }
                }
            }
        }

        Set<ServerIdentity> servers = null;
        if (!overridden && host.hasDefined(SERVER_CONFIG)) {
            servers = new HashSet<ServerIdentity>();
            for (Property serverProp : host.get(SERVER_CONFIG).asPropertyList()) {
                String serverName = serverProp.getName();
                if (serverProxies.get(serverName) == null) {
                    continue;
                }
                ModelNode server = serverProp.getValue();
                if (!hasSystemProperty(server, propName)) {
                    String groupName = server.require(GROUP).asString();
                    if (groups == null || groups.contains(groupName)) {
                        servers.add(new ServerIdentity(localHostName, groupName, serverName));
                    }
                }
            }
        }

        if (servers != null && servers.size() > 0) {
            Map<ModelNode, Set<ServerIdentity>> ops = new HashMap<ModelNode, Set<ServerIdentity>>();
            for (ServerIdentity server : servers) {
                ModelNode serverOp = getServerSystemPropertyOperation(operation, propName, server, level, domain, host);
                Set<ServerIdentity> set = ops.get(serverOp);
                if (set == null) {
                    set = new HashSet<ServerIdentity>();
                    ops.put(serverOp, set);
                }
                set.add(server);
            }
            result = new HashMap<Set<ServerIdentity>, ModelNode>();
            for (Map.Entry<ModelNode, Set<ServerIdentity>> entry : ops.entrySet()) {
                result.put(entry.getValue(), entry.getKey());
            }
        }
    }

    if (result == null) {
        result = Collections.emptyMap();
    }
    return result;
}
java
public static boolean containsIgnoreCase(final CharSequence str, final CharSequence searchStr) {
    if (str == null || searchStr == null) {
        return false;
    }
    final int len = searchStr.length();
    final int max = str.length() - len;
    for (int i = 0; i <= max; i++) {
        if (CharSequenceUtils.regionMatches(str, true, i, searchStr, 0, len)) {
            return true;
        }
    }
    return false;
}
python
def _import_status(data, item, repo_name, repo_tag):
    '''
    Process a status update from docker import, updating the data structure
    '''
    status = item['status']
    try:
        if 'Downloading from' in status:
            return
        elif all(x in string.hexdigits for x in status):
            # Status is an image ID
            data['Image'] = '{0}:{1}'.format(repo_name, repo_tag)
            data['Id'] = status
    except (AttributeError, TypeError):
        pass
python
def _process_disease2gene(self, row):
    """
    Here, we process the disease-to-gene associations.
    Note that we ONLY process direct associations
    (not inferred through chemicals).
    Furthermore, we also ONLY process "marker/mechanism" associations.

    We preferentially utilize OMIM identifiers over MESH identifiers
    for disease/phenotype.
    Therefore, if a single OMIM id is listed under the "omim_ids" list,
    we will choose this over any MeSH id that might be listed as
    the disease_id. If multiple OMIM ids are listed in the omim_ids
    column, we toss this for now.
    (Mostly, we are not sure what to do with this information.)

    We also pull in the MeSH labels here (but not OMIM) to ensure that
    we have them (as they may not be brought in separately).

    :param row:
    :return:
    """
    # if self.test_mode:
    #     graph = self.testgraph
    # else:
    #     graph = self.graph
    # self._check_list_len(row, 9)
    # geno = Genotype(graph)
    # gu = GraphUtils(curie_map.get())
    model = Model(self.graph)
    (gene_symbol, gene_id, disease_name, disease_id, direct_evidence,
     inference_chemical_name, inference_score, omim_ids, pubmed_ids) = row

    # we only want the direct associations; skipping inferred for now
    if direct_evidence == '' or direct_evidence != 'marker/mechanism':
        return

    # scrub some of the associations...
    # it seems odd to link human genes to the following "diseases"
    diseases_to_scrub = [
        'MESH:D004283',  # dog diseases
        'MESH:D004195',  # disease models, animal
        'MESH:D030342',  # genetic diseases, inborn
        'MESH:D040181',  # genetic diseases, x-linked
        'MESH:D020022']  # genetic predisposition to a disease

    if disease_id in diseases_to_scrub:
        LOG.info(
            "Skipping association between NCBIGene:%s and %s",
            str(gene_id), disease_id)
        return

    intersect = list(
        set(['OMIM:' + str(i) for i in omim_ids.split('|')] +
            [disease_id]) & set(self.test_diseaseids))
    if self.test_mode and (
            int(gene_id) not in self.test_geneids or len(intersect) < 1):
        return

    # there are three kinds of direct evidence:
    # (marker/mechanism | marker/mechanism|therapeutic | therapeutic)
    # we are only using the "marker/mechanism" for now
    # TODO what does it mean for a gene to be therapeutic for disease?
    # a therapeutic target?

    gene_id = 'NCBIGene:' + gene_id

    preferred_disease_id = disease_id
    if omim_ids is not None and omim_ids != '':
        omim_id_list = re.split(r'\|', omim_ids)
        # If there is only one OMIM ID for the Disease ID
        # or in the omim_ids list,
        # use the OMIM ID preferentially over any MeSH ID.
        if re.match(r'OMIM:.*', disease_id):
            if len(omim_id_list) > 1:
                # the disease ID is an OMIM ID and
                # there is more than one OMIM entry in omim_ids.
                # Currently no entries satisfy this condition
                pass
            elif disease_id != ('OMIM:' + omim_ids):
                # the disease ID is an OMIM ID and
                # there is only one non-equiv OMIM entry in omim_ids
                # we preferentially use the disease_id here
                LOG.warning(
                    "There may be alternate identifier for %s: %s",
                    disease_id, omim_ids)
                # TODO: What should be done with the alternate disease IDs?
        else:
            if len(omim_id_list) == 1:
                # the disease ID is not an OMIM ID
                # and there is only one OMIM entry in omim_ids.
                preferred_disease_id = 'OMIM:' + omim_ids
            elif len(omim_id_list) > 1:
                # This is when the disease ID is not an OMIM ID and
                # there is more than one OMIM entry in omim_ids.
                pass

    model.addClassToGraph(gene_id, None)

    # not sure if MESH is getting added separately.
    # adding labels here for good measure
    dlabel = None
    if re.match(r'MESH', preferred_disease_id):
        dlabel = disease_name
    model.addClassToGraph(preferred_disease_id, dlabel)

    # Add the disease to gene relationship.
    rel_id = self.resolve(direct_evidence)
    refs = self._process_pubmed_ids(pubmed_ids)
    self._make_association(gene_id, preferred_disease_id, rel_id, refs)

    return
java
@Override
public void doAbortSessionRequestEvent(ClientAuthSession appSession, AbortSessionRequest asr)
        throws InternalException, IllegalDiameterStateException, RouteException, OverloadException {
    logger.info("Diameter Gq AuthorizationSessionFactory :: doAbortSessionRequestEvent :: appSession[{}], ASR[{}]",
            appSession, asr);
}
python
def add_and_filter(self, *values):
    """
    Add a filter using "AND" logic. This filter is useful when requiring
    multiple matches to evaluate to true. For example, searching for a
    specific IP address in the src field and another in the dst field.

    .. seealso:: :class:`smc_monitoring.models.filters.AndFilter` for examples.

    :param values: optional constructor args for
        :class:`smc_monitoring.models.filters.AndFilter`. Typically this
        is a list of InFilter expressions.
    :type: list(QueryFilter)
    :rtype: AndFilter
    """
    filt = AndFilter(*values)
    self.update_filter(filt)
    return filt
java
public static @Nullable String getPrefix(@NonNull View view) {
    return prefixes.get(view.getId());
}
python
def port_channel_redundancy_group_group_id(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    port_channel_redundancy_group = ET.SubElement(config, "port-channel-redundancy-group",
                                                  xmlns="urn:brocade.com:mgmt:brocade-lag")
    group_id = ET.SubElement(port_channel_redundancy_group, "group-id")
    group_id.text = kwargs.pop('group_id')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
java
public static TableId of(String dataset, String table) {
    return new TableId(null, checkNotNull(dataset), checkNotNull(table));
}
python
def randomSlugField(self):
    """
    Return the unique slug by generating
    the uuid4 to fix the duplicate slug (unique=True)
    """
    lst = [
        "sample-slug-{}".format(uuid.uuid4().hex),
        "awesome-djipsum-{}".format(uuid.uuid4().hex),
        "unique-slug-{}".format(uuid.uuid4().hex)
    ]
    return self.randomize(lst)
java
@Override
public void upload(String path, InputStream payload, long payloadSize) {
    try {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentType(CONTENT_TYPE);
        objectMetadata.setContentLength(payloadSize);
        PutObjectRequest request = new PutObjectRequest(bucketName, path, payload, objectMetadata);
        s3Client.putObject(request);
    } catch (SdkClientException e) {
        String msg = "Error communicating with S3";
        logger.error(msg, e);
        throw new ApplicationException(ApplicationException.Code.BACKEND_ERROR, msg, e);
    }
}
java
public Ontology parseOBO(BufferedReader oboFile, String ontoName, String ontoDescription)
        throws ParseException, IOException {
    try {
        OntologyFactory factory = OntoTools.getDefaultFactory();
        Ontology ontology = factory.createOntology(ontoName, ontoDescription);

        OboFileParser parser = new OboFileParser();
        OboFileEventListener handler = new OboFileHandler(ontology);

        parser.addOboFileEventListener(handler);
        parser.parseOBO(oboFile);

        return ontology;
    } catch (AlreadyExistsException ex) {
        throw new RuntimeException("Duplication in ontology");
    } catch (OntologyException ex) {
        throw new RuntimeException(ex);
    }
}
java
public void ifge(String target) throws IOException {
    if (wideIndex) {
        out.writeByte(NOT_IFGE);
        out.writeShort(WIDEFIXOFFSET);
        Branch branch = createBranch(target);
        out.writeByte(GOTO_W);
        out.writeInt(branch);
    } else {
        Branch branch = createBranch(target);
        out.writeOpCode(IFGE);
        out.writeShort(branch);
    }
}
java
public static void addAction(Set<Action> aas, Action action, boolean condition) {
    if (condition) {
        aas.add(action);
    }
}
java
@Override
public void doRender(final WComponent component, final WebXmlRenderContext renderContext) {
    WPasswordField field = (WPasswordField) component;
    XmlStringBuilder xml = renderContext.getWriter();
    boolean readOnly = field.isReadOnly();

    xml.appendTagOpen(TAG_NAME);
    xml.appendAttribute("id", component.getId());
    xml.appendOptionalAttribute("class", component.getHtmlClass());
    xml.appendOptionalAttribute("track", component.isTracking(), "true");
    xml.appendOptionalAttribute("hidden", component.isHidden(), "true");
    if (readOnly) {
        xml.appendAttribute("readOnly", "true");
        xml.appendEnd();
        return;
    }

    int cols = field.getColumns();
    int minLength = field.getMinLength();
    int maxLength = field.getMaxLength();
    WComponent submitControl = field.getDefaultSubmitButton();
    String submitControlId = submitControl == null ? null : submitControl.getId();

    xml.appendOptionalAttribute("disabled", field.isDisabled(), "true");
    xml.appendOptionalAttribute("required", field.isMandatory(), "true");
    xml.appendOptionalAttribute("minLength", minLength > 0, minLength);
    xml.appendOptionalAttribute("maxLength", maxLength > 0, maxLength);
    xml.appendOptionalAttribute("toolTip", field.getToolTip());
    xml.appendOptionalAttribute("accessibleText", field.getAccessibleText());
    xml.appendOptionalAttribute("size", cols > 0, cols);
    xml.appendOptionalAttribute("buttonId", submitControlId);

    String placeholder = field.getPlaceholder();
    xml.appendOptionalAttribute("placeholder", !Util.empty(placeholder), placeholder);
    String autocomplete = field.getAutocomplete();
    xml.appendOptionalAttribute("autocomplete", !Util.empty(autocomplete), autocomplete);

    List<Diagnostic> diags = field.getDiagnostics(Diagnostic.ERROR);
    if (diags == null || diags.isEmpty()) {
        xml.appendEnd();
        return;
    }
    xml.appendClose();
    DiagnosticRenderUtil.renderDiagnostics(field, renderContext);
    xml.appendEndTag(TAG_NAME);
}
python
def extract_texkeys_from_pdf(pdf_file):
    """
    Extract the texkeys from the given PDF file

    This is done by looking up the named destinations in the PDF

    @param pdf_file: path to a PDF

    @return: list of all texkeys found in the PDF
    """
    with open(pdf_file, 'rb') as pdf_stream:
        try:
            pdf = PdfFileReader(pdf_stream, strict=False)
            destinations = pdf.getNamedDestinations()
        except Exception:
            LOGGER.debug(u"PDF: Internal PyPDF2 error, no TeXkeys returned.")
            return []
        # not all named destinations point to references
        refs = [dest for dest in destinations.iteritems()
                if re_reference_in_dest.match(dest[0])]
        try:
            if _destinations_in_two_columns(pdf, refs):
                LOGGER.debug(u"PDF: Using two-column layout")

                def sortfunc(dest_couple):
                    return _destination_position(pdf, dest_couple[1])
            else:
                LOGGER.debug(u"PDF: Using single-column layout")

                def sortfunc(dest_couple):
                    (page, _, ypos, xpos) = _destination_position(
                        pdf, dest_couple[1])
                    return (page, ypos, xpos)

            refs.sort(key=sortfunc)
            # extract the TeXkey from the named destination name
            return [re_reference_in_dest.match(destname).group(1)
                    for (destname, _) in refs]
        except Exception:
            LOGGER.debug(u"PDF: Impossible to determine layout, no TeXkeys returned")
            return []
python
def build_filename(self, binary):
    """Return the proposed filename with extension for the binary."""
    return '%(TIMESTAMP)s%(BRANCH)s%(DEBUG)s-%(NAME)s' % {
        'TIMESTAMP': self.timestamp + '-' if self.timestamp else '',
        'BRANCH': self.branch,
        'DEBUG': '-debug' if self.debug_build else '',
        'NAME': binary}
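A quick sketch of how the template above assembles a name. The attribute values here are hypothetical stand-ins, not taken from whatever object this method is bound to:

# Hypothetical values standing in for the instance attributes.
timestamp = '2015-10-01-03-02-04'
branch = 'mozilla-central'
debug_build = True
binary = 'firefox-44.0a1.en-US.linux-x86_64.tar.bz2'

filename = '%(TIMESTAMP)s%(BRANCH)s%(DEBUG)s-%(NAME)s' % {
    'TIMESTAMP': timestamp + '-' if timestamp else '',
    'BRANCH': branch,
    'DEBUG': '-debug' if debug_build else '',
    'NAME': binary}
# -> '2015-10-01-03-02-04-mozilla-central-debug-firefox-44.0a1.en-US.linux-x86_64.tar.bz2'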
java
public static boolean isAllowedKeyType(SoyType type) {
    switch (type.getKind()) {
        case BOOL:
        case INT:
        case FLOAT:
        case STRING:
        case PROTO_ENUM:
            return true;
        default:
            return type == SoyTypes.NUMBER_TYPE;
    }
}
python
def download(model=None, direct=False, *pip_args):
    """
    Download compatible model from default download path using pip. Model
    can be shortcut, model name or, if --direct flag is set, full model name
    with version.
    """
    if model is None:
        model = about.__default_corpus__
    if direct:
        dl = download_model('{m}/{m}.tar.gz#egg={m}'.format(m=model), pip_args)
    else:
        shortcuts = get_json(about.__shortcuts__, "available shortcuts")
        model_name = shortcuts.get(model, model)
        compatibility = get_compatibility()
        version = get_version(model_name, compatibility)
        dl = download_model('{m}-{v}/{m}-{v}.tar.gz#egg={m}=={v}'
                            .format(m=model_name, v=version), pip_args)
    if dl != 0:
        # if download subprocess doesn't return 0, exit
        sys.exit(dl)
    try:
        # Get package path here because link uses
        # pip.get_installed_distributions() to check if model is a
        # package, which fails if model was just installed via
        # subprocess
        package_path = get_package_path(model_name)
        link(model_name, model, force=True, model_path=package_path)
    except:
        # Dirty, but since spacy.download and the auto-linking is
        # mostly a convenience wrapper, it's best to show a success
        # message and loading instructions, even if linking fails.
        prints(Messages.M001.format(name=model_name), title=Messages.M002)
java
@Nonnull
private String _readAndParseCSS (@Nonnull final IHasInputStream aISP,
                                 @Nonnull @Nonempty final String sBasePath,
                                 final boolean bRegular) {
    final CascadingStyleSheet aCSS = CSSReader.readFromStream (aISP, m_aCharset, ECSSVersion.CSS30);
    if (aCSS == null) {
        LOGGER.error ("Failed to parse CSS. Returning 'as-is'");
        return StreamHelper.getAllBytesAsString (aISP, m_aCharset);
    }
    CSSVisitor.visitCSSUrl (aCSS, new AbstractModifyingCSSUrlVisitor () {
        @Override
        protected String getModifiedURI (@Nonnull final String sURI) {
            if (LinkHelper.hasKnownProtocol (sURI)) {
                // If e.g. an external resource is included.
                // Example: https://fonts.googleapis.com/css
                return sURI;
            }
            return FilenameHelper.getCleanConcatenatedUrlPath (sBasePath, sURI);
        }
    });

    // Write again after modification
    return new CSSWriter (ECSSVersion.CSS30, !bRegular).setWriteHeaderText (false)
                                                       .setWriteFooterText (false)
                                                       .getCSSAsString (aCSS);
}
python
async def raw(self, key, *, dc=None, watch=None, consistency=None):
    """Returns the specified key

    Parameters:
        key (str): Key to fetch
        dc (str): Specify datacenter that will be used.
            Defaults to the agent's local datacenter.
        watch (Blocking): Do a blocking query
        consistency (Consistency): Force consistency
    Returns:
        ObjectMeta: where value is the raw value
    """
    response = await self._read(key,
                                dc=dc,
                                raw=True,
                                watch=watch,
                                consistency=consistency)
    return consul(response)
java
@SuppressWarnings("deprecation") public List<String> compileToJsSrc( SoyJsSrcOptions jsSrcOptions, @Nullable SoyMsgBundle msgBundle) { resetErrorReporter(); // JS has traditionally allowed unknown globals, as a way for soy to reference normal js enums // and constants. For consistency/reusability of templates it would be nice to not allow that // but the cat is out of the bag. PassManager.Builder builder = passManagerBuilder().allowUnknownGlobals().allowV1Expression().desugarHtmlNodes(false); ParseResult result = parse(builder); throwIfErrorsPresent(); TemplateRegistry registry = result.registry(); SoyFileSetNode fileSet = result.fileSet(); List<String> generatedSrcs = new JsSrcMain(scopedData.enterable(), typeRegistry) .genJsSrc(fileSet, registry, jsSrcOptions, msgBundle, errorReporter); throwIfErrorsPresent(); reportWarnings(); return generatedSrcs; }
python
def drain_transport(self):
    '''
    "Drain" the transport connection.

    This command simply returns all waiting messages sent from the
    remote chrome instance. This can be useful when waiting for a
    specific asynchronous message from chrome, but higher level calls
    are better suited for managing wait-for-message type needs.
    '''
    self.transport.check_process_ded()
    ret = self.transport.drain(tab_key=self.tab_id)
    self.transport.check_process_ded()
    return ret
python
def run(self):
    """
    Begins the runtime execution.
    """
    iterations = 0
    queue = self.queue.tick()
    while True:
        try:
            next(queue)
        except StopIteration:
            break
        iterations += 1
        sleep(self.sleep_time)
    return iterations
java
public static HistogramDataPoint decodeHistogramDataPoint(final TSDB tsdb,
                                                          final long base_time,
                                                          final byte[] qualifier,
                                                          final byte[] value) {
    final HistogramDataPointCodec decoder = tsdb.histogramManager().getCodec((int) value[0]);
    long timestamp = getTimeStampFromNonDP(base_time, qualifier);
    final Histogram histogram = decoder.decode(value, true);
    return new SimpleHistogramDataPointAdapter(histogram, timestamp);
}
java
@Override
public void launch(@NonNull ThreadingModel threading) {
    this.threadingModel = threading;

    switch (threading) {
        case SINGLE_THREAD: {
            log.warn("SINGLE_THREAD model is used, performance will be significantly reduced");

            // single thread for all queues. shouldn't be used in real world
            threadA = new Thread(() -> {
                while (runner.get()) {
                    if (subscriptionForShards != null)
                        subscriptionForShards.poll(messageHandlerForShards, 512);

                    idler.idle(subscriptionForClients.poll(messageHandlerForClients, 512));
                }
            });

            threadA.setDaemon(true);
            threadA.start();
        }
        break;
        case DEDICATED_THREADS: {
            // we start separate thread for each handler

            /**
             * We definitely might use less conditional code here, BUT i'll keep it as is,
             * only because we want code to be obvious for people
             */
            final AtomicBoolean localRunner = new AtomicBoolean(false);
            if (nodeRole == NodeRole.NONE) {
                throw new ND4JIllegalStateException("No role is set for current node!");
            } else if (nodeRole == NodeRole.SHARD || nodeRole == NodeRole.BACKUP || nodeRole == NodeRole.MASTER) {
                // Shard or Backup uses two subscriptions

                // setting up thread for shard->client communication listener
                if (messageHandlerForShards != null) {
                    threadB = new Thread(() -> {
                        while (runner.get())
                            idler.idle(subscriptionForShards.poll(messageHandlerForShards, 512));
                    });
                    threadB.setDaemon(true);
                    threadB.setName("VoidParamServer subscription threadB [" + nodeRole + "]");
                }

                // setting up thread for inter-shard communication listener
                threadA = new Thread(() -> {
                    localRunner.set(true);
                    while (runner.get())
                        idler.idle(subscriptionForClients.poll(messageHandlerForClients, 512));
                });

                if (threadB != null) {
                    Nd4j.getAffinityManager().attachThreadToDevice(threadB,
                            Nd4j.getAffinityManager().getDeviceForCurrentThread());
                    threadB.setDaemon(true);
                    threadB.setName("VoidParamServer subscription threadB [" + nodeRole + "]");
                    threadB.start();
                }
            } else {
                // setting up thread for shard->client communication listener
                threadA = new Thread(() -> {
                    localRunner.set(true);
                    while (runner.get())
                        idler.idle(subscriptionForClients.poll(messageHandlerForClients, 512));
                });
            }

            // all roles have threadA anyway
            Nd4j.getAffinityManager().attachThreadToDevice(threadA,
                    Nd4j.getAffinityManager().getDeviceForCurrentThread());
            threadA.setDaemon(true);
            threadA.setName("VoidParamServer subscription threadA [" + nodeRole + "]");
            threadA.start();

            while (!localRunner.get())
                try {
                    Thread.sleep(50);
                } catch (Exception e) {
                }
        }
        break;
        case SAME_THREAD: {
            // no additional threads at all, we do poll within takeMessage loop
            log.warn("SAME_THREAD model is used, performance will be dramatically reduced");
        }
        break;
        default:
            throw new IllegalStateException("Unknown thread model: [" + threading.toString() + "]");
    }
}
python
def calculateFirstDifference(filePath, outputFilePath):
    """
    Create an auxiliary data file that contains first difference of the
    predicted field

    :param filePath: path of the original data file
    """
    predictedField = SWARM_CONFIG['inferenceArgs']['predictedField']

    data = pd.read_csv(filePath, header=0, skiprows=[1, 2])
    predictedFieldVals = data[predictedField].astype('float')
    firstDifference = predictedFieldVals.diff()
    data[predictedField] = firstDifference

    inputFile = open(filePath, "r")
    outputFile = open(outputFilePath, "w")
    csvReader = csv.reader(inputFile)
    csvWriter = csv.writer(outputFile)
    # write headlines
    for _ in xrange(3):
        readrow = csvReader.next()
        csvWriter.writerow(readrow)
    for i in xrange(len(data)):
        csvWriter.writerow(list(data.iloc[i]))
    inputFile.close()
    outputFile.close()
python
def mr_dim_ind(self):
    """Return int, tuple of int, or None, representing MR indices.

    The return value represents the index of each multiple-response (MR)
    dimension in this cube. Return value is None if there are no MR
    dimensions, and int if there is one MR dimension, and a tuple of int
    when there are more than one. The index is the (zero-based) position
    of the MR dimensions in the _ApparentDimensions sequence returned by
    the :attr:`.dimensions` property.
    """
    # TODO: rename to `mr_dim_idxs` or better yet get rid of need for
    # this as it's really a cube internal characteristic.
    # TODO: Make this return a tuple in all cases, like (), (1,), or (0, 2).
    indices = tuple(
        idx for idx, d in enumerate(self.dimensions)
        if d.dimension_type == DT.MR_SUBVAR
    )
    if indices == ():
        return None
    if len(indices) == 1:
        return indices[0]
    return indices
java
public static Table columnPercents(Table table, String column1, String column2) {
    return columnPercents(table, table.categoricalColumn(column1), table.categoricalColumn(column2));
}
java
public EventsResults getByType(String appId, EventType eventType, String timespan, String filter, String search,
        String orderby, String select, Integer skip, Integer top, String format, Boolean count, String apply) {
    return getByTypeWithServiceResponseAsync(appId, eventType, timespan, filter, search, orderby, select, skip, top,
            format, count, apply).toBlocking().single().body();
}
java
public boolean installed(Class<?> component) {
    Preconditions.checkArgument(component != null,
            "Parameter 'component' must not be [" + component + "]");
    return installed.contains(component);
}
java
@Override
public void visitClassContext(ClassContext clsContext) {
    try {
        stack = new OpcodeStack();
        super.visitClassContext(clsContext);
    } finally {
        stack = null;
    }
}
java
public int readInt() throws IOException {
    int byte1 = in.read();
    int byte2 = in.read();
    int byte3 = in.read();
    int byte4 = in.read();
    if (byte4 < 0) {
        throw new EOFException();
    }
    return (byte4 << 24)
            | ((byte3 << 24) >>> 8)
            | ((byte2 << 24) >>> 16)
            | ((byte1 << 24) >>> 24);
}
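The `(b << 24) >>> k` pattern above is just masking each byte before placing it (valid because `in.read()` returns values in 0..255). A small Python sketch, not taken from the library, showing the equivalent little-endian reconstruction:

def read_int_le(byte1, byte2, byte3, byte4):
    # byte1 is the least significant byte; (b << 24) >>> 8*k in the Java
    # reduces to (b & 0xFF) << (24 - 8*k) for values in 0..255.
    return (byte4 << 24) | (byte3 << 16) | (byte2 << 8) | byte1

assert read_int_le(0x78, 0x56, 0x34, 0x12) == 0x12345678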
python
async def fetch_data(self):
    """Get the latest data from EBox."""
    # Get http session
    await self._get_httpsession()
    # Get login page
    token = await self._get_login_page()
    # Post login page
    await self._post_login_page(token)
    # Get home data
    home_data = await self._get_home_data()
    # Get usage data
    usage_data = await self._get_usage_data()
    # Merge data
    self._data.update(home_data)
    self._data.update(usage_data)
python
def read_in_survey_parameters(
        log,
        pathToSettingsFile
):
    """
    *First reads in the mcs_settings.yaml file to determine the name of the
    settings file to read in the survey parameters.*

    **Key Arguments:**
        - ``log`` -- logger
        - ``pathToSettingsFile`` -- path to the settings file for the simulation

    **Return:**
        - a tuple of settings lists and dictionaries
    """
    ################ > IMPORTS ################
    ## STANDARD LIB ##
    ## THIRD PARTY ##
    import yaml
    ## LOCAL APPLICATION ##

    ############### VARIABLE ATTRIBUTES #############

    ################ >ACTION(S) ################
    # READ THE NAME OF THE SETTINGS FILE FOR THIS SIMULATION
    try:
        stream = file(pathToSettingsFile, 'r')
        thisDict = yaml.load(stream)
        stream.close()
    except Exception, e:
        print str(e)

    # NOW READ IN THE USER SET MCS SETTINGS
    try:
        stream = file(pathToSettingsFile, 'r')
        thisDict = yaml.load(stream)
        stream.close()
    except Exception, e:
        print str(e)

    allSettings = thisDict
    programSettings = thisDict["Program Settings"]

    limitingMags = thisDict["Limiting Magnitudes"]
    # for key in limitingMags:
    #     log.debug('filter: %s, limit: %s' % (key, limitingMags[key]))

    sampleNumber = thisDict["Simulation Sample"]
    peakMagnitudeDistributions = thisDict[
        "SN Absolute Peak-Magnitude Distributions"]
    #log.debug('snDistributions[magnitude] %s' % (snDistributions["magnitude"],))
    #log.debug('snDistributions[sigma] %s' % (snDistributions["sigma"],))

    relativeRatesSet = thisDict["Relative Rate Set to Use"]
    relativeSNRates = thisDict["Relative SN Rates"][relativeRatesSet]
    #log.debug('relativeSNRates %s' % (relativeSNRates,))

    lowerReshiftLimit = thisDict["Lower Redshift Limit"]
    upperRedshiftLimit = thisDict["Upper Redshift Limit"]
    #log.debug('upperRedshiftLimit %s' % (upperRedshiftLimit,))
    redshiftResolution = thisDict["Redshift Resolution"]

    extinctionSettings = thisDict["Extinctions"]
    extinctionType = extinctionSettings["constant or random"]
    extinctionConstant = extinctionSettings["constant E(b-v)"]
    hostExtinctionDistributions = extinctionSettings["host"]
    #log.debug('hostExtinctionDistributions %s' % (hostExtinctionDistributions,))
    galacticExtinctionDistribution = extinctionSettings["galactic"]
    #log.debug('galacticExtinctionDistribution %s' % (galacticExtinctionDistribution,))

    surveyCadenceSettings = thisDict["Survey Cadence"]
    #log.debug('surveyCadenceSettings %s' % (surveyCadenceSettings,))

    explosionDaysFromSettings = thisDict["Explosion Days"]
    extendLightCurveTail = thisDict["Extend lightcurve tail?"]
    snLightCurves = thisDict["Lightcurves"]
    lightCurvePolyOrder = thisDict[
        "Order of polynomial used to fits lightcurves"]
    #log.debug('snlightCurves %s' % (snlightCurves,))

    surveyArea = thisDict["Sky Area of the Survey (square degrees)"]
    CCSNRateFraction = thisDict["CCSN Progenitor Population Fraction of IMF"]
    transientToCCSNRateFraction = thisDict["Transient to CCSN Ratio"]
    extraSurveyConstraints = thisDict["Extra Survey Constraints"]
    restFrameFilter = thisDict["Rest Frame Filter for K-corrections"]
    kCorrectionTemporalResolution = thisDict[
        "K-correction temporal resolution (days)"]
    kCorPolyOrder = thisDict["Order of polynomial used to fits k-corrections"]
    kCorMinimumDataPoints = thisDict[
        "Minimum number of datapoints used to generate k-correction curve"]

    logLevel = thisDict["Level of logging required"]

    return (
        allSettings,
        programSettings,
        limitingMags,
        sampleNumber,
        peakMagnitudeDistributions,
        explosionDaysFromSettings,
        extendLightCurveTail,
        relativeSNRates,
        lowerReshiftLimit,
        upperRedshiftLimit,
        redshiftResolution,
        restFrameFilter,
        kCorrectionTemporalResolution,
        kCorPolyOrder,
        kCorMinimumDataPoints,
        extinctionType,
        extinctionConstant,
        hostExtinctionDistributions,
        galacticExtinctionDistribution,
        surveyCadenceSettings,
        snLightCurves,
        surveyArea,
        CCSNRateFraction,
        transientToCCSNRateFraction,
        extraSurveyConstraints,
        lightCurvePolyOrder,
        logLevel)
java
public com.google.api.ads.admanager.axis.v201808.PremiumAdjustmentType getAdjustmentType() {
    return adjustmentType;
}
java
public static Method copyMethod(Method method) {
    try {
        if (jlrMethodRootField != null) {
            jlrMethodRootField.set(method, null);
        }
        return (Method) jlrMethodCopy.invoke(method);
    } catch (Exception e) {
        log.log(Level.SEVERE, "Problems copying method. Incompatible JVM?", e);
        return method; // return original as the best we can do
    }
}
python
def _clear(self) -> None:
    """
    Resets the internal state of the simulator, and sets the simulated
    clock back to 0.0. This discards all outstanding events and tears
    down hanging process instances.
    """
    for _, event, _, _ in self.events():
        if hasattr(event, "__self__") and isinstance(event.__self__, Process):  # type: ignore
            event.__self__.throw()  # type: ignore
    self._events.clear()
    self._ts_now = 0.0
python
def save(self, *objs, condition=None, atomic=False) -> "WriteTransaction":
    """
    Add one or more objects to be saved in this transaction.

    At most 10 items can be checked, saved, or deleted in the same
    transaction. The same idempotency token will be used for a single
    prepared transaction, which allows you to safely call commit on the
    PreparedCommit object multiple times.

    :param objs: Objects to add to the set that are updated in this
        transaction.
    :param condition: A condition for these objects which must hold for
        the transaction to commit.
    :param bool atomic: only commit the transaction if the local and
        DynamoDB versions of the object match.
    :return: this transaction for chaining
    """
    self._extend([TxItem.new("save", obj, condition, atomic) for obj in objs])
    return self
java
public AuthMethod getAcceptableAuthMethod(Set<Class> acceptableAuthMethodClasses) throws NexmoUnacceptableAuthException {
    for (AuthMethod availableAuthMethod : this.authList) {
        if (acceptableAuthMethodClasses.contains(availableAuthMethod.getClass())) {
            return availableAuthMethod;
        }
    }
    throw new NexmoUnacceptableAuthException(this.authList, acceptableAuthMethodClasses);
}
python
def _package_conf_file_to_dir(file_name):
    '''
    Convert a config file to a config directory.
    '''
    if file_name in SUPPORTED_CONFS:
        path = BASE_PATH.format(file_name)
        if os.path.exists(path):
            if os.path.isdir(path):
                return False
            else:
                os.rename(path, path + '.tmpbak')
                os.mkdir(path, 0o755)
                os.rename(path + '.tmpbak', os.path.join(path, 'tmp'))
                return True
        else:
            os.mkdir(path, 0o755)
            return True
java
public static Source retrieve(String source) throws StripeException {
    return retrieve(source, (Map<String, Object>) null, (RequestOptions) null);
}
java
@Override
public void eSet(int featureID, Object newValue) {
    switch (featureID) {
        case SimpleAntlrPackage.AND_EXPRESSION__LEFT:
            setLeft((Expression)newValue);
            return;
        case SimpleAntlrPackage.AND_EXPRESSION__RIGHT:
            setRight((Expression)newValue);
            return;
    }
    super.eSet(featureID, newValue);
}
python
def _multi_rpush_pipeline(self, pipe, queue, values, bulk_size=0):
    '''
    Pushes multiple elements to a list in a given pipeline
    If bulk_size is set it will execute the pipeline
    every bulk_size elements
    '''
    cont = 0
    for value in values:
        pipe.rpush(queue, value)
        cont += 1  # count pushed elements so the bulk flush fires every bulk_size items
        if bulk_size != 0 and cont % bulk_size == 0:
            pipe.execute()
java
public CreateLaunchConfigurationRequest withClassicLinkVPCSecurityGroups(String... classicLinkVPCSecurityGroups) {
    if (this.classicLinkVPCSecurityGroups == null) {
        setClassicLinkVPCSecurityGroups(new com.amazonaws.internal.SdkInternalList<String>(classicLinkVPCSecurityGroups.length));
    }
    for (String ele : classicLinkVPCSecurityGroups) {
        this.classicLinkVPCSecurityGroups.add(ele);
    }
    return this;
}
python
def _read_dataframe(filename):
    """ Reads the original dataset TSV as a pandas dataframe """
    # delay importing this to avoid another dependency
    import pandas

    # read in triples of user/artist/playcount from the input dataset
    # get a model based off the input params
    start = time.time()
    log.debug("reading data from %s", filename)
    data = pandas.read_table(filename, usecols=[0, 1, 3], names=['user', 'item', 'rating'])

    # map each artist and user to a unique numeric value
    data['user'] = data['user'].astype("category")
    data['item'] = data['item'].astype("category")

    # store as a CSR matrix
    log.debug("read data file in %s", time.time() - start)

    return data
java
public static Footprint measure(Object rootObject, Predicate<Object> objectAcceptor) {
    Preconditions.checkNotNull(objectAcceptor, "predicate");

    Predicate<Chain> completePredicate = Predicates.and(ImmutableList.of(
        ObjectExplorer.notEnumFieldsOrClasses,
        new ObjectExplorer.AtMostOncePredicate(),
        Predicates.compose(objectAcceptor, ObjectExplorer.chainToObject)
    ));

    return ObjectExplorer.exploreObject(rootObject,
        new ObjectGraphVisitor(completePredicate),
        EnumSet.of(Feature.VISIT_PRIMITIVES, Feature.VISIT_NULL));
}
java
public void setProperty(String name, Object value) {
    Params.notNullOrEmpty(name, "Property name");
    if (value != null) {
        properties.setProperty(name, converter.asString(value));
    }
}
python
def follow_tail(self):
    """
    Read (tail and follow) the log file, parse entries and send messages
    to Sentry using Raven.
    """
    try:
        follower = tailhead.follow_path(self.filepath)
    except (FileNotFoundError, PermissionError) as err:
        raise SystemExit("Error: Can't read logfile %s (%s)" %
                         (self.filepath, err))

    for line in follower:
        self.message = None
        self.params = None
        self.site = None

        if line is not None:
            self.parse(line)

        send_message(self.message, self.params, self.site, self.logger)
python
def plot_histogram(self, title_prefix="", title_override="", figsize=(8, 6)):
    """
    Plots a histogram of the results after the Monte Carlo simulation is run.
    NOTE- This method must be called AFTER "roll_mc".

    :param title_prefix: If desired, prefix the title (such as "Alg 1")
    :param title_override: Override the title string entirely
    :param figsize: The size of the histogram plot
    :return: a seaborn figure of the histogram
    """
    # Check that roll_mc has been called
    if not self.arr_res:
        raise ValueError("Call roll_mc before plotting the histogram.")

    # Find a title using either the override or _construct_title method
    if title_override:
        title = title_override
    else:
        title = title_prefix + PBE._construct_title(self.num_dice,
                                                    self.dice_type,
                                                    self.add_val,
                                                    self.num_attribute,
                                                    self.keep_attribute,
                                                    self.keep_dice,
                                                    self.reroll,
                                                    self.num_arrays)

    # Construct the histogram
    f = self._plot_hist(self.arr_res, self.pbe_res, title, figsize)
    return f
python
def _generate_derived_key(password, salt=None, iterations=None):
    """
    Generate a derived key by feeding 'password' to the Password-Based Key
    Derivation Function (PBKDF2). pyca/cryptography's PBKDF2 implementation
    is used in this module. 'salt' may be specified so that a previous
    derived key may be regenerated, otherwise '_SALT_SIZE' is used by
    default. 'iterations' is the number of SHA-256 iterations to perform,
    otherwise '_PBKDF2_ITERATIONS' is used by default.
    """
    # Use pyca/cryptography's default backend (e.g., openSSL, CommonCrypto, etc.)
    # The default backend is not fixed and can be changed by pyca/cryptography
    # over time.
    backend = default_backend()

    # If 'salt' and 'iterations' are unspecified, a new derived key is
    # generated. If specified, a deterministic key is derived according
    # to the given 'salt' and 'iterations' values.
    if salt is None:
        salt = os.urandom(_SALT_SIZE)

    if iterations is None:
        iterations = _PBKDF2_ITERATIONS

    # Derive an AES key with PBKDF2. The 'length' is the desired key length
    # of the derived key.
    pbkdf_object = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32,
                              salt=salt, iterations=iterations,
                              backend=backend)
    derived_key = pbkdf_object.derive(password.encode('utf-8'))

    return salt, iterations, derived_key
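A standalone sketch (not part of the module above) showing the property the docstring describes: reusing the salt and iteration count regenerates the same derived key. The constants here are illustrative stand-ins for _SALT_SIZE and _PBKDF2_ITERATIONS:

import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

def derive(password, salt, iterations):
    # A PBKDF2HMAC instance is single-use, so build a fresh one per call.
    kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt,
                     iterations=iterations, backend=default_backend())
    return kdf.derive(password.encode('utf-8'))

salt = os.urandom(16)
key1 = derive('correct horse battery staple', salt, 100000)
key2 = derive('correct horse battery staple', salt, 100000)
assert key1 == key2  # deterministic given the same salt and iterations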
java
public static Map<String, String> params1st() {
    //TODO: candidate for performance optimization
    Map<String, String> params = new HashMap<>();
    Enumeration names = RequestContext.getHttpRequest().getParameterNames();
    while (names.hasMoreElements()) {
        String name = names.nextElement().toString();
        params.put(name, RequestContext.getHttpRequest().getParameter(name));
    }
    if (getId() != null)
        params.put("id", getId());

    Map<String, String> userSegments = RequestContext.getRequestVo().getUserSegments();
    params.putAll(userSegments);
    return params;
}
python
def least_squares(Cui, X, Y, regularization, num_threads=0):
    """ For each user in Cui, calculate factors Xu for them
    using least squares on Y.

    Note: this is at least 10 times slower than the cython version included
    here.
    """
    users, n_factors = X.shape
    YtY = Y.T.dot(Y)

    for u in range(users):
        X[u] = user_factor(Y, YtY, Cui, u, regularization, n_factors)
python
def bbox(ctx, tile):
    """Print Tile bounding box as geometry."""
    geom = TilePyramid(
        ctx.obj['grid'],
        tile_size=ctx.obj['tile_size'],
        metatiling=ctx.obj['metatiling']
    ).tile(*tile).bbox(pixelbuffer=ctx.obj['pixelbuffer'])
    if ctx.obj['output_format'] in ['WKT', 'Tile']:
        click.echo(geom)
    elif ctx.obj['output_format'] == 'GeoJSON':
        click.echo(geojson.dumps(geom))
java
static public void recursiveCreateDirectory(FTPClient ftpClient, String path) throws IOException {
    logger.info("Create Directory: {}", path);
    int createDirectoryStatus = ftpClient.mkd(path); // makeDirectory...
    logger.debug("Create Directory Status: {}", createDirectoryStatus);
    if (createDirectoryStatus == FTP_FILE_NOT_FOUND) {
        int sepIdx = path.lastIndexOf('/');
        if (sepIdx > -1) {
            String parentPath = path.substring(0, sepIdx);
            recursiveCreateDirectory(ftpClient, parentPath);
            logger.debug("2nd Create Directory: {}", path);
            createDirectoryStatus = ftpClient.mkd(path); // makeDirectory...
            logger.debug("2nd Create Directory Status: {}", createDirectoryStatus);
        }
    }
}
python
def download_async(self, remote_path, local_path, callback=None):
    """Downloads remote resources from WebDAV server asynchronously

    :param remote_path: the path to remote resource on WebDAV server. Can be file and directory.
    :param local_path: the path to save resource locally.
    :param callback: the callback which will be invoked when downloading is complete.
    """
    target = (lambda: self.download_sync(local_path=local_path,
                                         remote_path=remote_path,
                                         callback=callback))
    threading.Thread(target=target).start()
python
def evaluate_familytree(self, family_tree, image_set):
    """
    Evaluate strategy for the given family tree and return a dict of images
    to analyze that match the strategy

    :param family_tree: the family tree to traverse and evaluate
    :param image_set: list of all images in the context
    :return:
    """
    if family_tree is None or image_set is None:
        raise ValueError('Cannot execute analysis strategy on None image or image with no familytree data')

    toanalyze = OrderedDict()
    tree_len = len(family_tree)
    for i in family_tree:
        image = image_set[i]
        if self._should_analyze_image(image, family_tree.index(i), tree_len):
            toanalyze[image.meta['imageId']] = image

    return toanalyze
java
public static MutableIntTuple inclusiveScan(
        IntTuple t0, IntBinaryOperator op, MutableIntTuple result) {
    result = IntTuples.validate(t0, result);
    int n = t0.getSize();
    if (n > 0) {
        result.set(0, t0.get(0));
        for (int i = 1; i < n; i++) {
            int operand0 = result.get(i - 1);
            int operand1 = t0.get(i);
            int r = op.applyAsInt(operand0, operand1);
            result.set(i, r);
        }
    }
    return result;
}
java
public static <V extends FeatureVector<?>> VectorFieldTypeInformation<V> typeRequest(Class<? super V> cls) {
    return new VectorFieldTypeInformation<>(cls, -1, Integer.MAX_VALUE);
}
java
public void set(String name, Object obj) throws IOException {
    if (!(obj instanceof X500Name)) {
        throw new IOException("Attribute must be of type X500Name.");
    }
    if (name.equalsIgnoreCase(DN_NAME)) {
        this.dnName = (X500Name)obj;
        this.dnPrincipal = null;
    } else {
        throw new IOException("Attribute name not recognized by " +
                "CertAttrSet:CertificateIssuerName.");
    }
}
java
public static List<Chart> getDetailCharts(ReportLayout reportLayout) {
    List<Chart> charts = new ArrayList<Chart>();
    Band band = reportLayout.getDetailBand();
    for (int i = 0, rows = band.getRowCount(); i < rows; i++) {
        List<BandElement> list = band.getRow(i);
        for (int j = 0, size = list.size(); j < size; j++) {
            BandElement be = list.get(j);
            if (be instanceof ChartBandElement) {
                charts.add(((ChartBandElement) be).getChart());
            }
        }
    }
    return charts;
}
java
@Override
protected String serialize(InvocationContext context, Object input) {
    if (unavailable || incompatible) {
        throw new IllegalStateException(unavailable ? ERROR_CONTEXT_UNAVAILABLE : ERROR_CONTEXT_INCOMPATIBLE);
    }
    if (input == null) {
        return null;
    }
    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        Persister_write.invoke(persister, input, baos);
        return baos.toString();
    } catch (Exception e) {
        throw new SerializerException(new StringBuilder("XML serialization failed for request <")
            .append(context.getRequest().getName())
            .append("> on endpoint <")
            .append(context.getEndpoint().getName())
            .append(">").toString(), e);
    }
}
java
public void setCPInstanceService(
        com.liferay.commerce.product.service.CPInstanceService cpInstanceService) {
    this.cpInstanceService = cpInstanceService;
}
python
def get_port_profile_status_input_request_type_getnext_request_last_received_port_profile_info_profile_mac(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_port_profile_status = ET.Element("get_port_profile_status")
    config = get_port_profile_status
    input = ET.SubElement(get_port_profile_status, "input")
    request_type = ET.SubElement(input, "request-type")
    getnext_request = ET.SubElement(request_type, "getnext-request")
    last_received_port_profile_info = ET.SubElement(getnext_request, "last-received-port-profile-info")
    profile_mac = ET.SubElement(last_received_port_profile_info, "profile-mac")
    profile_mac.text = kwargs.pop('profile_mac')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
def _process_one_indirect_jump(self, jump):
    """
    Resolve a given indirect jump.

    :param IndirectJump jump: The IndirectJump instance.
    :return: A set of resolved indirect jump targets (ints).
    """
    resolved = False
    resolved_by = None
    targets = None

    block = self._lift(jump.addr, opt_level=1)

    for resolver in self.indirect_jump_resolvers:
        resolver.base_state = self._base_state

        if not resolver.filter(self, jump.addr, jump.func_addr, block, jump.jumpkind):
            continue

        resolved, targets = resolver.resolve(self, jump.addr, jump.func_addr, block, jump.jumpkind)
        if resolved:
            resolved_by = resolver
            break

    if resolved:
        self._indirect_jump_resolved(jump, jump.addr, resolved_by, targets)
    else:
        self._indirect_jump_unresolved(jump)

    return set() if targets is None else set(targets)
java
public ServiceFuture<StreamingJobInner> beginCreateOrReplaceAsync(String resourceGroupName, String jobName,
        StreamingJobInner streamingJob, String ifMatch, String ifNoneMatch,
        final ServiceCallback<StreamingJobInner> serviceCallback) {
    return ServiceFuture.fromHeaderResponse(
            beginCreateOrReplaceWithServiceResponseAsync(resourceGroupName, jobName, streamingJob, ifMatch, ifNoneMatch),
            serviceCallback);
}
python
def prettify(amount, separator=','):
    """Separate with predefined separator."""
    orig = str(amount)
    new = re.sub(r"^(-?\d+)(\d{3})", r"\g<1>{0}\g<2>".format(separator), str(amount))
    if orig == new:
        return new
    else:
        return prettify(new)
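The regex inserts one separator before the last three leading digits, and the function recurses until the string stops changing, so groups are added right to left. A few illustrative calls, assuming only the function above plus `import re`:

print(prettify(1234567))      # -> '1,234,567'
print(prettify(-98765, '.'))  # -> '-98.765'
print(prettify(512))          # -> '512' (fewer than four digits, no separator)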
python
def get_ip(request):
    """Return the IP address inside the HTTP_X_FORWARDED_FOR var inside
    the `request` object.

    The return of this function can be overridden by the
    `LOCAL_GEOLOCATION_IP` variable in the `conf` module.

    This function will skip local IPs (starting with 10. and equal to
    127.0.0.1).
    """
    if getsetting('LOCAL_GEOLOCATION_IP'):
        return getsetting('LOCAL_GEOLOCATION_IP')

    forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if not forwarded_for:
        return UNKNOWN_IP

    for ip in forwarded_for.split(','):
        ip = ip.strip()
        if not ip.startswith('10.') and not ip == '127.0.0.1':
            return ip

    return UNKNOWN_IP
java
public void nextRangeMaximumAvailable() {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(tc, "nextRangeMaximumAvailable");

    synchronized (_globalUniqueLock) {
        _globalUniqueThreshold = _globalUniqueLimit + _midrange;
        _globalUniqueLimit = _globalUniqueLimit + _range;
    }

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(tc, "nextRangeMaximumAvailable");
}
java
public DecomposableMatch3<T, A, B, C> build() {
    return new DecomposableMatch3<>(fieldMatchers, extractedIndexes, fieldExtractor);
}
python
def _parse_tree(self, node):
    """ Parse a <require> object """
    self.kind = node.tag
    if 'compare' in node.attrib:
        self.compare = node.attrib['compare']
    if 'version' in node.attrib:
        self.version = node.attrib['version']
    self.value = node.text
python
def begin_transaction(self, project_id, transaction_options=None):
    """Perform a ``beginTransaction`` request.

    :type project_id: str
    :param project_id: The project to connect to. This is
                       usually your project name in the cloud console.

    :type transaction_options: ~.datastore_v1.types.TransactionOptions
    :param transaction_options: (Optional) Options for a new transaction.

    :rtype: :class:`.datastore_pb2.BeginTransactionResponse`
    :returns: The returned protobuf response object.
    """
    request_pb = _datastore_pb2.BeginTransactionRequest()
    return _rpc(
        self.client._http,
        project_id,
        "beginTransaction",
        self.client._base_url,
        request_pb,
        _datastore_pb2.BeginTransactionResponse,
    )
python
def get_table(self, tablename):
    """
    Returns the table whose name is tablename.
    """
    temp = list(filter(lambda x: x.name == tablename, self.tables))
    if temp == list():
        raise Exception("No such table")
    return temp[0]
python
async def AddPendingResources(self, addcharmwithauthorization, entity, resources):
    '''
    addcharmwithauthorization : AddCharmWithAuthorization
    entity : Entity
    resources : typing.Sequence[~CharmResource]
    Returns -> typing.Union[_ForwardRef('ErrorResult'), typing.Sequence[str]]
    '''
    # map input types to rpc msg
    _params = dict()
    msg = dict(type='Resources',
               request='AddPendingResources',
               version=1,
               params=_params)
    _params['AddCharmWithAuthorization'] = addcharmwithauthorization
    _params['Entity'] = entity
    _params['Resources'] = resources
    reply = await self.rpc(msg)
    return reply
python
def get_matcher(self, reqt):
    """
    Get a version matcher for a requirement.

    :param reqt: The requirement
    :type reqt: str
    :return: A version matcher (an instance of
        :class:`distlib.version.Matcher`).
    """
    try:
        matcher = self.scheme.matcher(reqt)
    except UnsupportedVersionError:  # pragma: no cover
        # XXX compat-mode if cannot read the version
        name = reqt.split()[0]
        matcher = self.scheme.matcher(name)
    return matcher
java
public static String getCompressedStringFromGuid(Guid guid) {
    long[] num = new long[6];
    char[][] str = new char[6][5];
    int i, j, n;
    String result = new String();

    //
    // Creation of six 32 Bit integers from the components of the GUID structure
    //
    num[0] = (long)(guid.Data1 / 16777216);                                            // 16. byte (pGuid->Data1 / 16777216) is the same as (pGuid->Data1 >> 24)
    num[1] = (long)(guid.Data1 % 16777216);                                            // 15-13. bytes (pGuid->Data1 % 16777216) is the same as (pGuid->Data1 & 0xFFFFFF)
    num[2] = (long)(guid.Data2 * 256 + guid.Data3 / 256);                              // 12-10. bytes
    num[3] = (long)((guid.Data3 % 256) * 65536 + guid.Data4[0] * 256 + guid.Data4[1]); // 09-07. bytes
    num[4] = (long)(guid.Data4[2] * 65536 + guid.Data4[3] * 256 + guid.Data4[4]);      // 06-04. bytes
    num[5] = (long)(guid.Data4[5] * 65536 + guid.Data4[6] * 256 + guid.Data4[7]);      // 03-01. bytes

    //
    // Conversion of the numbers into a system using a base of 64
    //
    n = 3;
    for (i = 0; i < 6; i++) {
        if (!cv_to_64(num[i], str[i], n)) {
            return null;
        }
        for (j = 0; j < str[i].length; j++)
            if (str[i][j] != '\0')
                result += str[i][j];
        n = 5;
    }
    return result;
}
python
def find_nonterminals_reachable_by_unit_rules(grammar):
    # type: (Grammar) -> UnitSymbolReachability
    """
    Get nonterminal for which exist unit rule

    :param grammar: Grammar where to search
    :return: Instance of UnitSymbolReachability.
    """
    # get nonterminals
    nonterminals = list(grammar.nonterminals)  # type: List[Type[Nonterminal]]
    count_of_nonterms = len(nonterminals)
    # create indexes for nonterminals
    nonterm_to_index = dict()  # type: Dict[Type[Nonterminal], int]
    for i in range(count_of_nonterms):
        nonterm_to_index[nonterminals[i]] = i
    # prepare matrix
    field = [[None for _ in nonterminals] for _ in nonterminals]  # type: MATRIX_OF_UNIT_RULES
    # fill existing unit rules
    for rule in grammar.rules:
        if _is_unit(rule):
            field[nonterm_to_index[rule.fromSymbol]][nonterm_to_index[rule.toSymbol]] = [rule]
    # run Floyd Warshall
    f = field
    for k in range(count_of_nonterms):
        for i in range(count_of_nonterms):
            for j in range(count_of_nonterms):
                if f[i][k] is not None and f[k][j] is not None:
                    if f[i][j] is None or len(f[i][j]) > len(f[i][k]) + len(f[k][j]):
                        f[i][j] = f[i][k] + f[k][j]
    # return results
    return UnitSymbolReachability(f, nonterm_to_index)
python
def re_size(image, factor=1):
    """
    resizes image with nx x ny to nx/factor x ny/factor

    :param image: 2d image with shape (nx,ny)
    :param factor: integer >=1
    :return:
    """
    if factor < 1:
        raise ValueError('scaling factor in re-sizing %s < 1' % factor)
    f = int(factor)
    nx, ny = np.shape(image)
    if int(nx/f) == nx/f and int(ny/f) == ny/f:
        small = image.reshape([int(nx/f), f, int(ny/f), f]).mean(3).mean(1)
        return small
    else:
        raise ValueError("scaling with factor %s is not possible with grid size %s, %s" % (f, nx, ny))
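The reshape-then-mean pattern above is a standard NumPy idiom for block averaging. A small self-contained check of the same idiom, independent of the function above:

import numpy as np

# 4x4 image downsampled by factor 2: each output pixel is the mean of a
# 2x2 block, exactly the reshape([nx/f, f, ny/f, f]).mean(3).mean(1) trick.
image = np.arange(16, dtype=float).reshape(4, 4)
small = image.reshape([2, 2, 2, 2]).mean(3).mean(1)
print(small)
# [[ 2.5  4.5]
#  [10.5 12.5]]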
java
public static ProtocolNegotiator serverPlaintext() {
    return new ProtocolNegotiator() {
        @Override
        public ChannelHandler newHandler(final GrpcHttp2ConnectionHandler handler) {
            class PlaintextHandler extends ChannelHandlerAdapter {
                @Override
                public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
                    // Set attributes before replace to be sure we pass it before accepting any requests.
                    handler.handleProtocolNegotiationCompleted(Attributes.newBuilder()
                        .set(Grpc.TRANSPORT_ATTR_REMOTE_ADDR, ctx.channel().remoteAddress())
                        .set(Grpc.TRANSPORT_ATTR_LOCAL_ADDR, ctx.channel().localAddress())
                        .build(),
                        /*securityInfo=*/ null);
                    // Just replace this handler with the gRPC handler.
                    ctx.pipeline().replace(this, null, handler);
                }
            }
            return new PlaintextHandler();
        }

        @Override
        public void close() {}

        @Override
        public AsciiString scheme() {
            return Utils.HTTP;
        }
    };
}
java
public static <T> Iterable<T> merge(Iterable<T>... iterables) {
    return merge(null, iterables);
}
python
def diffusionCount(source, target, sourceType="raw", extraValue=None, pandasFriendly=False,
                   compareCounts=False, numAuthors=True, useAllAuthors=True,
                   _ProgBar=None, extraMapping=None):
    """Takes in two [RecordCollections](../classes/RecordCollection.html#metaknowledge.RecordCollection)
    and produces a `dict` counting the citations of _source_ by the
    [Records](../classes/Record.html#metaknowledge.Record) of _target_. By default the
    `dict` uses `Record` objects as keys but this can be changed with the _sourceType_
    keyword to any of the WOS tags.

    # Parameters

    _source_ : `RecordCollection`

    > A metaknowledge `RecordCollection` containing the `Records` being cited

    _target_ : `RecordCollection`

    > A metaknowledge `RecordCollection` containing the `Records` citing those in _source_

    _sourceType_ : `optional [str]`

    > default `'raw'`, if `'raw'` the returned `dict` will contain `Records` as keys.
    > If it is a WOS tag the keys will be of that type.

    _pandasFriendly_ : `optional [bool]`

    > default `False`, makes the output be a dict with two keys one `"Record"` is the
    > list of Records (or data type requested by _sourceType_) the other is their
    > occurrence counts as `"Counts"`. The lists are the same length.

    _compareCounts_ : `optional [bool]`

    > default `False`, if `True` the diffusion analysis will be run twice, first with
    > source and target setup like the default (global scope) then using only the
    > source `RecordCollection` (local scope).

    _extraValue_ : `optional [str]`

    > default `None`, if a tag the returned dictionary will have `Records` mapped to
    > maps, these maps will map the entries for the tag to counts. If _pandasFriendly_
    > is also `True` the resultant dictionary will have an additional column called
    > `'year'`. This column will contain the year the citations occurred, in addition
    > the Records entries will be duplicated for each year they occur in.

    > For example if `'year'` was given then the count for a single `Record` could be
    > `{1990 : 1, 2000 : 5}`

    _useAllAuthors_ : `optional [bool]`

    > default `True`, if `False` only the first author will be used to generate the
    > `Citations` for the _source_ `Records`

    # Returns

    `dict[:int]`

    > A dictionary with the type given by _sourceType_ as keys and integers as values.

    > If _compareCounts_ is `True` the values are tuples with the first integer being
    > the diffusion in the target and the second the diffusion in the source.

    > If _pandasFriendly_ is `True` the returned dict has keys with the names of the
    > WOS tags and lists with their values, i.e. a table with labeled columns. The
    > counts are in the column named `"TargetCount"` and if _compareCounts_ the local
    > count is in a column called `"SourceCount"`.
    """
    sourceCountString = "SourceCount"
    targetCountString = "TargetCount"
    if not isinstance(sourceType, str):
        raise RuntimeError("{} is not a valid node type, only tags or the string 'raw' are allowed".format(sourceType))
    if not isinstance(source, RecordCollection) or not isinstance(target, RecordCollection):
        raise RuntimeError("Source and target must be RecordCollections.")
    if extraValue is not None and not isinstance(extraValue, str):
        raise RuntimeError("{} is not a valid extraValue, only tags are allowed".format(extraValue))
    if extraMapping is None:
        extraMapping = lambda x: x
    if metaknowledge.VERBOSE_MODE or _ProgBar:
        if _ProgBar:
            PBar = _ProgBar
            PBar.updateVal(0, "Starting to analyse a diffusion network")
        else:
            PBar = _ProgressBar(0, "Starting to analyse a diffusion network")
        count = 0
        maxCount = len(source)
    else:
        PBar = _ProgressBar("Starting to analyse a diffusion network", dummy=True)
        count = 0
        maxCount = len(source)
    sourceDict = {}
    # Tells the function if the IDs are made of lists or of str
    listIds = None
    for Rs in source:
        if listIds is None and Rs.get(sourceType) is not None:
            listIds = isinstance(Rs.get(sourceType), list)
        count += 1
        PBar.updateVal(count / maxCount * .10, "Analyzing source: " + str(Rs))
        RsVal, RsExtras = makeNodeID(Rs, sourceType)
        if RsVal:
            if useAllAuthors:
                for c in Rs.createCitation(multiCite=True):
                    sourceDict[c] = RsVal
            else:
                sourceDict[Rs.createCitation()] = RsVal
    if extraValue is not None:
        if listIds:
            sourceCounts = {s: {targetCountString: 0} for s in itertools.chain.from_iterable(sourceDict.values())}
        else:
            sourceCounts = {s: {targetCountString: 0} for s in sourceDict.values()}
    else:
        if listIds:
            sourceCounts = {s: 0 for s in itertools.chain.from_iterable(sourceDict.values())}
        else:
            sourceCounts = {s: 0 for s in sourceDict.values()}
    count = 0
    maxCount = len(target)
    PBar.updateVal(.10, "Done analyzing sources, starting on targets")
    for Rt in target:
        count += 1
        PBar.updateVal(count / maxCount * .90 + .10, "Analyzing target: {}".format(Rt))
        targetCites = Rt.get('citations', [])
        if extraValue is not None:
            values = Rt.get(extraValue, [])
            if values is None:
                values = []
            elif not isinstance(values, list):
                values = [values]
            values = [extraMapping(val) for val in values]
        for c in targetCites:
            try:
                RsourceVals = sourceDict[c]
            except KeyError:
                continue
            if listIds:
                for sVal in RsourceVals:
                    if extraValue:
                        sourceCounts[sVal][targetCountString] += 1
                        for val in values:
                            try:
                                sourceCounts[sVal][val] += 1
                            except KeyError:
                                sourceCounts[sVal][val] = 1
                    else:
                        sourceCounts[sVal] += 1
            else:
                if extraValue:
                    sourceCounts[RsourceVals][targetCountString] += 1
                    for val in values:
                        try:
                            sourceCounts[RsourceVals][val] += 1
                        except KeyError:
                            sourceCounts[RsourceVals][val] = 1
                else:
                    sourceCounts[RsourceVals] += 1
    if compareCounts:
        localCounts = diffusionCount(source, source, sourceType=sourceType, pandasFriendly=False,
                                     compareCounts=False, extraValue=extraValue, _ProgBar=PBar)
    if PBar and not _ProgBar:
        PBar.finish("Done counting the diffusion of {} sources into {} targets".format(len(source), len(target)))
    if pandasFriendly:
        retDict = {targetCountString: []}
        if numAuthors:
            retDict["numAuthors"] = []
        if compareCounts:
            retDict[sourceCountString] = []
        if extraValue is not None:
            retDict[extraValue] = []
        if sourceType == 'raw':
            retrievedFields = []
            targetCount = []
            for R in sourceCounts.keys():
                tagsLst = [t for t in R.keys() if t not in retrievedFields]
                retrievedFields += tagsLst
            for tag in retrievedFields:
                retDict[tag] = []
            for R, occ in sourceCounts.items():
                if extraValue:
                    Rvals = R.subDict(retrievedFields)
                    for extraVal, occCount in occ.items():
                        retDict[extraValue].append(extraVal)
                        if numAuthors:
                            retDict["numAuthors"].append(len(R.get('authorsShort')))
                        for tag in retrievedFields:
                            retDict[tag].append(Rvals[tag])
                        retDict[targetCountString].append(occCount)
                        if compareCounts:
                            try:
                                retDict[sourceCountString].append(localCounts[R][extraVal])
                            except KeyError:
                                retDict[sourceCountString].append(0)
                else:
                    Rvals = R.subDict(retrievedFields)
                    if numAuthors:
                        retDict["numAuthors"].append(len(R.get('authorsShort')))
                    for tag in retrievedFields:
                        retDict[tag].append(Rvals[tag])
                    retDict[targetCountString].append(occ)
                    if compareCounts:
                        retDict[sourceCountString].append(localCounts[R])
        else:
            countLst = []
            recLst = []
            locLst = []
            if extraValue:
                extraValueLst = []
            for R, occ in sourceCounts.items():
                if extraValue:
                    for extraVal, occCount in occ.items():
                        countLst.append(occCount)
                        recLst.append(R)
                        extraValueLst.append(extraVal)
                        if compareCounts:
                            try:
                                locLst.append(localCounts[R][extraValue])
                            except KeyError:
                                locLst.append(0)
                else:
                    countLst.append(occ)
                    recLst.append(R)
                    if compareCounts:
                        locLst.append(localCounts[R])
            if compareCounts:
                retDict = {sourceType: recLst, targetCountString: countLst, sourceCountString: locLst}
            else:
                retDict = {sourceType: recLst, targetCountString: countLst}
            if extraValue:
                retDict[extraValue] = extraValueLst
        return retDict
    else:
        if compareCounts:
            for R, occ in localCounts.items():
                sourceCounts[R] = (sourceCounts[R], occ)
        return sourceCounts
java
public void marshall(PutSigningProfileRequest putSigningProfileRequest, ProtocolMarshaller protocolMarshaller) {

    if (putSigningProfileRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }

    try {
        protocolMarshaller.marshall(putSigningProfileRequest.getProfileName(), PROFILENAME_BINDING);
        protocolMarshaller.marshall(putSigningProfileRequest.getSigningMaterial(), SIGNINGMATERIAL_BINDING);
        protocolMarshaller.marshall(putSigningProfileRequest.getPlatformId(), PLATFORMID_BINDING);
        protocolMarshaller.marshall(putSigningProfileRequest.getOverrides(), OVERRIDES_BINDING);
        protocolMarshaller.marshall(putSigningProfileRequest.getSigningParameters(), SIGNINGPARAMETERS_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
python
def update_mandb(self, quiet=True):
    """Update mandb."""
    if not environ.config.UpdateManPath:
        return
    print('\nrunning mandb...')
    cmd = 'mandb %s' % (' -q' if quiet else '')
    subprocess.Popen(cmd, shell=True).wait()
java
@SuppressWarnings("unchecked") public <T> T readAsObject(byte[] content, Class<T> type) { try { if (content == null || content.length == 0 || type == null) { log.debug("content为{},type为:{}", content, type); return null; } else if (type.equals(String.class)) { return (T) new String(content); } return MAPPER.readValue(content, type); } catch (Exception e) { log.error("json解析失败,失败原因:", e); return null; } }
java
@Nullable
@SuppressWarnings("FloatingPointEquality")
private static HttpEncodingType determineEncoding(String acceptEncoding) {
    float starQ = -1.0f;
    float gzipQ = -1.0f;
    float deflateQ = -1.0f;
    for (String encoding : acceptEncoding.split(",")) {
        float q = 1.0f;
        final int equalsPos = encoding.indexOf('=');
        if (equalsPos != -1) {
            try {
                q = Float.parseFloat(encoding.substring(equalsPos + 1));
            } catch (NumberFormatException e) {
                // Ignore encoding
                q = 0.0f;
            }
        }
        if (encoding.contains("*")) {
            starQ = q;
        } else if (encoding.contains("gzip") && q > gzipQ) {
            gzipQ = q;
        } else if (encoding.contains("deflate") && q > deflateQ) {
            deflateQ = q;
        }
    }
    if (gzipQ > 0.0f || deflateQ > 0.0f) {
        if (gzipQ >= deflateQ) {
            return HttpEncodingType.GZIP;
        } else {
            return HttpEncodingType.DEFLATE;
        }
    }
    if (starQ > 0.0f) {
        if (gzipQ == -1.0f) {
            return HttpEncodingType.GZIP;
        }
        if (deflateQ == -1.0f) {
            return HttpEncodingType.DEFLATE;
        }
    }
    return null;
}
java
public List<SequenceState<S, O, D>> computeMostLikelySequence() {
    if (message == null) {
        // Return empty most likely sequence if there are no time steps or if initial
        // observations caused an HMM break.
        return new ArrayList<>();
    } else {
        return retrieveMostLikelySequence();
    }
}
java
public void removePropertyChangeListener(String propertyName, PropertyChangeListener listener) {
    if (listener == null || propertyName == null) {
        return;
    }
    listener = this.map.extract(listener);
    if (listener != null) {
        this.map.remove(propertyName, listener);
    }
}
python
def adjustColors(self, mode='dark'):
    """
    Change a few colors depending on the mode to use. The default mode
    doesn't assume anything and avoids using white & black colors. The
    dark mode uses white and avoids dark blue, while the light mode uses
    black and avoids yellow, to give a few examples.
    """
    rp = Game.__color_modes.get(mode, {})
    for k, color in self.__colors.items():
        self.__colors[k] = rp.get(color, color)
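A minimal standalone sketch of the remapping pattern used above; the mode tables and color names here are hypothetical illustrations, not the game's real palette. Colors found in the selected mode's table are swapped, and rp.get(color, color) leaves every other color untouched.

# Hypothetical palettes for illustration only.
color_modes = {
    'dark': {'blue': 'white'},     # dark mode: swap hard-to-read blue for white
    'light': {'yellow': 'black'},  # light mode: swap yellow for black
}
colors = {'border': 'blue', 'score': 'yellow'}

rp = color_modes.get('dark', {})
adjusted = {k: rp.get(color, color) for k, color in colors.items()}
# 'blue' is remapped for dark mode; 'yellow' has no dark-mode entry, so it stays.
assert adjusted == {'border': 'white', 'score': 'yellow'}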
java
@Override
public com.liferay.commerce.model.CommerceShipmentItem updateCommerceShipmentItem(
        com.liferay.commerce.model.CommerceShipmentItem commerceShipmentItem) {
    return _commerceShipmentItemLocalService.updateCommerceShipmentItem(commerceShipmentItem);
}
python
def temporary_unavailable(request, template_name='503.html'):
    """
    Default 503 handler, which renders the ``503.html`` template with the
    requested path in its context.

    Templates: ``503.html``
    Context:
        request_path
            The path of the requested URL (e.g., '/app/pages/bad_page/')
    """
    context = {
        'request_path': request.path,
    }
    return http.HttpResponseTemporaryUnavailable(
        render_to_string(template_name, context))
python
def _recoverable(self, method, *args, **kwargs):
    """Wraps a method to recover the stream and retry on error.

    If a retryable error occurs while making the call, then the stream will
    be re-opened and the method will be retried. This happens indefinitely
    so long as the error is a retryable one. If an error occurs while
    re-opening the stream, then this method will raise immediately and
    trigger finalization of this object.

    Args:
        method (Callable[..., Any]): The method to call.
        args: The args to pass to the method.
        kwargs: The kwargs to pass to the method.
    """
    while True:
        try:
            return method(*args, **kwargs)
        except Exception as exc:
            with self._operational_lock:
                _LOGGER.debug("Call to retryable %r caused %s.", method, exc)

                if not self._should_recover(exc):
                    self.close()
                    _LOGGER.debug("Not retrying %r due to %s.", method, exc)
                    self._finalize(exc)
                    raise exc

                _LOGGER.debug("Re-opening stream from retryable %r.", method)
                self._reopen()
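A minimal standalone sketch of the same retry-and-reopen loop, with the lock, logging, and finalization stripped out. The names should_recover and reopen are caller-supplied stand-ins for the instance methods above, so this illustrates the pattern rather than the original class's API.

def recoverable_call(method, should_recover, reopen, *args, **kwargs):
    """Keep calling `method`, reopening the stream after each retryable error."""
    while True:
        try:
            return method(*args, **kwargs)
        except Exception as exc:
            # Non-retryable errors propagate to the caller unchanged.
            if not should_recover(exc):
                raise
            # Retryable errors: re-establish the stream and try again.
            reopen()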
java
public Map<String, Object> executeCdpCommand(String commandName, Map<String, Object> parameters) {
    Objects.requireNonNull(commandName, "Command name must be set.");
    Objects.requireNonNull(parameters, "Parameters for command must be set.");

    @SuppressWarnings("unchecked")
    Map<String, Object> toReturn = (Map<String, Object>) getExecuteMethod().execute(
        ChromeDriverCommand.EXECUTE_CDP_COMMAND,
        ImmutableMap.of("cmd", commandName, "params", parameters));

    return ImmutableMap.copyOf(toReturn);
}
java
private int doWriteMultiple(ChannelOutboundBuffer in) throws Exception {
    final long maxBytesPerGatheringWrite = config().getMaxBytesPerGatheringWrite();
    IovArray array = ((EpollEventLoop) eventLoop()).cleanIovArray();
    array.maxBytes(maxBytesPerGatheringWrite);
    in.forEachFlushedMessage(array);

    if (array.count() >= 1) {
        // TODO: Handle the case where cnt == 1 specially.
        return writeBytesMultiple(in, array);
    }
    // cnt == 0, which means the outbound buffer contained empty buffers only.
    in.removeBytes(0);
    return 0;
}
python
def to_geopandas(raster, **kwargs):
    """
    Convert GeoRaster to GeoPandas DataFrame, which can be easily exported
    to other types of files and used to do other types of operations.
    The DataFrame has the geometry (Polygon), row, col, value, x, and y
    values for each cell.

    Usage:
        df = gr.to_geopandas(raster)
    """
    df = to_pandas(raster, **kwargs)
    df['geometry'] = df.apply(squares, georaster=raster, axis=1)
    df = gp.GeoDataFrame(df, crs=from_string(raster.projection.ExportToProj4()))
    return df
java
void measure(Registry registry, JmxData data, List<Measurement> ms) {
    Map<String, String> tags = tagMappings.entrySet().stream().collect(Collectors.toMap(
        Map.Entry::getKey,
        e -> MappingExpr.substitute(e.getValue(), data.getStringAttrs())
    ));
    Id id = registry
        .createId(MappingExpr.substitute(nameMapping, data.getStringAttrs()))
        .withTags(tags);

    Map<String, Number> numberAttrs = new HashMap<>(data.getNumberAttrs());
    JmxData previous = previousData.put(data.getName(), data);
    if (previous != null) {
        previous.getNumberAttrs().forEach((key, value) -> numberAttrs.put("previous:" + key, value));
    }

    Double v = MappingExpr.eval(valueMapping, numberAttrs);
    if (v != null && !v.isNaN()) {
        if (counter) {
            updateCounter(registry, id, v.longValue());
        } else {
            ms.add(new Measurement(id, registry.clock().wallTime(), v));
        }
    }
}
python
def dumpJSON(self):
    """
    Encodes current parameters to JSON compatible dictionary
    """
    numexp = self.number.get()
    expTime, _, _, _, _ = self.timing()
    if numexp == 0:
        numexp = -1

    data = dict(
        numexp=self.number.value(),
        app=self.app.value(),
        led_flsh=self.led(),
        dummy_out=self.dummy(),
        fast_clks=self.fastClk(),
        readout=self.readSpeed(),
        dwell=self.expose.value(),
        exptime=expTime,
        oscan=self.oscan(),
        oscany=self.oscany(),
        xbin=self.wframe.xbin.value(),
        ybin=self.wframe.ybin.value(),
        multipliers=self.nmult.getall(),
        clear=self.clear()
    )

    # only allow nodding in clear mode, even if GUI has got confused
    if data['clear'] and self.nodPattern:
        data['nodpattern'] = self.nodPattern

    # no mixing clear and multipliers, no matter what GUI says
    if data['clear']:
        data['multipliers'] = [1 for i in self.nmult.getall()]

    # add window mode
    if not self.isFF():
        if self.isDrift():
            # no clear, multipliers or oscan in drift
            for setting in ('clear', 'oscan', 'oscany'):
                data[setting] = 0
            data['multipliers'] = [1 for i in self.nmult.getall()]
            for iw, (xsl, xsr, ys, nx, ny) in enumerate(self.wframe):
                data['x{}start_left'.format(iw+1)] = xsl
                data['x{}start_right'.format(iw+1)] = xsr
                data['y{}start'.format(iw+1)] = ys
                data['y{}size'.format(iw+1)] = ny
                data['x{}size'.format(iw+1)] = nx
        else:
            # no oscany in window mode
            data['oscany'] = 0
            for iw, (xsll, xsul, xslr, xsur, ys, nx, ny) in enumerate(self.wframe):
                data['x{}start_upperleft'.format(iw+1)] = xsul
                data['x{}start_lowerleft'.format(iw+1)] = xsll
                data['x{}start_upperright'.format(iw+1)] = xsur
                data['x{}start_lowerright'.format(iw+1)] = xslr
                data['y{}start'.format(iw+1)] = ys
                data['x{}size'.format(iw+1)] = nx
                data['y{}size'.format(iw+1)] = ny
    return data
python
def is_gene_list(bed_file):
    """Check if the file is only a list of genes, not a BED"""
    with utils.open_gzipsafe(bed_file) as in_handle:
        for line in in_handle:
            if not line.startswith("#"):
                if len(line.split()) == 1:
                    return True
                else:
                    return False
java
protected I fixIndex(I idx) {
    if (idx.size() < 2) {
        return idx;
    }
    NavigableMap<Integer, E> map = idx.getMapByNum();
    Iterator<? extends Map.Entry<Integer, E>> it = map.entrySet().iterator();
    // we have at least 2 elements in the iterator
    Map.Entry<Integer, E> curr = it.next();
    while (it.hasNext()) {
        Map.Entry<Integer, E> next = it.next();
        OffsetLength currOfflen = curr.getValue().getOffsetLength();
        OffsetLength nextOfflen = next.getValue().getOffsetLength();
        // extend each entry's length up to the offset of the next entry
        curr.getValue().setOffsetLength(
            new OffsetLength(currOfflen.offset, (int) (nextOfflen.offset - currOfflen.offset)));
        curr = next;
    }
    return idx;
}
python
def getitem_in(obj, name):
    """ Finds a key in @obj via a period-delimited string @name.
        @obj: (#dict)
        @name: (#str) |.|-separated keys to search @obj in
        ..
            obj = {'foo': {'bar': {'baz': True}}}
            getitem_in(obj, 'foo.bar.baz')
        ..
        |True|
    """
    for part in name.split('.'):
        obj = obj[part]
    return obj
java
@Override
public void init(String jsonString) throws IndexerException {
    try {
        config = new JsonSimpleConfig(jsonString);
        init();
    } catch (IOException e) {
        throw new IndexerException(e);
    }
}