language: stringclasses (2 values)
func_code_string: stringlengths (63 to 466k)
python
def form_b(self, n: float) -> tuple:
    """
    Formats a bps value as bps/Kbps/Mbps/Gbps etc. and handles whether the
    value is meant to be in bytes.

    :param n: input float
    :rtype: tuple
    :return: tuple of (float) scaled value and (str) units
    """
    unit = 'bps'
    kilo = 1000
    mega = 1000000
    giga = 1000000000
    bps = 0
    if self.units in ('bytes', 'B'):
        unit = 'Bps'
        kilo = 8000
        mega = 8000000
        giga = 8000000000
    if n < kilo:
        bps = float(n)
    elif n < mega:
        unit = 'K' + unit
        # divide by the same factor used for the threshold; the original
        # divided by 1024.0 while comparing against 1000, mixing SI and
        # binary prefixes
        bps = float(n) / kilo
    elif n < giga:
        unit = 'M' + unit
        bps = float(n) / mega
    else:
        unit = 'G' + unit
        bps = float(n) / giga
    return bps, unit
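A minimal usage sketch of form_b; the SpeedFormatter holder class below is an assumption (the method only relies on a units attribute), and any units value other than 'bytes'/'B' selects bit-based units:

class SpeedFormatter:
    units = 'bits'  # hypothetical holder; only `units` is assumed
    form_b = form_b

fmt = SpeedFormatter()
for raw in (512, 20000, 3500000, 9100000000):
    value, unit = fmt.form_b(raw)
    print('{:.2f} {}'.format(value, unit))
# 512.00 bps, 20.00 Kbps, 3.50 Mbps, 9.10 Gbps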
java
public static <V> PnkyPromise<List<V>> all(
    final Iterable<? extends PnkyPromise<? extends V>> promises) {
  final Pnky<List<V>> pnky = Pnky.create();

  final int numberOfPromises = Iterables.size(promises);

  // Special case, no promises to wait for
  if (numberOfPromises == 0) {
    return Pnky.immediatelyComplete(Collections.<V> emptyList());
  }

  final AtomicInteger remaining = new AtomicInteger(numberOfPromises);
  @SuppressWarnings("unchecked")
  final V[] results = (V[]) new Object[numberOfPromises];
  final Throwable[] errors = new Throwable[numberOfPromises];
  final AtomicBoolean failed = new AtomicBoolean();

  int i = 0;
  for (final PnkyPromise<? extends V> promise : promises) {
    final int promiseNumber = i++;

    promise.alwaysAccept(new ThrowingBiConsumer<V, Throwable>() {
      @Override
      public void accept(final V result, final Throwable error) throws Throwable {
        results[promiseNumber] = result;
        errors[promiseNumber] = error;

        if (error != null) {
          failed.set(true);
        }

        if (remaining.decrementAndGet() == 0) {
          if (failed.get()) {
            pnky.reject(new CombinedException(Arrays.asList(errors)));
          } else {
            pnky.resolve(Arrays.asList(results));
          }
        }
      }
    });
  }

  return pnky;
}
python
def override_if_not_in_args(flag, argument, args):
    """Checks if flag is in args, and if not, adds the flag to args."""
    if flag not in args:
        args.extend([flag, argument])
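A quick self-contained example of the helper's behaviour: the flag is only appended (with its argument) when absent:

args = ['--verbose']
override_if_not_in_args('--output', 'result.txt', args)
override_if_not_in_args('--verbose', 'ignored', args)
print(args)  # ['--verbose', '--output', 'result.txt']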
java
public boolean registerConsumerSetMonitor(
    DestinationHandler topicSpace,
    String discriminatorExpression,
    ConnectionImpl connection,
    ConsumerSetChangeCallback callback)
    throws SIDiscriminatorSyntaxException, SIErrorException {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(tc, "registerConsumerSetMonitor",
            new Object[] { topicSpace, discriminatorExpression, connection, callback });

    boolean isWildcarded = isWildCarded(discriminatorExpression);

    // Get the uuid for the topicspace
    SIBUuid12 topicSpaceUuid = topicSpace.getBaseUuid();
    String topicSpaceStr = topicSpaceUuid.toString();

    // Combine the topicSpace and topic
    String tExpression = buildAddTopicExpression(topicSpaceStr, discriminatorExpression);

    String wildcardStem = null;
    if (isWildcarded) {
        // Retrieve the non-wildcarded stem
        wildcardStem = retrieveNonWildcardStem(tExpression);
    }

    boolean areConsumers = false;
    // Register under a lock on the targets table
    synchronized (_targets) {
        areConsumers = _consumerMonitoring.registerConsumerSetMonitor(
            connection, topicSpace, topicSpaceUuid, discriminatorExpression,
            tExpression, callback, isWildcarded, wildcardStem, this);
    }

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(tc, "registerConsumerSetMonitor", Boolean.valueOf(areConsumers));

    return areConsumers;
}
java
public void setConfigurationIndex(int revision) throws IOException {
    if (revision < 1) {
        throw new IllegalArgumentException("revision must be greater than or equal to 1");
    }
    this.configurationIndex = revision;
    if (this.started) {
        advertiser.setConfigurationIndex(revision);
    }
}
java
public void setInstance(String newInstance) {
    String oldInstance = instance;
    instance = newInstance;
    if (eNotificationRequired())
        eNotify(new ENotificationImpl(this, Notification.SET,
                BpsimPackage.PARAMETER_VALUE__INSTANCE, oldInstance, instance));
}
python
def set_speed(self, speed=None, auto=False, adaptive=False):
    """Sets the speed of the JTAG communication with the ARM core.

    If no arguments are present, automatically detects speed.

    If a ``speed`` is provided, the speed must be no larger than
    ``JLink.MAX_JTAG_SPEED`` and no smaller than ``JLink.MIN_JTAG_SPEED``.
    The given ``speed`` can also not be ``JLink.INVALID_JTAG_SPEED``.

    Args:
      self (JLink): the ``JLink`` instance
      speed (int): the speed in kHz to set the communication at
      auto (bool): automatically detect correct speed
      adaptive (bool): select adaptive clocking as JTAG speed

    Returns:
      ``None``

    Raises:
      TypeError: if given speed is not a natural number.
      ValueError: if given speed is too high, too low, or invalid.
    """
    if speed is None:
        speed = 0
    elif not util.is_natural(speed):
        raise TypeError('Expected positive number for speed, given %s.' % speed)
    elif speed > self.MAX_JTAG_SPEED:
        raise ValueError('Given speed exceeds max speed of %d.' % self.MAX_JTAG_SPEED)
    elif speed < self.MIN_JTAG_SPEED:
        raise ValueError('Given speed is too slow. Minimum is %d.' % self.MIN_JTAG_SPEED)

    if auto:
        speed = speed | self.AUTO_JTAG_SPEED

    if adaptive:
        speed = speed | self.ADAPTIVE_JTAG_SPEED

    self._dll.JLINKARM_SetSpeed(speed)

    return None
python
def _pvi_path(granule):
    """Determine the PreView Image (PVI) path inside the SAFE pkg."""
    # use the next() builtin instead of the Python 2-only .next() method
    pvi_name = next(granule._metadata.iter("PVI_FILENAME")).text
    pvi_name = pvi_name.split("/")
    pvi_path = os.path.join(
        granule.granule_path, pvi_name[-2], pvi_name[-1]
    )
    try:
        assert os.path.isfile(pvi_path) or \
            pvi_path in granule.dataset._zipfile.namelist()
    except (AssertionError, AttributeError):
        return None
    return pvi_path
java
public OvhRecord zone_zoneName_record_POST(String zoneName,
        OvhNamedResolutionFieldTypeEnum fieldType, String subDomain,
        String target, Long ttl) throws IOException {
    String qPath = "/domain/zone/{zoneName}/record";
    StringBuilder sb = path(qPath, zoneName);
    HashMap<String, Object> o = new HashMap<String, Object>();
    addBody(o, "fieldType", fieldType);
    addBody(o, "subDomain", subDomain);
    addBody(o, "target", target);
    addBody(o, "ttl", ttl);
    String resp = exec(qPath, "POST", sb.toString(), o);
    return convertTo(resp, OvhRecord.class);
}
java
protected String computeCurrentFolder() {
    String currentFolder = getSettings().getExplorerResource();
    if (currentFolder == null) {
        // set current folder to root folder
        try {
            currentFolder = getCms().getSitePath(
                getCms().readFolder("/", CmsResourceFilter.IGNORE_EXPIRATION));
        } catch (CmsException e) {
            // can usually be ignored
            if (LOG.isInfoEnabled()) {
                LOG.info(e);
            }
            currentFolder = "/";
        }
    }
    if (!currentFolder.endsWith("/")) {
        // add folder separator to currentFolder
        currentFolder += "/";
    }
    return currentFolder;
}
java
@Nullable
public T poll() throws Exception {
    if (exception != null) throw exception;

    if (put != null && willBeExhausted()) {
        T item = doPoll();
        SettablePromise<Void> put = this.put;
        this.put = null;
        put.set(null);
        return item;
    }

    return !isEmpty() ? doPoll() : null;
}
java
@Override
public java.util.List<com.liferay.commerce.product.model.CPOption>
        getCPOptionsByUuidAndCompanyId(String uuid, long companyId) {
    return _cpOptionLocalService.getCPOptionsByUuidAndCompanyId(uuid, companyId);
}
java
public static double constraint(TrifocalTensor tensor,
                                Point2D_F64 p1, Vector3D_F64 l2, Vector3D_F64 l3) {
    DMatrixRMaj sum = new DMatrixRMaj(3, 3);

    CommonOps_DDRM.add(p1.x, tensor.T1, sum, sum);
    CommonOps_DDRM.add(p1.y, tensor.T2, sum, sum);
    CommonOps_DDRM.add(tensor.T3, sum, sum);

    return GeometryMath_F64.innerProd(l2, sum, l3);
}
java
private void setChromeOptions(final DesiredCapabilities capabilities, ChromeOptions chromeOptions) {
    // Set a custom download directory so a robot can check the content of downloaded files.
    final HashMap<String, Object> chromePrefs = new HashMap<>();
    chromePrefs.put("download.default_directory",
            System.getProperty(USER_DIR) + File.separator + DOWNLOADED_FILES_FOLDER);
    chromeOptions.setExperimentalOption("prefs", chromePrefs);

    // Set a custom chromium binary (if you do not use the default chromium on your target device).
    final String targetBrowserBinaryPath = Context.getWebdriversProperties("targetBrowserBinaryPath");
    if (targetBrowserBinaryPath != null && !"".equals(targetBrowserBinaryPath)) {
        chromeOptions.setBinary(targetBrowserBinaryPath);
    }

    capabilities.setCapability(ChromeOptions.CAPABILITY, chromeOptions);
}
java
public void filterPaths(PathImpl path, String prefix, PathCallback cb) {
    if (!path.exists() || !path.canRead()) {
        return;
    }

    if (!isValidPrefix(path, prefix)) {
        return;
    }

    if (path.isDirectory()) {
        try {
            String[] list = path.list();

            for (int i = 0; i < list.length; i++) {
                String name = list[i];

                if (".".equals(name) || "..".equals(name))
                    continue;

                // jsp/187j
                PathImpl subpath = path.lookup("./" + name);

                filterPaths(subpath, prefix, cb);
            }
        } catch (IOException e) {
            log.log(Level.WARNING, e.toString(), e);
        }
    }

    if (path.exists()) {
        // server/2438 - logging on unreadable
        // if (path.canRead()) {
        if (isMatch(path, prefix)) {
            String suffix = "";
            String fullPath = path.getPath();

            if (prefix.length() < fullPath.length())
                suffix = path.getPath().substring(prefix.length());

            path.setUserPath(_userPathPrefix + suffix);

            cb.onMatch(path);
        }
    }
}
java
@Override
public String readMultiByte(int length, String charSet) {
    return dataInput.readMultiByte(length, charSet);
}
python
def _parse(self, infile):
    """Actually parse the config file."""
    temp_list_values = self.list_values
    if self.unrepr:
        self.list_values = False

    comment_list = []
    done_start = False
    this_section = self
    maxline = len(infile) - 1
    cur_index = -1
    reset_comment = False

    while cur_index < maxline:
        if reset_comment:
            comment_list = []
        cur_index += 1
        line = infile[cur_index]
        sline = line.strip()
        # do we have anything on the line ?
        if not sline or sline.startswith('#'):
            reset_comment = False
            comment_list.append(line)
            continue

        if not done_start:
            # preserve initial comment
            self.initial_comment = comment_list
            comment_list = []
            done_start = True

        reset_comment = True
        # first we check if it's a section marker
        mat = self._sectionmarker.match(line)
        if mat is not None:
            # is a section line
            (indent, sect_open, sect_name, sect_close, comment) = mat.groups()
            if indent and (self.indent_type is None):
                self.indent_type = indent
            cur_depth = sect_open.count('[')
            if cur_depth != sect_close.count(']'):
                self._handle_error(
                    "Cannot compute the section depth at line %s.",
                    NestingError, infile, cur_index)
                continue

            if cur_depth < this_section.depth:
                # the new section is dropping back to a previous level
                try:
                    parent = self._match_depth(this_section, cur_depth).parent
                except SyntaxError:
                    self._handle_error(
                        "Cannot compute nesting level at line %s.",
                        NestingError, infile, cur_index)
                    continue
            elif cur_depth == this_section.depth:
                # the new section is a sibling of the current section
                parent = this_section.parent
            elif cur_depth == this_section.depth + 1:
                # the new section is a child of the current section
                parent = this_section
            else:
                self._handle_error(
                    "Section too nested at line %s.",
                    NestingError, infile, cur_index)

            sect_name = self._unquote(sect_name)
            if sect_name in parent:
                self._handle_error(
                    'Duplicate section name at line %s.',
                    DuplicateError, infile, cur_index)
                continue

            # create the new section
            this_section = Section(
                parent, cur_depth, self, name=sect_name)
            parent[sect_name] = this_section
            parent.inline_comments[sect_name] = comment
            parent.comments[sect_name] = comment_list
            continue
        #
        # it's not a section marker,
        # so it should be a valid ``key = value`` line
        mat = self._keyword.match(line)
        if mat is None:
            # it neither matched as a keyword
            # nor as a section marker
            self._handle_error(
                'Invalid line at line "%s".',
                ParseError, infile, cur_index)
        else:
            # is a keyword value
            # value will include any inline comment
            (indent, key, value) = mat.groups()
            if indent and (self.indent_type is None):
                self.indent_type = indent
            # check for a multiline value
            if value[:3] in ['"""', "'''"]:
                try:
                    value, comment, cur_index = self._multiline(
                        value, infile, cur_index, maxline)
                except SyntaxError:
                    self._handle_error(
                        'Parse error in value at line %s.',
                        ParseError, infile, cur_index)
                    continue
                else:
                    if self.unrepr:
                        comment = ''
                        try:
                            value = unrepr(value)
                        except Exception as e:
                            if type(e) == UnknownType:
                                msg = 'Unknown name or type in value at line %s.'
                            else:
                                msg = 'Parse error in value at line %s.'
                            self._handle_error(msg, UnreprError, infile, cur_index)
                            continue
            else:
                if self.unrepr:
                    comment = ''
                    try:
                        value = unrepr(value)
                    except Exception as e:
                        if isinstance(e, UnknownType):
                            msg = 'Unknown name or type in value at line %s.'
                        else:
                            msg = 'Parse error in value at line %s.'
                        self._handle_error(msg, UnreprError, infile, cur_index)
                        continue
                else:
                    # extract comment and lists
                    try:
                        (value, comment) = self._handle_value(value)
                    except SyntaxError:
                        self._handle_error(
                            'Parse error in value at line %s.',
                            ParseError, infile, cur_index)
                        continue
            #
            key = self._unquote(key)
            if key in this_section:
                self._handle_error(
                    'Duplicate keyword name at line %s.',
                    DuplicateError, infile, cur_index)
                continue
            # add the key.
            # we set unrepr because if we have got this far we will never
            # be creating a new section
            this_section.__setitem__(key, value, unrepr=True)
            this_section.inline_comments[key] = comment
            this_section.comments[key] = comment_list
            continue
    #
    if self.indent_type is None:
        # no indentation used, set the type accordingly
        self.indent_type = ''

    # preserve the final comment
    if not self and not self.initial_comment:
        self.initial_comment = comment_list
    elif not reset_comment:
        self.final_comment = comment_list
    self.list_values = temp_list_values
python
def apply(self, func, *args, **kwargs):
    """Apply the provided function and combine the results together in the
    same way as apply from groupby in pandas.

    This returns a DataFrame.
    """
    self._prep_pandas_groupby()

    def key_by_index(data):
        """Key each row by its index."""
        # TODO: Is there a better way to do this?
        for key, row in data.iterrows():
            yield (key, pd.DataFrame.from_dict(
                dict([(key, row)]), orient='index'))

    myargs = self._myargs
    mykwargs = self._mykwargs
    regroupedRDD = self._distributedRDD.mapValues(
        lambda data: data.groupby(*myargs, **mykwargs))
    appliedRDD = regroupedRDD.map(
        lambda key_data: key_data[1].apply(func, *args, **kwargs))
    reKeyedRDD = appliedRDD.flatMap(key_by_index)
    dataframe = self._sortIfNeeded(reKeyedRDD).values()
    return DataFrame.fromDataFrameRDD(dataframe, self.sql_ctx)
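For reference, the per-group behaviour this distributed method mirrors is plain pandas groupby/apply; a self-contained sketch:

import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'b'], 'val': [1, 2, 3]})
result = df.groupby('key').apply(lambda g: g['val'].sum())
print(result.to_dict())  # {'a': 3, 'b': 3}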
python
def sym_log_map(cls, q, p):
    """Quaternion symmetrized logarithm map.

    Find the symmetrized logarithm map on the quaternion Riemannian manifold.

    Params:
         q: the base point at which the logarithm is computed, i.e.
            a Quaternion object
         p: the argument of the quaternion map, a Quaternion object

    Returns:
         A tangent vector corresponding to the symmetrized geodesic curve
         formulation.

    Note:
        Information on the symmetrized formulations given in
        [Source](https://www.researchgate.net/publication/267191489_Riemannian_L_p_Averaging_on_Lie_Group_of_Nonzero_Quaternions).
    """
    inv_sqrt_q = (q ** (-0.5))
    return Quaternion.log(inv_sqrt_q * p * inv_sqrt_q)
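A minimal usage sketch, assuming the surrounding pyquaternion-style Quaternion class (with log, ** and * defined as used in the method above):

q = Quaternion(axis=[0, 0, 1], angle=0.0)  # base point (identity)
p = Quaternion(axis=[0, 0, 1], angle=0.5)
tangent = Quaternion.sym_log_map(q, p)
# with q the identity, sym_log_map(q, p) reduces to the plain log map of p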
python
def get_index(self, name):
    """get an index by name

    TODO: Combine indexes of relevant catalogs depending on the portal_type
    which is searched for.
    """
    catalog = self.get_catalog()
    index = catalog._catalog.getIndex(name)
    logger.debug("get_index={} of catalog '{}' --> {}".format(
        name, catalog.__name__, index))
    return index
python
def element_to_objects(
        element: etree.ElementTree, sender: str,
        sender_key_fetcher: Callable[[str], str] = None,
        user: UserType = None,
) -> List:
    """Transform an Element to a list of entities recursively.

    Possible child entities are added to each entity ``_children`` list.

    :param element: Element
    :param sender: Payload sender id
    :param sender_key_fetcher: Function to fetch sender public key. If not
        given, key will always be fetched over network. The function should
        take sender handle as the only parameter.
    :param user: Optional receiving user object. If given, should have an ``id``.
    :returns: list of entities
    """
    entities = []
    cls = MAPPINGS.get(element.tag)
    if not cls:
        return []

    attrs = xml_children_as_dict(element)
    transformed = transform_attributes(attrs, cls)
    if hasattr(cls, "fill_extra_attributes"):
        transformed = cls.fill_extra_attributes(transformed)
    entity = cls(**transformed)
    # Add protocol name
    entity._source_protocol = "diaspora"
    # Save element object to entity for possible later use
    entity._source_object = etree.tostring(element)
    # Save receiving id to object
    if user:
        entity._receiving_actor_id = user.id

    if issubclass(cls, DiasporaRelayableMixin):
        # If relayable, fetch sender key for validation
        entity._xml_tags = get_element_child_info(element, "tag")
        if sender_key_fetcher:
            entity._sender_key = sender_key_fetcher(entity.actor_id)
        else:
            profile = retrieve_and_parse_profile(entity.handle)
            if profile:
                entity._sender_key = profile.public_key
    else:
        # If not relayable, ensure handles match
        if not check_sender_and_entity_handle_match(sender, entity.handle):
            return []

    try:
        entity.validate()
    except ValueError as ex:
        logger.error("Failed to validate entity %s: %s", entity, ex, extra={
            "attrs": attrs,
            "transformed": transformed,
        })
        return []

    # Extract mentions
    entity._mentions = entity.extract_mentions()

    # Do child elements
    for child in element:
        entity._children.extend(element_to_objects(child, sender, user=user))

    # Add to entities list
    entities.append(entity)

    return entities
python
def db_scan_block(block_id, op_list, db_state=None):
    """
    (required by virtualchain state engine)

    Given the block ID and the list of virtualchain operations in the block,
    do block-level preprocessing:
    * find the state-creation operations we will accept
    * make sure there are no collisions.

    This modifies op_list, but returns nothing.

    This aborts on runtime error.
    """
    try:
        assert db_state is not None, "BUG: no state given"
    except Exception as e:  # "except Exception, e" is Python 2-only syntax
        log.exception(e)
        log.error("FATAL: no state given")
        os.abort()

    log.debug("SCAN BEGIN: {} ops at block {}".format(len(op_list), block_id))

    checked_ops = []
    for op_data in op_list:
        try:
            opcode = op_get_opcode_name(op_data['op'])
            # the original message referenced an undefined name `op`
            assert opcode is not None, "BUG: unknown op '%s'" % op_data['op']
        except Exception as e:
            log.exception(e)
            log.error("FATAL: invalid operation")
            os.abort()

        if opcode not in OPCODE_CREATION_OPS:
            continue

        # make sure there are no collisions:
        # build up our collision table in db_state.
        op_check(db_state, op_data, block_id, checked_ops)
        checked_ops.append(op_data)

    # get collision information for this block
    collisions = db_state.find_collisions(checked_ops)

    # reject all operations that will collide
    db_state.put_collisions(block_id, collisions)

    log.debug("SCAN END: {} ops at block {} ({} collisions)".format(
        len(op_list), block_id, len(collisions)))
java
public Content propertyTagOutput(Tag tag, String prefix) {
    Content body = new ContentBuilder();
    body.addContent(new RawHtml(prefix));
    body.addContent(" ");
    body.addContent(HtmlTree.CODE(new RawHtml(tag.text())));
    body.addContent(".");
    Content result = HtmlTree.P(body);
    return result;
}
java
private String extractErrorMessageFromResponse(HttpResponse response) {
    String contentType = response.getEntity().getContentType().getValue();
    if (contentType.contains("application/json")) {
        Gson gson = GsonResponseParser.getDefaultGsonParser(false);
        String responseBody = null;
        try {
            responseBody = EntityUtils.toString(response.getEntity());
            LOG.error("Body of error response from Canvas: " + responseBody);
            CanvasErrorResponse errorResponse = gson.fromJson(responseBody, CanvasErrorResponse.class);
            List<ErrorMessage> errors = errorResponse.getErrors();
            if (errors != null) {
                // I have only ever seen a single error message but it is an array
                // so presumably there could be more.
                return errors.stream().map(e -> e.getMessage()).collect(Collectors.joining(", "));
            } else {
                return responseBody;
            }
        } catch (Exception e) {
            // Returned JSON was not in expected format. Fall back to returning
            // the whole response body, if any.
            if (StringUtils.isNotBlank(responseBody)) {
                return responseBody;
            }
        }
    }
    return null;
}
java
public SortedSet<HmmerResult> scan(ProteinSequence sequence, URL serviceLocation) throws IOException {
    StringBuffer postContent = new StringBuffer();

    postContent.append("hmmdb=pfam");

    // by default hmmscan runs with the HMMER3 cut_ga parameter enabled, the "gathering
    // threshold", which depends on the cutoffs defined in the underlying HMM files.
    // to request a different cutoff by e-value this could be enabled:
    //postContent.append("&E=1");

    postContent.append("&seq=");
    postContent.append(sequence.getSequenceAsString());

    HttpURLConnection connection = (HttpURLConnection) serviceLocation.openConnection();
    connection.setDoOutput(true);
    connection.setDoInput(true);
    connection.setConnectTimeout(15000); // 15 sec
    connection.setInstanceFollowRedirects(false);
    connection.setRequestMethod("POST");
    connection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded");
    connection.setRequestProperty("Accept", "application/json");
    connection.setRequestProperty("Content-Length",
            "" + Integer.toString(postContent.toString().getBytes().length));

    // Send request
    DataOutputStream wr = new DataOutputStream(connection.getOutputStream());
    wr.write(postContent.toString().getBytes());
    wr.flush();
    wr.close();

    // Now get the redirect URL
    URL respUrl = new URL(connection.getHeaderField("Location"));

    int responseCode = connection.getResponseCode();
    if (responseCode == 500) {
        LOGGER.warn("Got 500 response code for URL {}. Response message: {}.",
                serviceLocation, connection.getResponseMessage());
    }

    HttpURLConnection connection2 = (HttpURLConnection) respUrl.openConnection();
    connection2.setRequestMethod("GET");
    connection2.setRequestProperty("Accept", "application/json");
    connection2.setConnectTimeout(60000); // 1 minute

    // Get the response
    BufferedReader in = new BufferedReader(new InputStreamReader(connection2.getInputStream()));
    String inputLine;
    StringBuffer result = new StringBuffer();
    while ((inputLine = in.readLine()) != null) {
        result.append(inputLine);
    }
    in.close();

    // process the response and build up a container for the data.
    SortedSet<HmmerResult> results = new TreeSet<HmmerResult>();
    try {
        JSONObject json = JSONObject.fromObject(result.toString());
        JSONObject hmresults = json.getJSONObject("results");
        JSONArray hits = hmresults.getJSONArray("hits");

        for (int i = 0; i < hits.size(); i++) {
            JSONObject hit = hits.getJSONObject(i);

            HmmerResult hmmResult = new HmmerResult();

            Object dclO = hit.get("dcl");
            Integer dcl = -1;
            if (dclO instanceof Long) {
                Long dclL = (Long) dclO;
                dcl = dclL.intValue();
            } else if (dclO instanceof Integer) {
                dcl = (Integer) dclO;
            }

            hmmResult.setAcc((String) hit.get("acc"));
            hmmResult.setDcl(dcl);
            hmmResult.setDesc((String) hit.get("desc"));
            hmmResult.setEvalue(Float.parseFloat((String) hit.get("evalue")));
            hmmResult.setName((String) hit.get("name"));
            hmmResult.setNdom((Integer) hit.get("ndom"));
            hmmResult.setNreported((Integer) hit.get("nreported"));
            hmmResult.setPvalue((Double) hit.get("pvalue"));
            hmmResult.setScore(Float.parseFloat((String) hit.get("score")));

            JSONArray hmmdomains = hit.getJSONArray("domains");

            SortedSet<HmmerDomain> domains = new TreeSet<HmmerDomain>();
            for (int j = 0; j < hmmdomains.size(); j++) {
                JSONObject d = hmmdomains.getJSONObject(j);

                Integer is_included = getInteger(d.get("is_included"));
                if (is_included == 0) {
                    continue;
                }

                // this filters out multiple hits to the same clan
                Integer outcompeted = getInteger(d.get("outcompeted"));
                if (outcompeted != null && outcompeted == 1) {
                    continue;
                }

                Integer significant = getInteger(d.get("significant"));
                if (significant != 1) {
                    continue;
                }

                HmmerDomain dom = new HmmerDomain();
                dom.setAliLenth((Integer) d.get("aliL"));
                dom.setHmmAcc((String) d.get("alihmmacc"));
                dom.setHmmDesc((String) d.get("alihmmdesc"));
                dom.setHmmFrom(getInteger(d.get("alihmmfrom")));
                dom.setHmmTo(getInteger(d.get("alihmmto")));
                dom.setSimCount((Integer) d.get("aliSimCount"));
                dom.setSqFrom(getInteger(d.get("alisqfrom")));
                dom.setSqTo(getInteger(d.get("alisqto")));
                dom.setHmmName((String) d.get("alihmmname"));
                dom.setEvalue(Float.parseFloat((String) d.get("ievalue")));

                domains.add(dom);
            }

            hmmResult.setDomains(domains);

            results.add(hmmResult);
        }
    } catch (NumberFormatException e) {
        LOGGER.warn("Could not parse number in Hmmer web service json response: {}", e.getMessage());
    }

    return results;
}
python
def clip(self, lower=0, upper=127):
    """
    Clip the pianorolls of all tracks by the given lower and upper bounds.

    Parameters
    ----------
    lower : int or float
        The lower bound to clip the pianorolls. Defaults to 0.
    upper : int or float
        The upper bound to clip the pianorolls. Defaults to 127.

    """
    for track in self.tracks:
        track.clip(lower, upper)
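A short usage sketch; the multitrack object below is an assumption (a pypianoroll-style container whose tracks hold numpy pianorolls):

# clamp all note velocities into the valid MIDI range
multitrack.clip(lower=0, upper=127)
# or clip into a narrower dynamic band
multitrack.clip(lower=20, upper=100)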
java
protected void swapFragments(final AjaxRequestTarget target, final Form<?> form) {
    if (modeContext.equals(ModeContext.VIEW_MODE)) {
        onSwapToEdit(target, form);
    } else {
        onSwapToView(target, form);
    }
}
java
private static Properties load() {
    Properties properties = new Properties();

    String file = System.getProperty(CONFIGURATION_PROPERTY);
    try {
        if (file != null) {
            InputStream stream;
            if (URL_DETECTION_PATTERN.matcher(file).matches()) {
                stream = new URL(file).openStream();
            } else {
                stream = Thread.currentThread().getContextClassLoader().getResourceAsStream(file);
                if (stream == null) {
                    stream = new FileInputStream(file);
                }
            }
            load(properties, stream);
        } else {
            InputStream stream = Thread.currentThread().getContextClassLoader()
                    .getResourceAsStream(DEFAULT_CONFIGURATION_FILE);
            if (stream != null) {
                load(properties, stream);
            }
        }
    } catch (IOException ex) {
        InternalLogger.log(Level.ERROR, "Failed loading configuration from '" + file + "'");
    }

    for (Object key : new ArrayList<Object>(System.getProperties().keySet())) {
        String name = (String) key;
        if (name.startsWith(PROPERTIES_PREFIX)) {
            properties.put(name.substring(PROPERTIES_PREFIX.length()), System.getProperty(name));
        }
    }

    for (Entry<Object, Object> entry : properties.entrySet()) {
        String value = (String) entry.getValue();
        if (value.indexOf('{') != -1) {
            value = resolve(value, EnvironmentVariableResolver.INSTANCE);
            value = resolve(value, SystemPropertyResolver.INSTANCE);
            properties.put(entry.getKey(), value);
        }
    }

    return properties;
}
java
@Nullable
public static String readSafeUTF(@Nonnull final DataInput aDI) throws IOException {
    ValueEnforcer.notNull(aDI, "DataInput");

    final int nLayout = aDI.readByte();
    final String ret;
    switch (nLayout) {
        case 0: {
            // If the first byte has value "0" it means the whole String is simply null
            ret = null;
            break;
        }
        case 1: {
            // length in UTF-8 bytes followed by the main bytes
            final int nLength = aDI.readInt();
            final byte[] aData = new byte[nLength];
            aDI.readFully(aData);
            ret = new String(aData, StandardCharsets.UTF_8);
            break;
        }
        case 2: {
            // length in UTF-8 bytes followed by the main bytes, then the end-of-string marker
            final int nLength = aDI.readInt();
            final byte[] aData = new byte[nLength];
            aDI.readFully(aData);
            ret = new String(aData, StandardCharsets.UTF_8);
            final int nEndOfString = aDI.readInt();
            if (nEndOfString != END_OF_STRING_MARKER)
                throw new IOException("Missing end of String marker");
            break;
        }
        default:
            throw new IOException("Unsupported string layout version " + nLayout);
    }
    return ret;
}
python
def analysis(analysis_id):
    """Display a single analysis."""
    analysis_obj = store.analysis(analysis_id)
    if analysis_obj is None:
        return abort(404)
    if request.method == 'PUT':
        analysis_obj.update(request.json)
        store.commit()
    data = analysis_obj.to_dict()
    data['failed_jobs'] = [job_obj.to_dict() for job_obj in analysis_obj.failed_jobs]
    data['user'] = analysis_obj.user.to_dict() if analysis_obj.user else None
    return jsonify(**data)
java
public String toJSONString(final boolean compact) {
    final StringBuilder builder = new StringBuilder();
    formatAsJSON(builder, 0, !compact);
    return builder.toString();
}
python
def rotate(name, pattern=None, conf_file=default_conf, **kwargs):
    '''
    Set up pattern for logging.

    name : string
        alias for entryname
    pattern : string
        alias for log_file
    conf_file : string
        optional path to alternative configuration file
    kwargs : boolean|string|int
        optional additional flags and parameters

    .. note::
        ``name`` and ``pattern`` were kept for backwards compatibility reasons.

        ``name`` is an alias for the ``entryname`` argument, ``pattern`` is an
        alias for ``log_file``. These aliases will only be used if the
        ``entryname`` and ``log_file`` arguments are not passed.

        For a full list of arguments see ```logadm.show_args```.

    CLI Example:

    .. code-block:: bash

        salt '*' logadm.rotate myapplog pattern='/var/log/myapp/*.log' count=7
        salt '*' logadm.rotate myapplog log_file='/var/log/myapp/*.log' count=4 owner=myappd mode='0700'

    '''
    ## cleanup kwargs
    kwargs = salt.utils.args.clean_kwargs(**kwargs)

    ## inject name into kwargs
    if 'entryname' not in kwargs and name and not name.startswith('/'):
        kwargs['entryname'] = name

    ## inject pattern into kwargs
    if 'log_file' not in kwargs:
        if pattern and pattern.startswith('/'):
            kwargs['log_file'] = pattern
        # NOTE: for backwards compatibility check if name is a path
        elif name and name.startswith('/'):
            kwargs['log_file'] = name

    ## build command
    log.debug("logadm.rotate - kwargs: %s", kwargs)
    command = "logadm -f {}".format(conf_file)
    for arg, val in kwargs.items():
        if arg in option_toggles.values() and val:
            command = "{} {}".format(
                command,
                _arg2opt(arg),
            )
        elif arg in option_flags.values():
            command = "{} {} {}".format(
                command,
                _arg2opt(arg),
                _quote_args(six.text_type(val))
            )
        elif arg != 'log_file':
            log.warning("Unknown argument %s, don't know how to map this!", arg)
    if 'log_file' in kwargs:
        # NOTE: excerpt from ```man logadm```
        #   If no log file name is provided on a logadm command line, the entry
        #   name is assumed to be the same as the log file name. For example,
        #   the following two lines achieve the same thing, keeping two copies
        #   of rotated log files:
        #
        #     % logadm -C2 -w mylog /my/really/long/log/file/name
        #     % logadm -C2 -w /my/really/long/log/file/name
        if 'entryname' not in kwargs:
            command = "{} -w {}".format(command, _quote_args(kwargs['log_file']))
        else:
            command = "{} {}".format(command, _quote_args(kwargs['log_file']))

    log.debug("logadm.rotate - command: %s", command)
    result = __salt__['cmd.run_all'](command, python_shell=False)
    if result['retcode'] != 0:
        return dict(Error='Failed in adding log', Output=result['stderr'])

    return dict(Result='Success')
python
def distributions(self, complexes, counts, volume, maxstates=1e7,
                  ordered=False, temp=37.0):
    '''Runs the \'distributions\' NUPACK command. Note: this is intended
    for a relatively small number of species (on the order of ~20
    total strands for complex size ~14).

    :param complexes: A list of the type returned by the complexes()
                      method.
    :type complexes: list
    :param counts: A list of the exact number of molecules of each initial
                   species (the strands in the complexes command).
    :type counts: list of ints
    :param volume: The volume, in liters, of the container.
    :type volume: float
    :param maxstates: Maximum number of states to be enumerated, needed
                      as allowing too many states can lead to a segfault.
                      In NUPACK, this is referred to as lambda.
    :type maxstates: float
    :param ordered: Consider distinct ordered complexes - all distinct
                    circular permutations of each complex.
    :type ordered: bool
    :param temp: Temperature in C.
    :type temp: float
    :returns: A list of dictionaries containing (at least) a 'complexes'
              key for the unique complex, an 'ev' key for the expected
              value of the complex population and a 'probcols' list
              indicating the probability that a given complex has
              population 0, 1, ... max(pop) at equilibrium.
    :rtype: list
    :raises: LambdaError if maxstates is exceeded.

    '''
    # Check inputs
    nstrands = len(complexes[0]['strands'])
    if len(counts) != nstrands:
        raise ValueError('counts argument not same length as strands.')

    # Set up command-line arguments
    cmd_args = []
    if ordered:
        cmd_args.append('-ordered')

    # Write .count file (one entry per line; the original used writelines
    # without newline separators, producing a single run-on line)
    countpath = os.path.join(self._tempdir, 'distributions.count')
    with open(countpath, 'w') as f:
        f.writelines([str(c) + '\n' for c in counts] + [str(volume) + '\n'])

    # Write .cx or .ocx file
    header = ['%t Number of strands: {}'.format(nstrands),
              '%\tid\tsequence']
    # the original indexed the list itself (complexes['strands']) instead of
    # its first entry
    for i, strand in enumerate(complexes[0]['strands']):
        header.append('%\t{}\t{}'.format(i + 1, strand))
    header.append('%\tT = {}'.format(temp))
    body = []
    for i, cx in enumerate(complexes):
        # per-complex fields come from `cx`, not from the outer list
        permutation = '\t'.join(cx['complex'])
        line = '{}\t{}\t{}'.format(i + 1, permutation, cx['energy'])
        body.append(line)

    if ordered:
        cxfile = os.path.join(self._tempdir, 'distributions.ocx')
    else:
        cxfile = os.path.join(self._tempdir, 'distributions.cx')

    # open for writing (the original opened the file read-only)
    with open(cxfile, 'w') as f:
        f.writelines(header + body)

    # Run 'distributions'
    stdout = self._run('distributions', cmd_args, None)

    # Parse STDOUT
    stdout_lines = stdout.split('\n')
    if stdout_lines[0].startswith('Exceeded maximum number'):
        raise LambdaError('Exceeded maxstates combinations.')

    # pop_search = re.search('There are (*) pop', stdout_lines[0]).group(1)
    # populations = int(pop_search)
    # kT_search = re.search('of the box: (*) kT', stdout_lines[1]).group(1)
    # kT = float(kT_search)

    # Parse .dist file (comments header + TSV)
    dist_lines = self._read_tempfile('distributions.dist').split('\n')
    tsv_lines = [l for l in dist_lines if not l.startswith('%')]
    tsv_lines.pop()

    output = []
    for i, line in enumerate(tsv_lines):
        data = line.split('\t')
        # Column 0 is an index
        # Columns 1-nstrands are complexes
        cx = [int(d) for d in data[1:nstrands]]
        # Column nstrands + 1 is expected value of complex
        ev = float(data[nstrands + 1])
        # Columns nstrands + 2 and on are probability columns
        probcols = [float(d) for d in data[nstrands + 2:]]
        # append a new dict per row (the original assigned into an empty list,
        # which raises IndexError)
        output.append({'complex': cx, 'ev': ev, 'probcols': probcols})

    return output
python
def _ActivateBreakpoint(self, module):
    """Sets the breakpoint in the loaded module, or complete with error."""

    # First remove the import hook (if installed).
    self._RemoveImportHook()

    line = self.definition['location']['line']

    # Find the code object in which the breakpoint is being set.
    status, codeobj = module_explorer.GetCodeObjectAtLine(module, line)
    if not status:
        # First two parameters are common: the line of the breakpoint and the
        # module we are trying to insert the breakpoint in.
        # TODO(emrekultursay): Do not display the entire path of the file. Either
        # strip some prefix, or display the path in the breakpoint.
        params = [str(line), os.path.splitext(module.__file__)[0] + '.py']

        # The next 0, 1, or 2 parameters are the alternative lines to set the
        # breakpoint at, displayed for the user's convenience.
        alt_lines = (str(l) for l in codeobj if l is not None)
        params += alt_lines

        if len(params) == 4:
            fmt = ERROR_LOCATION_NO_CODE_FOUND_AT_LINE_4
        elif len(params) == 3:
            fmt = ERROR_LOCATION_NO_CODE_FOUND_AT_LINE_3
        else:
            fmt = ERROR_LOCATION_NO_CODE_FOUND_AT_LINE_2

        self._CompleteBreakpoint({
            'status': {
                'isError': True,
                'refersTo': 'BREAKPOINT_SOURCE_LOCATION',
                'description': {
                    'format': fmt,
                    'parameters': params}}})
        return

    # Compile the breakpoint condition.
    condition = None
    if self.definition.get('condition'):
        try:
            condition = compile(self.definition.get('condition'),
                                '<condition_expression>',
                                'eval')
        except (TypeError, ValueError) as e:
            # condition string contains null bytes.
            self._CompleteBreakpoint({
                'status': {
                    'isError': True,
                    'refersTo': 'BREAKPOINT_CONDITION',
                    'description': {
                        'format': 'Invalid expression',
                        'parameters': [str(e)]}}})
            return
        except SyntaxError as e:
            self._CompleteBreakpoint({
                'status': {
                    'isError': True,
                    'refersTo': 'BREAKPOINT_CONDITION',
                    'description': {
                        'format': 'Expression could not be compiled: $0',
                        'parameters': [e.msg]}}})
            return

    native.LogInfo('Creating new Python breakpoint %s in %s, line %d' % (
        self.GetBreakpointId(), codeobj, line))

    self._cookie = native.SetConditionalBreakpoint(
        codeobj, line, condition, self._BreakpointEvent)
python
def tree_to_graph(bbltree: BubbleTree) -> Graph or Digraph:
    """Compute as a graphviz.Graph instance the given graph.

    If given BubbleTree instance is oriented, returned value is
    a graphviz.Digraph.

    See http://graphviz.readthedocs.io/en/latest/examples.html#cluster-py
    for graphviz API
    """
    GraphObject = Digraph if bbltree.oriented else Graph

    def create(name: str):
        """Return a graphviz graph figurating a powernode"""
        ret = GraphObject('cluster_' + name)
        # dirty hack to get links between clusters: add a blank node inside,
        # so the subgraph doesn't take its name directly, but the blank node does.
        # ret.body.append('label = "{}"'.format(name))  # replaced by:
        ret.node(name, style='invis', shape='point')
        # ret.body.append('style=plaintext')
        ret.body.append('color=lightgrey')
        ret.body.append('label=""')
        ret.body.append('shape=ellipse')
        ret.body.append('penwidth=2')
        ret.body.append('pencolor=black')
        return ret

    nodes = frozenset(bbltree.nodes())
    subgraphs = {}

    # build for each powernode the associated subgraph, and add its successors
    for powernode in bbltree.powernodes():
        if powernode not in subgraphs:
            subgraphs[powernode] = create(powernode)
        for succ in bbltree.inclusions[powernode]:
            if succ not in subgraphs:
                if succ not in nodes:
                    subgraphs[succ] = create(succ)
                else:
                    subgraphs[powernode].node(succ)

    # add to Graph instances the Graph of successors as subgraphs
    for powernode, succs in bbltree.inclusions.items():
        for succ in succs:
            if succ not in nodes:
                subgraphs[powernode].subgraph(subgraphs[succ])

    # build the final graph by adding to it subgraphs of roots
    graph = GraphObject('graph', graph_attr={'compound': 'true'})
    for root in bbltree.roots:
        if root in subgraphs:
            graph.subgraph(subgraphs[root])

    # add the edges to the final graph
    for source, targets in bbltree.edges.items():
        for target in targets:
            if source <= target:
                attrs = {}
                if source not in nodes:
                    attrs.update({'ltail': 'cluster_' + source})
                if target not in nodes:
                    attrs.update({'lhead': 'cluster_' + target})
                graph.edge(source, target, **attrs)

    # print(graph)  # debug line
    # graph.view()  # debug line
    return graph
java
@Override
public ExtensionProcessor getLogoutProcessor() {
    // do not cache the processor in the webcontainer code,
    // always return the result from security
    IWebAppSecurityCollaborator secCollab = this.collabHelper.getSecurityCollaborator();
    logoutProcessor = secCollab.getFormLogoutExtensionProcessor(this);
    return logoutProcessor;
}
java
private void _recurse(@Nonnull final Block aRoot, final boolean bListMode) {
    Block aBlock;
    Block aList;
    Line aLine = aRoot.m_aLines;

    if (bListMode) {
        aRoot.removeListIndent(m_bUseExtensions);
        if (m_bUseExtensions &&
            aRoot.m_aLines != null &&
            aRoot.m_aLines.getLineType(m_bUseExtensions) != ELineType.CODE) {
            aRoot.m_sID = aRoot.m_aLines.stripID();
        }
    }

    while (aLine != null && aLine.m_bIsEmpty)
        aLine = aLine.m_aNext;
    if (aLine == null)
        return;

    while (aLine != null) {
        final ELineType eType = aLine.getLineType(m_bUseExtensions);
        switch (eType) {
            case OTHER: {
                final boolean bWasEmpty = aLine.m_bPrevEmpty;
                while (aLine != null && !aLine.m_bIsEmpty) {
                    final ELineType eType2 = aLine.getLineType(m_bUseExtensions);
                    if ((bListMode || m_bUseExtensions) &&
                        (eType2 == ELineType.OLIST || eType2 == ELineType.ULIST))
                        break;
                    if (m_bUseExtensions &&
                        (eType2 == ELineType.CODE ||
                         eType2 == ELineType.FENCED_CODE ||
                         eType2 == ELineType.PLUGIN))
                        break;
                    if (eType2 == ELineType.HEADLINE ||
                        eType2 == ELineType.HEADLINE1 ||
                        eType2 == ELineType.HEADLINE2 ||
                        eType2 == ELineType.HR ||
                        eType2 == ELineType.BQUOTE ||
                        eType2 == ELineType.XML ||
                        eType2 == ELineType.XML_COMMENT)
                        break;
                    aLine = aLine.m_aNext;
                }

                final EBlockType eBlockType;
                if (aLine != null && !aLine.m_bIsEmpty) {
                    eBlockType = (bListMode && !bWasEmpty) ? EBlockType.NONE : EBlockType.PARAGRAPH;
                    aRoot.split(aLine.m_aPrevious).m_eType = eBlockType;
                    aRoot.removeLeadingEmptyLines();
                } else {
                    eBlockType = (bListMode && (aLine == null || !aLine.m_bIsEmpty) && !bWasEmpty)
                                 ? EBlockType.NONE
                                 : EBlockType.PARAGRAPH;
                    aRoot.split(aLine == null ? aRoot.m_aLineTail : aLine).m_eType = eBlockType;
                    aRoot.removeLeadingEmptyLines();
                }
                aLine = aRoot.m_aLines;
                break;
            }
            case CODE:
                while (aLine != null && (aLine.m_bIsEmpty || aLine.m_nLeading > 3)) {
                    aLine = aLine.m_aNext;
                }
                aBlock = aRoot.split(aLine != null ? aLine.m_aPrevious : aRoot.m_aLineTail);
                aBlock.m_eType = EBlockType.CODE;
                aBlock.removeSurroundingEmptyLines();
                break;
            case XML:
            case XML_COMMENT:
                if (aLine.m_aPrevious != null) {
                    // FIXME ... this looks wrong
                    aRoot.split(aLine.m_aPrevious);
                }
                aRoot.split(aLine.m_aXmlEndLine).m_eType = eType == ELineType.XML
                                                           ? EBlockType.XML
                                                           : EBlockType.XML_COMMENT;
                aRoot.removeLeadingEmptyLines();
                aLine = aRoot.m_aLines;
                break;
            case BQUOTE:
                while (aLine != null) {
                    if (!aLine.m_bIsEmpty &&
                        aLine.m_bPrevEmpty &&
                        aLine.m_nLeading == 0 &&
                        aLine.getLineType(m_bUseExtensions) != ELineType.BQUOTE)
                        break;
                    aLine = aLine.m_aNext;
                }
                aBlock = aRoot.split(aLine != null ? aLine.m_aPrevious : aRoot.m_aLineTail);
                aBlock.m_eType = EBlockType.BLOCKQUOTE;
                aBlock.removeSurroundingEmptyLines();
                aBlock.removeBlockQuotePrefix();
                _recurse(aBlock, false);
                aLine = aRoot.m_aLines;
                break;
            case HR:
                if (aLine.m_aPrevious != null) {
                    // FIXME ... this looks wrong
                    aRoot.split(aLine.m_aPrevious);
                }
                aRoot.split(aLine).m_eType = EBlockType.RULER;
                aRoot.removeLeadingEmptyLines();
                aLine = aRoot.m_aLines;
                break;
            case FENCED_CODE:
                aLine = aLine.m_aNext;
                while (aLine != null) {
                    if (aLine.getLineType(m_bUseExtensions) == ELineType.FENCED_CODE)
                        break;
                    // TODO ... is this really necessary? Maybe add a special flag?
                    aLine = aLine.m_aNext;
                }
                if (aLine != null)
                    aLine = aLine.m_aNext;
                aBlock = aRoot.split(aLine != null ? aLine.m_aPrevious : aRoot.m_aLineTail);
                aBlock.m_eType = EBlockType.FENCED_CODE;
                aBlock.m_sMeta = MarkdownHelper.getMetaFromFence(aBlock.m_aLines.m_sValue);
                aBlock.m_aLines.setEmpty();
                if (aBlock.m_aLineTail.getLineType(m_bUseExtensions) == ELineType.FENCED_CODE)
                    aBlock.m_aLineTail.setEmpty();
                aBlock.removeSurroundingEmptyLines();
                break;
            case PLUGIN:
                aLine = aLine.m_aNext;
                while (aLine != null) {
                    if (aLine.getLineType(m_bUseExtensions) == ELineType.PLUGIN)
                        break;
                    // TODO ... is this really necessary? Maybe add a special flag?
                    aLine = aLine.m_aNext;
                }
                if (aLine != null)
                    aLine = aLine.m_aNext;
                aBlock = aRoot.split(aLine != null ? aLine.m_aPrevious : aRoot.m_aLineTail);
                aBlock.m_eType = EBlockType.PLUGIN;
                aBlock.m_sMeta = MarkdownHelper.getMetaFromFence(aBlock.m_aLines.m_sValue);
                aBlock.m_aLines.setEmpty();
                if (aBlock.m_aLineTail.getLineType(m_bUseExtensions) == ELineType.PLUGIN)
                    aBlock.m_aLineTail.setEmpty();
                aBlock.removeSurroundingEmptyLines();
                break;
            case HEADLINE:
            case HEADLINE1:
            case HEADLINE2:
                if (aLine.m_aPrevious != null)
                    aRoot.split(aLine.m_aPrevious);
                if (eType != ELineType.HEADLINE)
                    aLine.m_aNext.setEmpty();
                aBlock = aRoot.split(aLine);
                aBlock.m_eType = EBlockType.HEADLINE;
                if (eType != ELineType.HEADLINE)
                    aBlock.m_nHeadlineDepth = eType == ELineType.HEADLINE1 ? 1 : 2;
                if (m_bUseExtensions)
                    aBlock.m_sID = aBlock.m_aLines.stripID();
                aBlock.transfromHeadline();
                aRoot.removeLeadingEmptyLines();
                aLine = aRoot.m_aLines;
                break;
            case OLIST:
            case ULIST:
                while (aLine != null) {
                    final ELineType eType2 = aLine.getLineType(m_bUseExtensions);
                    if (!aLine.m_bIsEmpty &&
                        (aLine.m_bPrevEmpty &&
                         aLine.m_nLeading == 0 &&
                         !(eType2 == ELineType.OLIST || eType2 == ELineType.ULIST)))
                        break;
                    aLine = aLine.m_aNext;
                }
                aList = aRoot.split(aLine != null ? aLine.m_aPrevious : aRoot.m_aLineTail);
                aList.m_eType = eType == ELineType.OLIST ? EBlockType.ORDERED_LIST
                                                         : EBlockType.UNORDERED_LIST;
                aList.m_aLines.m_bPrevEmpty = false;
                aList.removeSurroundingEmptyLines();
                aList.m_aLines.m_bPrevEmpty = false;
                _initListBlock(aList);
                aBlock = aList.m_aBlocks;
                while (aBlock != null) {
                    _recurse(aBlock, true);
                    aBlock = aBlock.m_aNext;
                }
                aList.expandListParagraphs();
                break;
            default:
                aLine = aLine.m_aNext;
                break;
        }
    }
}
python
def _remove_identical_contigs(self, containing_contigs, contig_lengths):
    '''Input is dictionary of containing contigs made by
    self._expand_containing_using_transitivity(). Removes redundant identical
    contigs, leaving one representative (the longest) of each set of identical
    contigs. Returns new version of dictionary, and a dictionary of
    contig name => contig it was replaced with'''
    identical_contigs = self._get_identical_contigs(containing_contigs)
    to_replace = {}  # contig name => name to replace it with

    for contig_set in identical_contigs:
        longest_contig = self._longest_contig(contig_set, contig_lengths)
        for name in contig_set - {longest_contig}:
            assert name not in to_replace
            to_replace[name] = longest_contig

    for name, replace_with in to_replace.items():
        if replace_with not in containing_contigs:
            containing_contigs[replace_with] = set()

        if name in containing_contigs:
            containing_contigs[replace_with].update(containing_contigs[name])
            del containing_contigs[name]

    to_delete = set()

    for name, names_set in containing_contigs.items():
        assert name not in to_replace
        new_set = {to_replace.get(x, x) for x in names_set}
        new_set.discard(name)
        if len(new_set) > 0:
            containing_contigs[name] = new_set
        else:
            to_delete.add(name)

    for name in to_delete:
        del containing_contigs[name]

    return containing_contigs, to_replace
java
public static long getMinimumSequence(final Sequence[] sequences, long minimum) {
    for (int i = 0, n = sequences.length; i < n; i++) {
        long value = sequences[i].get();
        minimum = Math.min(minimum, value);
    }
    return minimum;
}
java
static int mask(Class<? extends ChannelHandler> clazz) {
    // Try to obtain the mask from the cache first. If this fails calculate it
    // and put it in the cache for fast lookup in the future.
    Map<Class<? extends ChannelHandler>, Integer> cache = MASKS.get();
    Integer mask = cache.get(clazz);
    if (mask == null) {
        mask = mask0(clazz);
        cache.put(clazz, mask);
    }
    return mask;
}
python
def hmtk_histogram_2D(xvalues, yvalues, bins, x_offset=1.0E-10,
                      y_offset=1.0E-10):
    """
    See the explanation for the 1D case - now applied to 2D.

    :param numpy.ndarray xvalues:
        Values of x-data
    :param numpy.ndarray yvalues:
        Values of y-data
    :param tuple bins:
        Tuple containing bin intervals for x-data and y-data (as numpy arrays)
    :param float x_offset:
        Small amount to offset the x-bins for floating point precision
    :param float y_offset:
        Small amount to offset the y-bins for floating point precision
    :returns:
        Count in each bin (as float)
    """
    xbins, ybins = (bins[0] - x_offset, bins[1] - y_offset)
    n_x = len(xbins) - 1
    n_y = len(ybins) - 1
    counter = np.zeros([n_y, n_x], dtype=float)
    for j in range(n_y):
        y_idx = np.logical_and(yvalues >= ybins[j], yvalues < ybins[j + 1])
        x_vals = xvalues[y_idx]
        for i in range(n_x):
            idx = np.logical_and(x_vals >= xbins[i], x_vals < xbins[i + 1])
            counter[j, i] += float(np.sum(idx))
    return counter.T
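A self-contained check of the binning behaviour (numpy only); note the transpose means rows index x-bins in the returned array:

import numpy as np

x = np.array([0.1, 0.9, 1.5, 2.5])
y = np.array([0.2, 0.4, 1.2, 1.8])
bins = (np.array([0., 1., 2., 3.]), np.array([0., 1., 2.]))
counts = hmtk_histogram_2D(x, y, bins)
print(counts.shape)  # (3, 2)
print(counts.sum())  # 4.0 -- every sample falls in exactly one bin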
java
public void marshall(AccelerationSettings accelerationSettings, ProtocolMarshaller protocolMarshaller) {
    if (accelerationSettings == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }

    try {
        protocolMarshaller.marshall(accelerationSettings.getMode(), MODE_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
python
def rename_image(self, ami_name, new_ami_name, source_region='us-east-1'):
    """
    Method which renames an ami by copying to a new ami with a new name
    (only way this is possible in AWS)
    :param ami_name:
    :param new_ami_name:
    :param source_region:
    :return:
    """
    # print statement converted to the print() function (Python 3)
    print("Re-naming/moving AMI to desired name and region")
    image_id = self.get_image_id_by_name(ami_name, source_region)
    self.copy_ami_to_new_name(image_id, new_ami_name, source_region)
    self.deregister_image(image_id, source_region)
java
public ApiResponse<List<FactionWarfareWarsResponse>> getFwWarsWithHttpInfo(String datasource,
        String ifNoneMatch) throws ApiException {
    com.squareup.okhttp.Call call = getFwWarsValidateBeforeCall(datasource, ifNoneMatch, null);
    Type localVarReturnType = new TypeToken<List<FactionWarfareWarsResponse>>() {
    }.getType();
    return apiClient.execute(call, localVarReturnType);
}
python
def to_text(self, line):
    """
    Return the textual representation of the given `line`.
    """
    return getattr(self, self.ENTRY_TRANSFORMERS[line.__class__])(line)
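The method is a one-line dispatch table keyed on the line's class; a self-contained sketch of the same pattern (the Comment/Rule classes and transformer names below are hypothetical):

class Comment:
    def __init__(self, text):
        self.text = text

class Rule:
    def __init__(self, body):
        self.body = body

class Writer:
    # maps a line class to the name of the method that renders it
    ENTRY_TRANSFORMERS = {Comment: 'comment_to_text', Rule: 'rule_to_text'}

    def comment_to_text(self, line):
        return '# ' + line.text

    def rule_to_text(self, line):
        return line.body

    def to_text(self, line):
        return getattr(self, self.ENTRY_TRANSFORMERS[line.__class__])(line)

print(Writer().to_text(Comment('rotate nightly')))  # -> '# rotate nightly'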
python
def request(self, action, data=None, headers=None, method='GET'):
    """
    Append the user authentication details to every outgoing request
    """
    # avoid mutable default arguments
    data = data or {}
    headers = headers or {}
    data = self.merge(data, {'user': self.username,
                             'password': self.password,
                             'api_id': self.apiId})
    return Transport.request(self, action, data, headers, method)
python
def _ParseCachedEntry8(self, value_data, cached_entry_offset):
    """Parses a Windows 8.0 or 8.1 cached entry.

    Args:
      value_data (bytes): value data.
      cached_entry_offset (int): offset of the first cached entry data
          relative to the start of the value data.

    Returns:
      AppCompatCacheCachedEntry: cached entry.

    Raises:
      ParseError: if the value data could not be parsed.
    """
    try:
        cached_entry = self._ReadStructureFromByteStream(
            value_data[cached_entry_offset:], cached_entry_offset,
            self._cached_entry_data_type_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError(
            'Unable to parse cached entry value with error: {0!s}'.format(
                exception))

    if cached_entry.signature not in (
            self._CACHED_ENTRY_SIGNATURE_8_0, self._CACHED_ENTRY_SIGNATURE_8_1):
        raise errors.ParseError('Unsupported cache entry signature')

    cached_entry_data = value_data[cached_entry_offset:]

    if cached_entry.signature == self._CACHED_ENTRY_SIGNATURE_8_0:
        data_type_map_name = 'appcompatcache_cached_entry_body_8_0'
    elif cached_entry.signature == self._CACHED_ENTRY_SIGNATURE_8_1:
        data_type_map_name = 'appcompatcache_cached_entry_body_8_1'

    data_type_map = self._GetDataTypeMap(data_type_map_name)
    context = dtfabric_data_maps.DataTypeMapContext()

    try:
        cached_entry_body = self._ReadStructureFromByteStream(
            cached_entry_data[12:], cached_entry_offset + 12,
            data_type_map, context=context)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError(
            'Unable to parse cached entry body with error: {0!s}'.format(
                exception))

    data_offset = context.byte_size
    data_size = cached_entry_body.data_size

    cached_entry_object = AppCompatCacheCachedEntry()
    cached_entry_object.cached_entry_size = (
        12 + cached_entry.cached_entry_data_size)
    cached_entry_object.insertion_flags = cached_entry_body.insertion_flags
    cached_entry_object.last_modification_time = (
        cached_entry_body.last_modification_time)
    cached_entry_object.path = cached_entry_body.path
    cached_entry_object.shim_flags = cached_entry_body.shim_flags

    if data_size > 0:
        cached_entry_object.data = cached_entry_data[
            data_offset:data_offset + data_size]

    return cached_entry_object
python
def __parse(self) -> object:
    """Selects the appropriate method to decode next bencode element
    and returns the result."""
    char = self.data[self.idx: self.idx + 1]
    if char in [b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b'0']:
        str_len = int(self.__read_to(b':'))
        return self.__read(str_len)
    elif char == b'i':
        self.idx += 1
        return int(self.__read_to(b'e'))
    elif char == b'd':
        return self.__parse_dict()
    elif char == b'l':
        return self.__parse_list()
    elif char == b'':
        raise bencodepy.DecodingError(
            'Unexpected End of File at index position of {0}.'.format(str(self.idx)))
    else:
        raise bencodepy.DecodingError(
            'Invalid token character ({0}) at position {1}.'.format(str(char), str(self.idx)))
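In normal use this parser is reached through the package's top-level helper; a minimal sketch, assuming bencodepy's public decode() entry point routes through this dispatcher:

import bencodepy

decoded = bencodepy.decode(b'd3:bar4:spam3:fooi42ee')
print(decoded)  # {b'bar': b'spam', b'foo': 42}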
python
def get_logging_fields(self, model):
    """
    Returns a dictionary mapping of the fields that are used for
    keeping the actual audit log entries.
    """
    rel_name = '_%s_audit_log_entry' % model._meta.object_name.lower()

    def entry_instance_to_unicode(log_entry):
        try:
            result = '%s: %s %s at %s' % (
                model._meta.object_name,
                log_entry.object_state,
                log_entry.get_action_type_display().lower(),
                log_entry.action_date,
            )
        except AttributeError:
            result = '%s %s at %s' % (
                model._meta.object_name,
                log_entry.get_action_type_display().lower(),
                log_entry.action_date
            )
        return result

    action_user_field = LastUserField(related_name=rel_name, editable=False)

    # check if the manager has been attached to auth user model
    if [model._meta.app_label, model.__name__] == getattr(
            settings, 'AUTH_USER_MODEL', 'auth.User').split("."):
        action_user_field = LastUserField(related_name=rel_name, editable=False, to='self')

    return {
        'action_id': models.AutoField(primary_key=True),
        'action_date': models.DateTimeField(
            default=datetime_now, editable=False, blank=False),
        'action_user': action_user_field,
        'action_type': models.CharField(max_length=1, editable=False, choices=(
            ('I', _('Created')),
            ('U', _('Changed')),
            ('D', _('Deleted')),
        )),
        'object_state': LogEntryObjectDescriptor(model),
        '__unicode__': entry_instance_to_unicode,
    }
python
def load_config(self, config_file_name):
    """ Load a configuration file in prt or str format.

    The configuration file type is derived from the file suffix - prt or str.

    :param config_file_name: full path to the configuration file.
        IxTclServer must have access to the file location: either the config
        file is on a shared folder, or IxTclServer runs on the client machine.
    """
    config_file_name = config_file_name.replace('\\', '/')
    ext = path.splitext(config_file_name)[-1].lower()
    if ext == '.prt':
        self.api.call_rc('port import "{}" {}'.format(config_file_name, self.uri))
    elif ext == '.str':
        self.reset()
        self.api.call_rc('stream import "{}" {}'.format(config_file_name, self.uri))
    else:
        raise ValueError('Configuration file type {} not supported.'.format(ext))
    self.write()
    self.discover()
python
def clear_cached_endpoints(self, prefix=None):
    """ Invalidate all cached endpoints, meta included.

    Loop over all meta endpoints to generate every cache key, then
    invalidate each of them. Doing it this way prevents the app from
    missing keys when the user changes prefixes.

    Meta endpoint will be updated upon next call.

    :param prefix: the prefix for the cache key (default is cache_prefix)
    """
    prefix = prefix if prefix is not None else self.cache_prefix

    for endpoint in self.app.op.values():
        cache_key = '%s:app:%s' % (prefix, endpoint.url)
        self.cache.invalidate(cache_key)

    self.cache.invalidate('%s:app:meta_swagger_url' % self.cache_prefix)
    self.app = None
java
public int decompose(CharSequence s, int src, int limit, ReorderingBuffer buffer) {
    int minNoCP = minDecompNoCP;

    int prevSrc;
    int c = 0;
    int norm16 = 0;

    // only for quick check
    int prevBoundary = src;
    int prevCC = 0;

    for (;;) {
        // count code units below the minimum or with irrelevant data for the quick check
        for (prevSrc = src; src != limit;) {
            if ((c = s.charAt(src)) < minNoCP ||
                isMostDecompYesAndZeroCC(norm16 = normTrie.getFromU16SingleLead((char) c))) {
                ++src;
            } else if (!UTF16.isSurrogate((char) c)) {
                break;
            } else {
                char c2;
                if (UTF16Plus.isSurrogateLead(c)) {
                    if ((src + 1) != limit && Character.isLowSurrogate(c2 = s.charAt(src + 1))) {
                        c = Character.toCodePoint((char) c, c2);
                    }
                } else /* trail surrogate */ {
                    if (prevSrc < src && Character.isHighSurrogate(c2 = s.charAt(src - 1))) {
                        --src;
                        c = Character.toCodePoint(c2, (char) c);
                    }
                }
                if (isMostDecompYesAndZeroCC(norm16 = getNorm16(c))) {
                    src += Character.charCount(c);
                } else {
                    break;
                }
            }
        }
        // copy these code units all at once
        if (src != prevSrc) {
            if (buffer != null) {
                buffer.flushAndAppendZeroCC(s, prevSrc, src);
            } else {
                prevCC = 0;
                prevBoundary = src;
            }
        }
        if (src == limit) {
            break;
        }

        // Check one above-minimum, relevant code point.
        src += Character.charCount(c);
        if (buffer != null) {
            decompose(c, norm16, buffer);
        } else {
            if (isDecompYes(norm16)) {
                int cc = getCCFromYesOrMaybe(norm16);
                if (prevCC <= cc || cc == 0) {
                    prevCC = cc;
                    if (cc <= 1) {
                        prevBoundary = src;
                    }
                    continue;
                }
            }
            return prevBoundary; // "no" or cc out of order
        }
    }
    return src;
}
java
public static Pattern mappingToRegex(String mapping) {
    // escape literal dots before translating the wildcard; the original
    // replaceAll("\\.", "\\.") replaced each dot with itself, leaving the
    // dots unescaped in the resulting pattern
    return Pattern.compile(mapping.replaceAll("\\.", "\\\\.")
            .replaceAll("^\\*(.*)", "^(.*)$1\\$")
            .replaceAll("(.*)\\*$", "^$1(.*)\\$"));
}
java
@Override
public void format(Buffer buf) {
    int slotSize = RecordPage.slotSize(ti.schema());
    Constant emptyFlag = new IntegerConstant(EMPTY);
    for (int pos = 0; pos + slotSize <= Buffer.BUFFER_SIZE; pos += slotSize) {
        setVal(buf, pos, emptyFlag);
        makeDefaultRecord(buf, pos);
    }
}
java
@Override
public RebootReplicationInstanceResult rebootReplicationInstance(RebootReplicationInstanceRequest request) {
    request = beforeClientExecution(request);
    return executeRebootReplicationInstance(request);
}
java
@Override
public LoadBalancerNodeFilter and(LoadBalancerNodeFilter otherFilter) {
    if (evaluation instanceof SingleFilterEvaluation &&
        otherFilter.evaluation instanceof SingleFilterEvaluation) {
        return new LoadBalancerNodeFilter(
            getLoadBalancerPoolFilter().and(otherFilter.getLoadBalancerPoolFilter()),
            getPredicate().and(otherFilter.getPredicate())
        );
    } else {
        evaluation = new AndEvaluation<>(evaluation, otherFilter, LoadBalancerNodeMetadata::getIpAddress);
        return this;
    }
}
python
def get_student_enrollments(self):
    """
    Returns an Enrollments object with the user enrollments

    Returns:
        Enrollments: object representing the student enrollments
    """
    # the request is done on behalf of the currently logged in user
    resp = self.requester.get(
        urljoin(self.base_url, self.enrollment_url))
    resp.raise_for_status()
    return Enrollments(resp.json())
python
def compute_acl(cls, filename, start_index=None, end_index=None,
                    min_nsamples=10):
        """Computes the autocorrelation length for all model params and
        temperatures in the given file.

        Parameter values are averaged over all walkers at each iteration and
        temperature. The ACL is then calculated over the averaged chain.

        Parameters
        ----------
        filename : str
            Name of a samples file to compute ACLs for.
        start_index : {None, int}
            The start index to compute the ACL from. If None, will try to use
            the number of burn-in iterations in the file; otherwise, will start
            at the first sample.
        end_index : {None, int}
            The end index to compute the ACL to. If None, will go to the end
            of the current iteration.
        min_nsamples : int, optional
            Require a minimum number of samples to compute an ACL. If the
            number of samples per walker is less than this, will just set to
            ``inf``. Default is 10.

        Returns
        -------
        dict
            A dictionary of ntemps-long arrays of the ACLs of each parameter.
        """
        acls = {}
        with cls._io(filename, 'r') as fp:
            if end_index is None:
                end_index = fp.niterations
            tidx = numpy.arange(fp.ntemps)
            for param in fp.variable_params:
                these_acls = numpy.zeros(fp.ntemps)
                for tk in tidx:
                    samples = fp.read_raw_samples(
                        param, thin_start=start_index, thin_interval=1,
                        thin_end=end_index, temps=tk, flatten=False)[param]
                    # contract the walker dimension using the mean, and
                    # flatten the (length 1) temp dimension
                    samples = samples.mean(axis=1)[0, :]
                    if samples.size < min_nsamples:
                        acl = numpy.inf
                    else:
                        acl = autocorrelation.calculate_acl(samples)
                    if acl <= 0:
                        acl = numpy.inf
                    these_acls[tk] = acl
                acls[param] = these_acls
        return acls
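`autocorrelation.calculate_acl` is package-specific; as a rough, numpy-only stand-in for intuition, an integrated autocorrelation length can be sketched like this. The cutoff rule here is a simplification of mine, not the library's algorithm.
python
import numpy as np

def simple_acl(x):
    x = np.asarray(x, dtype=float) - np.mean(x)
    acf = np.correlate(x, x, mode="full")[len(x) - 1:]
    acf /= acf[0]
    # integrate the ACF up to its first negative value
    cut = np.argmax(acf < 0) or len(acf)
    return 1.0 + 2.0 * np.sum(acf[1:cut])

rng = np.random.default_rng(0)
print(simple_acl(rng.normal(size=4000)))  # ~1 for white noise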
python
def inspect_plugin(self, name): """ Retrieve plugin metadata. Args: name (string): The name of the plugin. The ``:latest`` tag is optional, and is the default if omitted. Returns: A dict containing plugin info """ url = self._url('/plugins/{0}/json', name) return self._result(self._get(url), True)
java
protected boolean inViewPort(final int dataIndex) { boolean inViewPort = true; for (Layout layout: mLayouts) { inViewPort = inViewPort && (layout.inViewPort(dataIndex) || !layout.isClippingEnabled()); } return inViewPort; }
java
public Cluster withSubnetMapping(java.util.Map<String, String> subnetMapping) { setSubnetMapping(subnetMapping); return this; }
java
public Decision<Flow<T>> getOrCreateDecision() { List<Node> nodeList = childNode.get("decision"); if (nodeList != null && nodeList.size() > 0) { return new DecisionImpl<Flow<T>>(this, "decision", childNode, nodeList.get(0)); } return createDecision(); }
python
def _format_data(self, data):
        """
        Sort the data from blue wavelengths to red, and ignore any spectra
        whose fluxes are entirely non-finite.
        """
        return [spectrum for spectrum in \
            sorted(data if isinstance(data, (list, tuple)) else [data],
                key=lambda x: x.disp[0]) if np.any(np.isfinite(spectrum.flux))]
java
public static Word2Vec fromPair(Pair<InMemoryLookupTable, VocabCache> pair) { Word2Vec vectors = new Word2Vec(); vectors.setLookupTable(pair.getFirst()); vectors.setVocab(pair.getSecond()); vectors.setModelUtils(new BasicModelUtils()); return vectors; }
python
def quality_comparator(video_data): """Custom comparator used to choose the right format based on the resolution.""" def parse_resolution(res: str) -> Tuple[int, ...]: return tuple(map(int, res.split('x'))) raw_resolution = video_data['resolution'] resolution = parse_resolution(raw_resolution) return resolution
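A hedged usage sketch: how such a comparator key would typically be applied to pick the highest-resolution entry. The dict layout is assumed from the `resolution` field above, and `Tuple` must be imported from `typing` for the annotation to resolve.
python
formats = [
    {"resolution": "640x360"},
    {"resolution": "1920x1080"},
    {"resolution": "1280x720"},
]
# tuples compare lexicographically, so (1920, 1080) wins
best = max(formats, key=quality_comparator)
print(best["resolution"])  # 1920x1080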
java
public CompletableFuture<Object> optionsAsync(@DelegatesTo(HttpConfig.class) final Closure closure) { return CompletableFuture.supplyAsync(() -> options(closure), getExecutor()); }
java
public Matrix solve (Matrix B) throws SingularityException { if (B.getRowCount() != m) { throw new IllegalArgumentException("Matrix row dimensions must agree."); } if (!this.isNonsingular()) { throw new SingularityException("Matrix is singular."); } // Copy right hand side with pivoting int nx = B.getColumnCount(); double[][] X = new double[piv.length][nx]; for (int i=0; i<piv.length; i++) { for (int j=0; j<nx; j++) { X[i][j]=B.getQuick(piv[i], j); } } // Solve L*Y = B(piv,:) for (int k = 0; k < n; k++) { for (int i = k+1; i < n; i++) { for (int j = 0; j < nx; j++) { X[i][j] -= X[k][j]*LU[i][k]; } } } // Solve U*X = Y; for (int k = n-1; k >= 0; k--) { for (int j = 0; j < nx; j++) { X[k][j] /= LU[k][k]; } for (int i = 0; i < k; i++) { for (int j = 0; j < nx; j++) { X[i][j] -= X[k][j]*LU[i][k]; } } } return MatrixFactory.createMatrix(X); }
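The same pivoted forward/back substitution is available off the shelf in SciPy; a minimal sketch for cross-checking results (scipy.linalg.lu_factor and lu_solve are real SciPy APIs).
python
import numpy as np
from scipy.linalg import lu_factor, lu_solve

A = np.array([[4.0, 3.0], [6.0, 3.0]])
b = np.array([[10.0], [12.0]])
lu, piv = lu_factor(A)        # PA = LU with partial pivoting
x = lu_solve((lu, piv), b)    # solves L*y = Pb, then U*x = y
print(np.allclose(A @ x, b))  # True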
java
public void doWrite(FieldTable vectorTable, KeyAreaInfo keyArea, BaseBuffer bufferNew) throws DBException { super.doWrite(vectorTable, keyArea, bufferNew); }
java
@Nonnull @Override public String getProviderTermForIpAddress(@Nonnull Locale locale) { try { return getCapabilities().getProviderTermForIpAddress(locale); } catch (CloudException e) { throw new RuntimeException("Unexpected problem with capabilities", e); } catch (InternalException e) { throw new RuntimeException("Unexpected problem with capabilities", e); } }
python
def sorted_feed_cols(df):
    """
    takes a dataframe's columns that would be of the form:
    ['feed003', 'failsafe_feed999', 'override_feed000', 'feed001', 'feed002']
    and returns:
    ['override_feed000', 'feed001', 'feed002', 'feed003', 'failsafe_feed999']
    """
    cols = df.columns
    ind = [int(c.split("feed")[1]) for c in cols]
    # sorted() over the zipped pairs works on both Python 2 and 3; the
    # original list.sort() call fails on Python 3, where zip() returns
    # an iterator
    cols = [c[1] for c in sorted(zip(ind, cols))]
    return cols
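A quick hedged usage check with a throwaway DataFrame (column names taken from the docstring):
python
import pandas as pd

df = pd.DataFrame(columns=["feed003", "failsafe_feed999",
                           "override_feed000", "feed001", "feed002"])
print(sorted_feed_cols(df))
# ['override_feed000', 'feed001', 'feed002', 'feed003', 'failsafe_feed999']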
python
def backColor(self, bc=None): """ Set/get actor's backface color. """ backProp = self.GetBackfaceProperty() if bc is None: if backProp: return backProp.GetDiffuseColor() return None if self.GetProperty().GetOpacity() < 1: colors.printc("~noentry backColor(): only active for alpha=1", c="y") return self if not backProp: backProp = vtk.vtkProperty() backProp.SetDiffuseColor(colors.getColor(bc)) backProp.SetOpacity(self.GetProperty().GetOpacity()) self.SetBackfaceProperty(backProp) self.GetMapper().ScalarVisibilityOff() return self
python
def load_from_file(path, fmt=None, is_training=True): ''' load data from file ''' if fmt is None: fmt = 'squad' assert fmt in ['squad', 'csv'], 'input format must be squad or csv' qp_pairs = [] if fmt == 'squad': with open(path) as data_file: data = json.load(data_file)['data'] for doc in data: for paragraph in doc['paragraphs']: passage = paragraph['context'] for qa_pair in paragraph['qas']: question = qa_pair['question'] qa_id = qa_pair['id'] if not is_training: qp_pairs.append( {'passage': passage, 'question': question, 'id': qa_id}) else: for answer in qa_pair['answers']: answer_begin = int(answer['answer_start']) answer_end = answer_begin + len(answer['text']) qp_pairs.append({'passage': passage, 'question': question, 'id': qa_id, 'answer_begin': answer_begin, 'answer_end': answer_end}) else: with open(path, newline='') as csvfile: reader = csv.reader(csvfile, delimiter='\t') line_num = 0 for row in reader: qp_pairs.append( {'passage': row[1], 'question': row[0], 'id': line_num}) line_num += 1 return qp_pairs
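A hedged sanity check with a minimal SQuAD-shaped document written to a temporary file; the structure mirrors what the parser above expects, and the question text is invented for illustration.
python
import json
import tempfile

doc = {"data": [{"paragraphs": [{
    "context": "Paris is the capital of France.",
    "qas": [{"question": "What is the capital of France?",
             "id": "q1",
             "answers": [{"answer_start": 0, "text": "Paris"}]}]}]}]}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(doc, f)
pairs = load_from_file(f.name)
print(pairs[0]["answer_begin"], pairs[0]["answer_end"])  # 0 5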
java
@Override public void rollback() throws CpoException { JdbcCpoAdapter currentResource = getCurrentResource(); if (currentResource != getLocalResource()) ((JdbcCpoTrxAdapter)currentResource).rollback(); }
python
def _nodedev_event_lifecycle_cb(conn, dev, event, detail, opaque): ''' Node device lifecycle events handler ''' _salt_send_event(opaque, conn, { 'nodedev': { 'name': dev.name() }, 'event': _get_libvirt_enum_string('VIR_NODE_DEVICE_EVENT_', event), 'detail': 'unknown' # currently unused })
java
private void read(InputStream is, Table table) throws IOException { byte[] headerBytes = new byte[6]; is.read(headerBytes); byte[] recordCountBytes = new byte[2]; is.read(recordCountBytes); //int recordCount = getShort(recordCountBytes, 0); //System.out.println("Header: " + new String(headerBytes) + " Record count:" + recordCount); byte[] buffer = new byte[m_definition.getRecordSize()]; while (true) { int bytesRead = is.read(buffer); if (bytesRead == -1) { break; } if (bytesRead != buffer.length) { throw new IOException("Unexpected end of file"); } if (buffer[0] == 0) { readRecord(buffer, table); } } }
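The same fixed-size record loop reads naturally as a Python generator; a hedged sketch in which the 6-byte header, 2-byte record count, and leading-byte empty-record test are taken from the Java above, while record parsing is left to the caller.
python
def read_records(stream, record_size):
    stream.read(6)   # skip the file header
    stream.read(2)   # skip the record count (also unused above)
    while True:
        buf = stream.read(record_size)
        if not buf:
            break
        if len(buf) != record_size:
            raise IOError("Unexpected end of file")
        if buf[0] != 0:
            yield buf  # non-empty record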
java
public void write_attribute_asynch(final DeviceProxy deviceProxy, final DeviceAttribute[] attribs, final CallBack cb) throws DevFailed { final int id = write_attribute_asynch(deviceProxy, attribs); ApiUtil.set_async_reply_model(id, CALLBACK); ApiUtil.set_async_reply_cb(id, cb); // if push callback, start a thread to do it if (ApiUtil.get_asynch_cb_sub_model() == PUSH_CALLBACK) { final AsyncCallObject aco = ApiUtil.get_async_object(id); new CallbackThread(aco).start(); } }
python
def _set_alert(self, v, load=False): """ Setter method for alert, mapped from YANG variable /rbridge_id/threshold_monitor/interface/policy/area/alert (container) If this variable is read-only (config: false) in the source YANG file, then _set_alert is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_alert() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=alert.alert, is_container='container', presence=False, yang_name="alert", rest_name="alert", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Alert configuration', u'cli-suppress-show-conf-path': None, u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """alert must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=alert.alert, is_container='container', presence=False, yang_name="alert", rest_name="alert", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Alert configuration', u'cli-suppress-show-conf-path': None, u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)""", }) self.__alert = t if hasattr(self, '_set'): self._set()
java
private void redirectToJavaLoggerProxy() { DefaultLoggerProxy lp = DefaultLoggerProxy.class.cast(this.loggerProxy); JavaLoggerProxy jlp = new JavaLoggerProxy(lp.name, lp.level); // the order of assignments is important this.javaLoggerProxy = jlp; // isLoggable checks javaLoggerProxy if set this.loggerProxy = jlp; }
python
def compute(): """Compute the polynomial.""" if what == "numpy": y = eval(expr) else: y = ne.evaluate(expr) return len(y)
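The snippet relies on `what`, `expr`, and `ne` from an enclosing benchmark script; a self-contained version of the comparison might look like this (numexpr's `evaluate` picks up local variables by name).
python
import numpy as np
import numexpr as ne

x = np.linspace(0.0, 1.0, 1_000_000)
expr = "0.25*x**3 + 0.75*x**2 - 1.5*x - 2.0"
y_np = eval(expr)          # plain-numpy evaluation
y_ne = ne.evaluate(expr)   # numexpr evaluation of the same polynomial
print(np.allclose(y_np, y_ne))  # True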
python
def get_gtf_argument_parser(desc, default_field_name='gene'): """Return an argument parser with basic options for reading GTF files. Parameters ---------- desc: str Description of the ArgumentParser default_field_name: str, optional Name of field in GTF file to look for. Returns ------- `argparse.ArgumentParser` object The argument parser. """ parser = cli.get_argument_parser(desc=desc) parser.add_argument( '-a', '--annotation-file', default='-', type=str, help=textwrap.dedent("""\ Path of Ensembl gene annotation file (in GTF format). The file may be gzip'ed. If set to ``-``, read from ``stdin``.""") ) parser.add_argument( '-o', '--output-file', required=True, type=str, help=textwrap.dedent("""\ Path of output file. If set to ``-``, print to ``stdout``, and redirect logging messages to ``stderr``.""") ) #parser.add_argument( # '-s', '--species', type=str, # choices=sorted(ensembl.SPECIES_CHROMPAT.keys()), default='human', # help=textwrap.dedent("""\ # Species for which to extract genes. (This parameter is ignored # if ``--chromosome-pattern`` is specified.)""") #) parser.add_argument( '-c', '--chromosome-pattern', type=str, required=False, default=None, help=textwrap.dedent("""\ Regular expression that chromosome names have to match. [None] """) ) #parser.add_argument( # '-f', '--field-name', type=str, default=default_field_name, # help=textwrap.dedent("""\ # Rows in the GTF file that do not contain this value # in the third column are ignored.""") #) cli.add_reporting_args(parser) return parser
java
protected LightweightTypeReference internalFindTopLevelType(Class<?> rawType) { try { ResourceSet resourceSet = getOwner().getContextResourceSet(); Resource typeResource = resourceSet.getResource(URIHelperConstants.OBJECTS_URI.appendSegment(rawType.getName()), true); List<EObject> resourceContents = typeResource.getContents(); if (resourceContents.isEmpty()) return null; JvmType type = (JvmType) resourceContents.get(0); return getOwner().newParameterizedTypeReference(type); } catch(WrappedException e) { /* no java project / class path context available, e.g. opened from history view */ return null; } }
python
def paths_to_polygons(paths, scale=None):
    """
    Given a sequence of connected points turn them into
    valid shapely Polygon objects.

    Parameters
    -----------
    paths : (n,) sequence
      Of (m, 2) float closed paths
    scale : float
      Approximate scale of drawing for precision

    Returns
    -----------
    polygons : (p,) list
      Each entry is either a shapely.geometry.Polygon, or None for
      paths that could not be recovered
    """
    polygons = [None] * len(paths)
    for i, path in enumerate(paths):
        if len(path) < 4:
            # since the first and last vertices are identical in
            # a closed loop a 4 vertex path is the minimum for
            # non-zero area
            continue
        try:
            polygons[i] = repair_invalid(Polygon(path), scale)
        except ValueError:
            # raised if a polygon is unrecoverable
            continue
        except BaseException:
            log.error('unrecoverable polygon', exc_info=True)
    polygons = np.array(polygons)
    return polygons
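A minimal usage sketch with two toy paths, one degenerate so it stays None; this assumes the module's `repair_invalid` helper and logger are available alongside the function.
python
import numpy as np

square = np.array([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)], dtype=float)
degenerate = np.array([(0, 0), (1, 1), (0, 0)], dtype=float)
polys = paths_to_polygons([square, degenerate])
print(polys[0].area, polys[1])  # 1.0 None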
python
def encode2(self):
        """Return the figure encoded as base64, embedded in an HTML image tag."""
        buf = BytesIO()
        self.fig.savefig(buf, format='png', bbox_inches='tight', dpi=100)
        buf.seek(0)
        string = b64encode(buf.read())
        return '<img src="data:image/png;base64,{0}">'.format(urlquote(string))
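The same savefig-to-data-URI round trip as a standalone hedged sketch, outside any class:
python
from base64 import b64encode
from io import BytesIO

import matplotlib
matplotlib.use("Agg")  # headless backend, safe for scripts
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
buf = BytesIO()
fig.savefig(buf, format="png", bbox_inches="tight", dpi=100)
tag = '<img src="data:image/png;base64,{0}">'.format(
    b64encode(buf.getvalue()).decode("ascii"))
print(tag[:48])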
java
private static BufferedImage encodeImg(BufferedImage desImg, Float quality) {
        ByteArrayOutputStream baos = null;
        ByteArrayInputStream bais = null;
        BufferedImage bufferedImage = null;
        try {
            if (quality != null) {
                if (quality > 1.0 || quality < 0.0) {
                    throw new Exception("Invalid value for the quality parameter");
                }
            }
            baos = new ByteArrayOutputStream();
            // encode the image in JPEG format
            JPEGImageEncoder encoder = JPEGCodec.createJPEGEncoder(baos);
            if (quality != null) {
                // encoding parameters
                JPEGEncodeParam jep = JPEGCodec.getDefaultJPEGEncodeParam(desImg);
                // set the compression quality
                jep.setQuality(quality, true);
                // encode and write the output
                encoder.encode(desImg, jep);
            } else {
                // encode and write the output
                encoder.encode(desImg);
            }
            bais = new ByteArrayInputStream(baos.toByteArray());
            bufferedImage = ImageIO.read(bais);
        } catch (Exception e) {
            throw new RuntimeException(e);
        } finally {
            CommonUtils.closeIOStream(bais, baos);
        }
        return bufferedImage;
    }
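The com.sun JPEG codec used above has long been removed from the JDK; a hedged Pillow equivalent of the quality-controlled re-encode (note Pillow's quality scale is 0-100, not 0.0-1.0, and the helper name is mine).
python
from io import BytesIO
from PIL import Image

def reencode_jpeg(img, quality=None):
    if quality is not None and not 0 <= quality <= 100:
        raise ValueError("invalid value for the quality parameter")
    buf = BytesIO()
    kwargs = {"quality": quality} if quality is not None else {}
    img.convert("RGB").save(buf, format="JPEG", **kwargs)  # JPEG has no alpha
    buf.seek(0)
    return Image.open(buf)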
java
public String getSemtype() { if (SurfaceForm_Type.featOkTst && ((SurfaceForm_Type)jcasType).casFeat_semtype == null) jcasType.jcas.throwFeatMissing("semtype", "ch.epfl.bbp.uima.types.SurfaceForm"); return jcasType.ll_cas.ll_getStringValue(addr, ((SurfaceForm_Type)jcasType).casFeatCode_semtype);}
python
def newest_packages(
        pypi_server="https://pypi.python.org/pypi?%3Aaction=packages_rss"):
    """
    Constructs a request to the PyPI server and returns a list of
    :class:`yarg.parse.Package`.

    :param pypi_server: (optional) URL to the PyPI server.

        >>> import yarg
        >>> yarg.newest_packages()
        [<Package yarg>, <Package gray>, <Package ragy>]
    """
    items = _get(pypi_server)
    i = []
    for item in items:
        i_dict = {'name': item[0].text.split()[0],
                  'url': item[1].text,
                  'description': item[3].text,
                  'date': item[4].text}
        i.append(Package(i_dict))
    return i
java
public Set<String> keys() { Set<String> list = new LinkedHashSet<>(); Enumeration<String> keys = bundle.getKeys(); while (keys.hasMoreElements()) { list.add(keys.nextElement()); } return list; }
python
def auth(username, password): ''' Try and authenticate ''' try: keystone = client.Client(username=username, password=password, auth_url=get_auth_url()) return keystone.authenticate() except (AuthorizationFailure, Unauthorized): return False
java
private InputStream getInputStreamFromFile(File f) throws FileNotFoundException{ InputStream stream = null; if ( cacheRawFiles ){ stream = FlatFileCache.getInputStream(f.getAbsolutePath()); if ( stream == null){ FlatFileCache.addToCache(f.getAbsolutePath(),f); stream = FlatFileCache.getInputStream(f.getAbsolutePath()); } } if ( stream == null) stream = new FileInputStream(f); return stream; }
python
def from_dict(cls, cls_dict, fallback_xsi_type=None): """Parse the dictionary and return an Entity instance. This will attempt to extract type information from the input dictionary and pass it to entity_class to resolve the correct class for the type. Args: cls_dict: A dictionary representation of an Entity object. fallback_xsi_type: An xsi_type to use for string input, which doesn't have properties Returns: An Entity instance. """ if not cls_dict: return None if isinstance(cls_dict, six.string_types): if not getattr(cls, "_convert_strings", False): return cls_dict try: typekey = cls.dictkey(cls_dict) except TypeError: typekey = fallback_xsi_type klass = cls.entity_class(typekey) return klass.from_dict(cls_dict)
python
def _is_partial_index(gbi_file): """Check for truncated output since grabix doesn't write to a transactional directory. """ with open(gbi_file) as in_handle: for i, _ in enumerate(in_handle): if i > 2: return False return True
python
def oplot(self, x, y, panel=None, **kw):
        """generic plotting method, overplotting any existing plot """
        if panel is None:
            panel = self.current_panel
        opts = {}
        opts.update(self.default_panelopts)
        opts.update(kw)  # the original referenced an undefined name `kws`
        self.panels[panel].oplot(x, y, **opts)
python
def make_symbols(symbols, *args):
    """Return a list of uppercase strings like "GOOG", "$SPX", "XOM"...

    Arguments:
      symbols (str or list of str): list of market ticker symbols to normalize
        If `symbols` is a str a get_symbols_from_list() call is used to
        retrieve the list of symbols

    Returns:
      list of str: list of canonical ticker symbol strings (typically after
        .upper().strip())

    See Also:
      pug.dj.db.normalize_names

    Examples:
      >>> make_symbols("Goog")
      ['GOOG']
      >>> make_symbols("  $SPX   ", " aaPL ")
      ['$SPX', 'AAPL']
      >>> make_symbols(["$SPX", ["GOOG", "AAPL"]])
      ['GOOG', 'AAPL', '$SPX']
      >>> make_symbols(" $Spy, Goog, aAPL ")
      ['$SPY', 'GOOG', 'AAPL']
    """
    if (hasattr(symbols, '__iter__') and not any(symbols)) \
            or (isinstance(symbols, (list, tuple, Mapping)) and not symbols):
        return []
    if isinstance(symbols, basestring):
        # # FIXME: find a direct API for listing all possible symbols
        # try:
        #     return list(set(dataobj.get_symbols_from_list(symbols)))
        # except:
        return [s.upper().strip() for s in
                (symbols.split(',') + list(str(a) for a in args))]
    else:
        ans = []
        for sym in (list(symbols) + list(args)):
            tmp = make_symbols(sym)
            ans = ans + tmp
        return list(set(ans))
python
def _validate_prepare_time(self, t, pos_c): """ Make sure that t is a 1D array and compatible with the C position array. """ if hasattr(t, 'unit'): t = t.decompose(self.units).value if not isiterable(t): t = np.atleast_1d(t) t = np.ascontiguousarray(t.ravel()) if len(t) > 1: if len(t) != pos_c.shape[0]: raise ValueError("If passing in an array of times, it must have a shape " "compatible with the input position(s).") return t
java
public static boolean isSupportedWriteTransactionOperation(String methodName) { return (ObjectUtils.nullSafeEquals(methodName, BIND_METHOD_NAME) || ObjectUtils.nullSafeEquals(methodName, REBIND_METHOD_NAME) || ObjectUtils.nullSafeEquals(methodName, RENAME_METHOD_NAME) || ObjectUtils.nullSafeEquals(methodName, MODIFY_ATTRIBUTES_METHOD_NAME) || ObjectUtils.nullSafeEquals(methodName, UNBIND_METHOD_NAME)); }
java
public final Table getTable(TableName name) { GetTableRequest request = GetTableRequest.newBuilder().setName(name == null ? null : name.toString()).build(); return getTable(request); }
python
def phmsdms(hmsdms):
    """Parse a string containing a sexagesimal number.

    This can handle several types of delimiters and will process
    reasonably valid strings. See examples.

    Parameters
    ----------
    hmsdms : str
        String containing a sexagesimal number.

    Returns
    -------
    d : dict
        parts : a 3 element list of floats
            The three parts of the sexagesimal number that were
            identified.
        vals : 3 element list of floats
            The numerical values of the three parts of the sexagesimal
            number.
        sign : int
            Sign of the sexagesimal number; 1 for positive and -1 for
            negative.
        units : {"degrees", "hours"}
            The units of the sexagesimal number. This is inferred from
            the characters present in the string. If it is a pure number
            then units is "degrees".

    Examples
    --------
    >>> phmsdms("12") == {
    ...     'parts': [12.0, None, None],
    ...     'sign': 1,
    ...     'units': 'degrees',
    ...     'vals': [12.0, 0.0, 0.0]
    ...     }
    True
    >>> phmsdms("12h") == {
    ...     'parts': [12.0, None, None],
    ...     'sign': 1,
    ...     'units': 'hours',
    ...     'vals': [12.0, 0.0, 0.0]
    ...     }
    True
    >>> phmsdms("12d13m14.56") == {
    ...     'parts': [12.0, 13.0, 14.56],
    ...     'sign': 1,
    ...     'units': 'degrees',
    ...     'vals': [12.0, 13.0, 14.56]
    ...     }
    True
    >>> phmsdms("12d14.56ss") == {
    ...     'parts': [12.0, None, 14.56],
    ...     'sign': 1,
    ...     'units': 'degrees',
    ...     'vals': [12.0, 0.0, 14.56]
    ...     }
    True
    >>> phmsdms("14.56ss") == {
    ...     'parts': [None, None, 14.56],
    ...     'sign': 1,
    ...     'units': 'degrees',
    ...     'vals': [0.0, 0.0, 14.56]
    ...     }
    True
    >>> phmsdms("12h13m12.4s") == {
    ...     'parts': [12.0, 13.0, 12.4],
    ...     'sign': 1,
    ...     'units': 'hours',
    ...     'vals': [12.0, 13.0, 12.4]
    ...     }
    True
    >>> phmsdms("12:13:12.4s") == {
    ...     'parts': [12.0, 13.0, 12.4],
    ...     'sign': 1,
    ...     'units': 'degrees',
    ...     'vals': [12.0, 13.0, 12.4]
    ...     }
    True

    But `phmsdms("12:13mm:12.4s")` will not work.
    """
    units = None
    sign = None
    # Floating point regex:
    # http://www.regular-expressions.info/floatingpoint.html
    #
    # pattern1: find a decimal number (int or float) and any
    # characters following it up to the next decimal number. [^0-9\-+]*
    # => keep gathering elements until we get to a digit, a - or a
    # +. These three indicate the possible start of the next number.
    pattern1 = re.compile(r"([-+]?[0-9]*\.?[0-9]+[^0-9\-+]*)")

    # pattern2: find decimal number (int or float) in string.
    pattern2 = re.compile(r"([-+]?[0-9]*\.?[0-9]+)")

    hmsdms = hmsdms.lower()
    hdlist = pattern1.findall(hmsdms)

    parts = [None, None, None]

    def _fill_right_not_none():
        # Find the pos. where parts is not None. Next value must
        # be inserted to the right of this. If this is 2 then we have
        # already filled seconds part, raise exception. If this is 1
        # then fill 2. If this is 0 fill 1. If none of these then fill
        # 0.
        rp = reversed(parts)
        for i, j in enumerate(rp):
            if j is not None:
                break
        if i == 0:
            # Seconds part already filled.
            raise ValueError("Invalid string.")
        elif i == 1:
            parts[2] = v
        elif i == 2:
            # Either parts[0] is None so fill it, or it is filled
            # and hence fill parts[1].
            if parts[0] is None:
                parts[0] = v
            else:
                parts[1] = v

    for valun in hdlist:
        try:
            # See if this is a pure number.
            v = float(valun)
            # Sexagesimal part cannot be determined. So guess it by
            # seeing which parts have already been identified.
            _fill_right_not_none()
        except ValueError:
            # Not a pure number. Infer the sexagesimal part from the
            # suffix.
            if "hh" in valun or "h" in valun:
                m = pattern2.search(valun)
                parts[0] = float(valun[m.start():m.end()])
                units = "hours"
            if "dd" in valun or "d" in valun:
                m = pattern2.search(valun)
                parts[0] = float(valun[m.start():m.end()])
                units = "degrees"
            if "mm" in valun or "m" in valun:
                m = pattern2.search(valun)
                parts[1] = float(valun[m.start():m.end()])
            if "ss" in valun or "s" in valun:
                m = pattern2.search(valun)
                parts[2] = float(valun[m.start():m.end()])
            if "'" in valun:
                m = pattern2.search(valun)
                parts[1] = float(valun[m.start():m.end()])
            if '"' in valun:
                m = pattern2.search(valun)
                parts[2] = float(valun[m.start():m.end()])
            if ":" in valun:
                # Sexagesimal part cannot be determined. So guess it by
                # seeing which parts have already been identified.
                v = valun.replace(":", "")
                v = float(v)
                _fill_right_not_none()

    if not units:
        units = "degrees"

    # Find sign. Only the first identified part can have a -ve sign.
    for i in parts:
        if i and i < 0.0:
            if sign is None:
                sign = -1
            else:
                raise ValueError("Only one number can be negative.")

    if sign is None:  # None of these are negative.
        sign = 1

    vals = [abs(i) if i is not None else 0.0 for i in parts]

    return dict(sign=sign, units=units, vals=vals, parts=parts)
java
private static RoundRectangle2D.Double toRoundRect(final Rectangle2D pRectangle, final int pArcW, final int pArcH) { return new RoundRectangle2D.Double( pRectangle.getX(), pRectangle.getY(), pRectangle.getWidth(), pRectangle.getHeight(), pArcW, pArcH); }
java
private void registerInternal(final JobID id, final String[] requiredJarFiles) throws IOException { // Use spin lock here while (this.lockMap.putIfAbsent(id, LOCK_OBJECT) != null); try { if (incrementReferenceCounter(id) > 1) { return; } // Check if library manager entry for this id already exists if (this.libraryManagerEntries.containsKey(id)) { throw new IllegalStateException("Library cache manager already contains entry for job ID " + id); } // Check if all the required jar files exist in the cache URL[] urls = null; if (requiredJarFiles != null) { urls = new URL[requiredJarFiles.length]; for (int i = 0; i < requiredJarFiles.length; i++) { final Path p = contains(requiredJarFiles[i]); if (p == null) { throw new IOException(requiredJarFiles[i] + " does not exist in the library cache"); } // Add file to the URL array try { urls[i] = p.toUri().toURL(); } catch (MalformedURLException e) { throw new IOException(StringUtils.stringifyException(e)); } } } final LibraryManagerEntry entry = new LibraryManagerEntry(id, requiredJarFiles, urls); this.libraryManagerEntries.put(id, entry); } finally { this.lockMap.remove(id); } }
java
public static mpsuser add(nitro_service client, mpsuser resource) throws Exception { resource.validate("add"); return ((mpsuser[]) resource.perform_operation(client, "add"))[0]; }