Dataset columns:
  language: stringclasses (2 values)
  func_code_string: stringlengths (63 to 466k)
java
public FieldScopeLogicMap<V> with(FieldScopeLogic fieldScopeLogic, V value) {
  ImmutableList.Builder<Entry<V>> newEntries = ImmutableList.builder();
  // Earlier entries override later ones, so we insert the new one at the front of the list.
  newEntries.add(Entry.of(fieldScopeLogic, value));
  newEntries.addAll(entries);
  return new FieldScopeLogicMap<>(newEntries.build());
}
python
def reduce(self, func, dim=None, axis=None, keep_attrs=None, **kwargs):
    """Reduce this array by applying `func` along some dimension(s).

    Parameters
    ----------
    func : function
        Function which can be called in the form
        `f(x, axis=axis, **kwargs)` to return the result of reducing an
        np.ndarray over an integer valued axis.
    dim : str or sequence of str, optional
        Dimension(s) over which to apply `func`.
    axis : int or sequence of int, optional
        Axis(es) over which to repeatedly apply `func`. Only one of the
        'dim' and 'axis' arguments can be supplied. If neither are
        supplied, then the reduction is calculated over the flattened
        array (by calling `f(x)` without an axis argument).
    keep_attrs : bool, optional
        If True, the variable's attributes (`attrs`) will be copied from
        the original object to the new one. If False (default), the new
        object will be returned without attributes.
    **kwargs : dict
        Additional keyword arguments passed on to `func`.

    Returns
    -------
    reduced : DataArray
        DataArray with this object's array replaced with an array with
        summarized data and the indicated dimension(s) removed.
    """
    var = self.variable.reduce(func, dim, axis, keep_attrs, **kwargs)
    return self._replace_maybe_drop_dims(var)
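A minimal usage sketch for the method above (this matches xarray's `DataArray.reduce`; the example array `da` is hypothetical, not from the original):
python
import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(6.0).reshape(2, 3), dims=("x", "y"))
# Reduce over the "y" dimension; np.mean is applied along that axis.
reduced = da.reduce(np.mean, dim="y")
print(reduced.values)  # [1. 4.]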
java
@Override
public JavaClass findClass(/*@Nonnull*/ String className) {
    // Make sure we handle class names with slashes.
    // If we don't, we can get into serious trouble: a previously
    // loaded class will appear to be missing (because we're using the
    // wrong name to look it up) and be evicted by some other random
    // version of the class loaded from the classpath.
    String dottedClassName = className.replace('/', '.');
    return nameToClassMap.get(dottedClassName);
}
python
def expand(self, vs=None, conj=False):
    """Return the Shannon expansion with respect to a list of variables."""
    vs = self._expect_vars(vs)
    if vs:
        outer, inner = (And, Or) if conj else (Or, And)
        terms = [inner(self.restrict(p), *boolfunc.point2term(p, conj))
                 for p in boolfunc.iter_points(vs)]
        if conj:
            terms = [term for term in terms if term is not One]
        else:
            terms = [term for term in terms if term is not Zero]
        return outer(*terms, simplify=False)
    else:
        return self
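A small usage sketch, assuming this is pyeda's `Expression.expand` (the variable names are illustrative):
python
from pyeda.inter import exprvar

a, b = exprvar('a'), exprvar('b')
f = a ^ b
# Disjunctive Shannon expansion over `a`:
# Or(And(~a, f restricted to a=0), And(a, f restricted to a=1))
print(f.expand([a]))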
java
public static <K> Matcher<Multimap<K, ?>> hasSameKeySet(final Multimap<K, ?> comparison) {
    return IsMultimapWithKeySet.hasSameKeySet(comparison.keySet());
}
python
def write(self, data):
    """Buffers some data to be sent to the host:port in a non blocking way.

    So the data is always buffered and not sent on the socket in a
    synchronous way.

    You can give a WriteBuffer as parameter. The internal Connection
    WriteBuffer will be extended with this one (without copying).

    Args:
        data (str or WriteBuffer): string (or WriteBuffer) to write to
            the host:port.
    """
    if isinstance(data, WriteBuffer):
        self._write_buffer.append(data)
    else:
        if len(data) > 0:
            self._write_buffer.append(data)
    if self.aggressive_write:
        self._handle_write()
    if self._write_buffer._total_length > 0:
        self._register_or_update_event_handler(write=True)
python
def script_file(self):
    """
    Returns the startup script file for this VPCS VM.

    :returns: path to startup script file
    """
    # use the default VPCS file if it exists
    path = os.path.join(self.working_dir, 'startup.vpc')
    if os.path.exists(path):
        return path
    else:
        return None
python
def generate_values(self, *args, **kwargs):
    """
    Instantiate a random variable and apply annual growth factors.
    :return:
    """
    assert 'ref value' in self.kwargs
    # 1. Generate $\mu$
    start_date = self.times[0].to_pydatetime()
    end_date = self.times[-1].to_pydatetime()
    ref_date = self.ref_date
    mu = self.generate_mu(end_date, ref_date, start_date)

    # 3. Generate $\sigma$
    ## Prepare array with growth values $\sigma$
    if self.sample_mean_value:
        sigma = np.zeros((len(self.times), self.size))
    else:
        if self.kwargs['type'] == 'interp':
            def get_date(record):
                return datetime.datetime.strptime(record[0], "%Y-%m-%d")

            ref_value_ = sorted(json.loads(self.kwargs['ref value'].strip()).items(), key=get_date)
            initial_value = ref_value_[0][1]
        else:
            initial_value = self.kwargs['ref value']
        variability_ = initial_value * self.kwargs['initial_value_proportional_variation']
        logger.debug(f'sampling random distribution with parameters -{variability_}, 0, {variability_}')
        sigma = np.random.triangular(-1 * variability_, 0, variability_, (len(self.times), self.size))

    ## 4. Prepare growth array for $\alpha_{sigma}$
    alpha_sigma = growth_coefficients(start_date, end_date, ref_date, self.kwargs['ef_growth_factor'], 1)

    ### 5. Prepare DataFrame
    iterables = [self.times, range(self.size)]
    index_names = ['time', 'samples']
    _multi_index = pd.MultiIndex.from_product(iterables, names=index_names)
    df = pd.DataFrame(index=_multi_index, dtype=float)

    from dateutil import relativedelta
    r = relativedelta.relativedelta(end_date, start_date)
    months = r.years * 12 + r.months + 1
    name = kwargs['name']

    ## Apply growth to $\sigma$ and add $\sigma$ to $\mu$
    df[name] = ((sigma * alpha_sigma) + mu.reshape(months, 1)).ravel()

    ## test if df has sub-zero values
    df_sigma__dropna = df[name].where(df[name] < 0).dropna()
    if not df_sigma__dropna.empty:
        logger.warning(f"Negative values for parameter {name} from {df_sigma__dropna.index[0][0]}")
    return df[name]
java
private PointF transformCoordBitmapToTouch(float bx, float by) {
    matrix.getValues(m);

    float origW = getDrawable().getIntrinsicWidth();
    float origH = getDrawable().getIntrinsicHeight();

    float px = bx / origW;
    float py = by / origH;

    float finalX = m[Matrix.MTRANS_X] + getImageWidth() * px;
    float finalY = m[Matrix.MTRANS_Y] + getImageHeight() * py;

    return new PointF(finalX, finalY);
}
java
public SIDestinationAddress getDestinationAddress() {
    if (TraceComponent.isAnyTracingEnabled() && CoreSPIProducerSession.tc.isEntryEnabled())
        SibTr.entry(CoreSPIProducerSession.tc, "getDestinationAddress", this);

    // if(_ != null)
    // {
    //     if (TraceComponent.isAnyTracingEnabled() && CoreSPIProducerSession.tc.isEntryEnabled())
    //         SibTr.exit(CoreSPIProducerSession.tc, "getDestinationAddress", _routingDestinationAddr);
    //     return _routingDestinationAddr;
    // }

    if (_address == null) {
        SIDestinationAddress destAddr = DestinationSessionUtils.createJsDestinationAddress(_destination);
        if (TraceComponent.isAnyTracingEnabled() && CoreSPIProducerSession.tc.isEntryEnabled())
            SibTr.exit(CoreSPIProducerSession.tc, "getDestinationAddress", destAddr);
        return destAddr;
    } else {
        if (TraceComponent.isAnyTracingEnabled() && CoreSPIProducerSession.tc.isEntryEnabled())
            SibTr.exit(CoreSPIProducerSession.tc, "getDestinationAddress", _address);
        return _address;
    }
}
java
protected <T> Map<String, CompletableFuture<T>> executeOnMasters(
        Function<RedisClusterAsyncCommands<K, V>, RedisFuture<T>> function) {
    return executeOnNodes(function, redisClusterNode -> redisClusterNode.is(MASTER));
}
java
public synchronized String createTempFile(CmsObject cms, String resourceName, CmsUUID currentProjectId)
throws CmsException {

    // check that the current user has write permissions
    if (!cms.hasPermissions(cms.readResource(resourceName, CmsResourceFilter.ALL), CmsPermissionSet.ACCESS_WRITE)) {
        throw new CmsPermissionViolationException(
            org.opencms.db.Messages.get().container(org.opencms.db.Messages.ERR_PERM_DENIED_2, resourceName, "w"));
    }

    // initialize admin cms context
    CmsObject adminCms = getAdminCms(cms);

    // generate the filename of the temporary file
    String temporaryFilename = CmsWorkplace.getTemporaryFileName(resourceName);

    // check if the temporary file is already present
    if (adminCms.existsResource(temporaryFilename, CmsResourceFilter.ALL)) {
        // delete old temporary file
        if (!cms.getLock(temporaryFilename).isUnlocked()) {
            // steal lock
            cms.changeLock(temporaryFilename);
        } else {
            // lock resource to current user
            cms.lockResource(temporaryFilename);
        }
        cms.deleteResource(temporaryFilename, CmsResource.DELETE_PRESERVE_SIBLINGS);
    }

    try {
        // switch to the temporary file project
        adminCms.getRequestContext().setCurrentProject(cms.readProject(getTempFileProjectId()));
        // copy the file to edit to a temporary file
        adminCms.copyResource(resourceName, temporaryFilename, CmsResource.COPY_AS_NEW);
    } finally {
        // switch back to current project
        adminCms.getRequestContext().setCurrentProject(cms.readProject(currentProjectId));
    }

    try {
        // switch to the temporary file project
        cms.getRequestContext().setCurrentProject(
            cms.readProject(OpenCms.getWorkplaceManager().getTempFileProjectId()));
        // lock the temporary file
        cms.changeLock(temporaryFilename);
        // touch the temporary file
        cms.setDateLastModified(temporaryFilename, System.currentTimeMillis(), false);
        // set the temporary file flag
        CmsResource tempFile = cms.readResource(temporaryFilename, CmsResourceFilter.ALL);
        int flags = tempFile.getFlags();
        if ((flags & CmsResource.FLAG_TEMPFILE) == 0) {
            flags += CmsResource.FLAG_TEMPFILE;
        }
        cms.chflags(temporaryFilename, flags);
        // remove eventual release & expiration date from temporary file to make preview in editor work
        cms.setDateReleased(temporaryFilename, CmsResource.DATE_RELEASED_DEFAULT, false);
        cms.setDateExpired(temporaryFilename, CmsResource.DATE_EXPIRED_DEFAULT, false);
        // remove visibility permissions for everybody on temporary file if possible
        if (cms.hasPermissions(tempFile, CmsPermissionSet.ACCESS_CONTROL)) {
            cms.chacc(
                temporaryFilename,
                I_CmsPrincipal.PRINCIPAL_GROUP,
                OpenCms.getDefaultUsers().getGroupUsers(),
                "-v");
        }
    } finally {
        // switch back to current project
        cms.getRequestContext().setCurrentProject(cms.readProject(currentProjectId));
    }

    return temporaryFilename;
}
java
public long getDateField(String name) {
    String val = valueParameters(get(name), null);
    if (val == null)
        return -1;

    if (_dateReceive == null) {
        _dateReceive = (SimpleDateFormat[]) __dateReceiveCache.get();
        if (_dateReceive == null) {
            _dateReceive = new SimpleDateFormat[__dateReceiveSource.length];
            __dateReceiveCache.set(_dateReceive);
        }
    }

    for (int i = 0; i < _dateReceive.length; i++) {
        // clone formatter for thread safety
        if (_dateReceive[i] == null)
            _dateReceive[i] = (SimpleDateFormat) __dateReceiveSource[i].clone();

        try {
            Date date = (Date) _dateReceive[i].parseObject(val);
            return date.getTime();
        } catch (java.lang.Exception e) {
            LogSupport.ignore(log, e);
        }
    }

    if (val.endsWith(" GMT")) {
        val = val.substring(0, val.length() - 4);
        for (int i = 0; i < _dateReceive.length; i++) {
            try {
                Date date = (Date) _dateReceive[i].parseObject(val);
                return date.getTime();
            } catch (java.lang.Exception e) {
                LogSupport.ignore(log, e);
            }
        }
    }
    throw new IllegalArgumentException(val);
}
java
static Predicate<DateValue> weekIntervalFilter(
        final int interval, final Weekday wkst, final DateValue dtStart) {
    return new Predicate<DateValue>() {
        DateValue wkStart;
        {
            // the latest day with day of week wkst on or before dtStart
            DTBuilder wkStartB = new DTBuilder(dtStart);
            wkStartB.day -= (7 + Weekday.valueOf(dtStart).javaDayNum - wkst.javaDayNum) % 7;
            wkStart = wkStartB.toDate();
        }

        public boolean apply(DateValue date) {
            int daysBetween = TimeUtils.daysBetween(date, wkStart);
            if (daysBetween < 0) {
                // date must be before dtStart. Shouldn't occur in practice.
                daysBetween += (interval * 7 * (1 + daysBetween / (-7 * interval)));
            }
            int off = (daysBetween / 7) % interval;
            return 0 == off;
        }
    };
}
python
from numpy import array, meshgrid, vstack, where
from matplotlib.path import Path


def polygon_to_points(coords, z=None):
    """
    Given a list of pairs of points which define a polygon,
    return a list of points interior to the polygon.
    """
    bounds = array(coords).astype('int')
    bmax = bounds.max(0)
    bmin = bounds.min(0)
    path = Path(bounds)
    grid = meshgrid(range(bmin[0], bmax[0] + 1), range(bmin[1], bmax[1] + 1))
    # Materialize the coordinate pairs; zip() is lazy in Python 3.
    grid_flat = list(zip(grid[0].ravel(), grid[1].ravel()))
    points = path.contains_points(grid_flat).reshape(grid[0].shape).astype('int')
    points = where(points)
    points = (vstack([points[0], points[1]]).T + bmin[-1::-1]).tolist()
    if z is not None:
        # list comprehension instead of map(), which returns an iterator in Python 3
        points = [[p[0], p[1], z] for p in points]
    return points
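A hypothetical call, assuming the function above is in scope; a 2×2 square yields the integer grid points it contains:
python
square = [(0, 0), (2, 0), (2, 2), (0, 2)]
pts = polygon_to_points(square)
print(pts)  # integer grid points inside the square, e.g. [[1, 1], ...]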
java
private static void writeResourceToFile(String resourceName, File file) throws IOException {
    if (file == null) {
        throw new NullPointerException("Target file may not be null");
    }
    if (file.exists()) {
        throw new IllegalArgumentException("Target file already exists: " + file);
    }
    InputStream inputStream = LibUtils.class.getResourceAsStream(resourceName);
    if (inputStream == null) {
        throw new IOException("No resource found with name '" + resourceName + "'");
    }
    OutputStream outputStream = null;
    try {
        outputStream = new FileOutputStream(file);
        byte[] buffer = new byte[32768];
        while (true) {
            int read = inputStream.read(buffer);
            if (read < 0) {
                break;
            }
            outputStream.write(buffer, 0, read);
        }
        outputStream.flush();
    } finally {
        if (outputStream != null) {
            try {
                outputStream.close();
            } catch (IOException e) {
                logger.log(Level.SEVERE, e.getMessage(), e);
            }
        }
        try {
            inputStream.close();
        } catch (IOException e) {
            logger.log(Level.SEVERE, e.getMessage(), e);
        }
    }
}
java
public static String calcNormalDensity(String slsnd, String slcly, String omPct) {
    String satMt = calcSaturatedMoisture(slsnd, slcly, omPct);
    String ret = product(substract("100", satMt), "0.0265");
    LOG.debug("Calculate result for Normal density, g/cm-3 is {}", ret);
    return ret;
}
java
public static FlacFile open(InputStream inp) throws IOException, FileNotFoundException {
    inp.mark(4);
    byte[] header = new byte[4];
    IOUtils.readFully(inp, header);
    inp.reset();

    if (header[0] == (byte) 'O' && header[1] == (byte) 'g' &&
        header[2] == (byte) 'g' && header[3] == (byte) 'S') {
        return new FlacOggFile(new OggFile(inp));
    }
    if (header[0] == (byte) 'f' && header[1] == (byte) 'L' &&
        header[2] == (byte) 'a' && header[3] == (byte) 'C') {
        return new FlacNativeFile(inp);
    }
    throw new IllegalArgumentException("File type not recognised");
}
java
protected <T> T defaultIfNotSet(String propertyName, T defaultValue, Class<T> type) {
    return (isSet(propertyName) ? convert(propertyName, type) : defaultValue);
}
python
def get_question_form_for_create(self, item_id, question_record_types):
    """Gets the question form for creating new questions.

    A new form should be requested for each create transaction.

    arg:    item_id (osid.id.Id): an assessment item ``Id``
    arg:    question_record_types (osid.type.Type[]): array of
            question record types to be included in the create
            operation or an empty list if none
    return: (osid.assessment.QuestionForm) - the question form
    raise:  NullArgument - ``question_record_types`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    raise:  Unsupported - unable to get form for requested record
            types
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.learning.ActivityAdminSession.get_activity_form_for_create_template
    if not isinstance(item_id, ABCId):
        raise errors.InvalidArgument('argument is not a valid OSID Id')
    for arg in question_record_types:
        if not isinstance(arg, ABCType):
            raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')
    if question_record_types == []:
        # WHY are we passing bank_id = self._catalog_id below, seems redundant:
        obj_form = objects.QuestionForm(
            bank_id=self._catalog_id,
            item_id=item_id,
            catalog_id=self._catalog_id,
            runtime=self._runtime,
            proxy=self._proxy)
    else:
        obj_form = objects.QuestionForm(
            bank_id=self._catalog_id,
            record_types=question_record_types,
            item_id=item_id,
            catalog_id=self._catalog_id,
            runtime=self._runtime,
            proxy=self._proxy)
    obj_form._for_update = False
    self._forms[obj_form.get_id().get_identifier()] = not CREATED
    return obj_form
java
public ListResourceComplianceSummariesResult withResourceComplianceSummaryItems(
        ResourceComplianceSummaryItem... resourceComplianceSummaryItems) {
    if (this.resourceComplianceSummaryItems == null) {
        setResourceComplianceSummaryItems(
            new com.amazonaws.internal.SdkInternalList<ResourceComplianceSummaryItem>(resourceComplianceSummaryItems.length));
    }
    for (ResourceComplianceSummaryItem ele : resourceComplianceSummaryItems) {
        this.resourceComplianceSummaryItems.add(ele);
    }
    return this;
}
python
def validate(self, data):
    """
    Check that the video data is valid.
    """
    if data is not None and not isinstance(data, dict):
        raise serializers.ValidationError("Invalid data")

    try:
        profiles = [ev["profile"] for ev in data.get("encoded_videos", [])]
        if len(profiles) != len(set(profiles)):
            raise serializers.ValidationError("Invalid data: duplicate profiles")
    except KeyError:
        raise serializers.ValidationError("profile required for deserializing")
    except TypeError:
        raise serializers.ValidationError("profile field needs to be a profile_name (str)")

    # Clean course_video list from any invalid data.
    course_videos = [(course_video, image)
                     for course_video, image in data.get('courses', [])
                     if course_video]
    data['courses'] = course_videos

    return data
java
public int clearDiskCache() {
    final String methodName = "clearDiskCache()";
    int returnCode = NO_EXCEPTION;
    Exception diskException = null;
    try {
        this.invalidationBuffer.setDiskClearInProgress(true);
        if (delayOffload) {
            if (!this.disableDependencyId) {
                auxDataDependencyTable.clear();
            }
            if (!this.disableTemplatesSupport) {
                auxTemplateDependencyTable.clear();
            }
        }
        stop(COMPLETE_CLEAR);
        try {
            rwLock.writeLock().lock();
            closeNoRWLock();
            deleteDiskCacheFiles(); // delete disk cache files
            this.cod.diskCacheSizeInfo.reset();
            init_files(); // restart things
            this.cod.enableCacheSizeInBytes = true;
            this.cod.currentCacheSizeInBytes = this.minDiskCacheSizeInBytes;
            if (this.cod.diskCacheSizeInfo.diskCacheSizeInGBLimit > 0) {
                this.cache.setEnableDiskCacheSizeInBytesChecking(true);
            }
            if (this.cod.evictionPolicy != CacheConfig.EVICTION_NONE) {
                synchronized (evictionTableMonitor) {
                    this.EvictionTable.clear();
                }
            }
        } catch (FileManagerException ex) {
            this.diskCacheException = ex;
            diskException = ex;
            returnCode = DISK_EXCEPTION;
        } catch (HashtableOnDiskException ex) {
            this.diskCacheException = ex;
            diskException = ex;
            returnCode = DISK_EXCEPTION;
        } catch (IOException ex) {
            this.diskCacheException = ex;
            diskException = ex;
            returnCode = DISK_EXCEPTION;
        } catch (Exception ex) {
            returnCode = OTHER_EXCEPTION;
            diskException = ex;
        } finally {
            if (returnCode != NO_EXCEPTION) {
                if (tc.isDebugEnabled())
                    Tr.debug(tc, methodName, "cacheName=" + this.cacheName
                             + "\n Exception: " + ExceptionUtility.getStackTrace(diskException));
            }
            if (returnCode == DISK_EXCEPTION || returnCode == OTHER_EXCEPTION) {
                com.ibm.ws.ffdc.FFDCFilter.processException(
                    diskException, "com.ibm.ws.cache.HTODDynacache.clearDiskCache", "525", this);
            }
            rwLock.writeLock().unlock();
        }
    } finally {
        this.invalidationBuffer.setDiskClearInProgress(false);
    }
    return returnCode;
}
python
def generate_classes(outf, msgs):
    """
    Generate the implementations of the classes representing MAVLink messages.
    """
    print("Generating class definitions")
    wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent="")
    outf.write("\nmavlink.messages = {};\n\n")

    def field_descriptions(fields):
        ret = ""
        for f in fields:
            ret += " %-18s : %s (%s)\n" % (f.name, f.description.strip(), f.type)
        return ret

    for m in msgs:
        comment = "%s\n\n%s" % (wrapper.fill(m.description.strip()), field_descriptions(m.fields))

        selffieldnames = 'self, '
        for f in m.fields:
            # if f.omit_arg:
            #     selffieldnames += '%s=%s, ' % (f.name, f.const_value)
            # else:
            # -- Omitting the code above because it is rarely used (only once?) and would need
            #    some special handling in javascript. Specifically, inside the method definition,
            #    it needs to check for a value then assign a default.
            selffieldnames += '%s, ' % f.name
        selffieldnames = selffieldnames[:-2]

        sub = {'NAMELOWER': m.name.lower(),
               'SELFFIELDNAMES': selffieldnames,
               'COMMENT': comment,
               'FIELDNAMES': ", ".join(m.fieldnames)}

        t.write(outf, """
/*
${COMMENT}
*/
""", sub)

        # function signature + declaration
        outf.write("mavlink.messages.%s = function(" % (m.name.lower()))
        if len(m.fields) != 0:
            outf.write(", ".join(m.fieldnames))
        outf.write(") {")

        # body: set message type properties
        outf.write("""
    this.format = '%s';
    this.id = mavlink.MAVLINK_MSG_ID_%s;
    this.order_map = %s;
    this.crc_extra = %u;
    this.name = '%s';
""" % (m.fmtstr, m.name.upper(), m.order_map, m.crc_extra, m.name.upper()))

        # body: set own properties
        if len(m.fieldnames) != 0:
            outf.write("    this.fieldnames = ['%s'];\n" % "', '".join(m.fieldnames))
        outf.write("""
    this.set(arguments);
}
""")

        # inherit methods from the base message class
        outf.write("""
mavlink.messages.%s.prototype = new mavlink.message;
""" % m.name.lower())

        # Implement the pack() function for this message
        outf.write("""
mavlink.messages.%s.prototype.pack = function(mav) {
    return mavlink.message.prototype.pack.call(this, mav, this.crc_extra, jspack.Pack(this.format""" % m.name.lower())
        if len(m.fields) != 0:
            outf.write(", [ this." + ", this.".join(m.ordered_fieldnames) + ']')
        outf.write("));\n}\n\n")
python
def get_tip_labels(self, idx=None):
    """
    Returns tip labels in the order they will be plotted on the tree,
    i.e., starting from zero axis and counting up by units of 1 (bottom
    to top in right-facing trees; left to right in down-facing). If 'idx'
    is indicated then a list of tip labels descended from that node will
    be returned, instead of all tip labels. This is useful in combination
    with other functions that select nodes/clades of the tree based on a
    list of tip labels. You can use the toytree draw() command with
    tip_labels='idx' or tip_labels=True to see idx labels plotted on nodes.

    Parameters:
        idx (int): index label of a node.

    Example:
        # select a clade of the tree and use it for rooting.
        tiplist = tre.get_descenants_from_idx(21)
        tre.root(names=tiplist)
    """
    # idx may legitimately be 0, so test against None rather than truthiness
    if idx is None:
        return self.treenode.get_leaf_names()[::-1]
    else:
        treenode = self.treenode.search_nodes(idx=idx)[0]
        return treenode.get_leaf_names()[::-1]
python
def get_scoring_data_iters(sources: List[str],
                           target: str,
                           source_vocabs: List[vocab.Vocab],
                           target_vocab: vocab.Vocab,
                           batch_size: int,
                           batch_num_devices: int,
                           max_seq_len_source: int,
                           max_seq_len_target: int) -> 'BaseParallelSampleIter':
    """
    Returns a data iterator for scoring. The iterator loads data on demand,
    batch by batch, and does not skip any lines. Lines that are too long
    are truncated.

    :param sources: Path to source training data (with optional factor data paths).
    :param target: Path to target training data.
    :param source_vocabs: Source vocabulary and optional factor vocabularies.
    :param target_vocab: Target vocabulary.
    :param batch_size: Batch size.
    :param batch_num_devices: Number of devices batches will be parallelized across.
    :param max_seq_len_source: Maximum source sequence length.
    :param max_seq_len_target: Maximum target sequence length.
    :return: The scoring data iterator.
    """
    logger.info("==============================")
    logger.info("Creating scoring data iterator")
    logger.info("==============================")

    # One bucket to hold them all,
    bucket = (max_seq_len_source, max_seq_len_target)

    # ...One loader to raise them,
    data_loader = RawParallelDatasetLoader(buckets=[bucket],
                                           eos_id=target_vocab[C.EOS_SYMBOL],
                                           pad_id=C.PAD_ID,
                                           skip_blanks=False)

    # ...one iterator to traverse them all,
    scoring_iter = BatchedRawParallelSampleIter(data_loader=data_loader,
                                                sources=sources,
                                                target=target,
                                                source_vocabs=source_vocabs,
                                                target_vocab=target_vocab,
                                                bucket=bucket,
                                                batch_size=batch_size,
                                                max_lens=(max_seq_len_source, max_seq_len_target),
                                                num_factors=len(sources))

    # and with the model appraise them.
    return scoring_iter
java
public EClass getEndSegmentCommand() {
    if (endSegmentCommandEClass == null) {
        endSegmentCommandEClass = (EClass) EPackage.Registry.INSTANCE
                .getEPackage(AfplibPackage.eNS_URI).getEClassifiers().get(437);
    }
    return endSegmentCommandEClass;
}
java
private Collection<TypeElement> subinterfaces(TypeElement te) {
    Collection<TypeElement> ret = classToSubinterface.get(te);
    if (ret == null) {
        ret = new TreeSet<>(utils.makeClassUseComparator());
        Set<TypeElement> subs = classtree.subInterfaces(te);
        if (subs != null) {
            ret.addAll(subs);
            for (TypeElement sub : subs) {
                ret.addAll(subinterfaces(sub));
            }
        }
        addAll(classToSubinterface, te, ret);
    }
    return ret;
}
python
def disc(ghi, altitude, doy, pressure=101325, min_sin_altitude=0.065,
         min_altitude=3, max_airmass=12):
    """
    Estimate Direct Normal Irradiance from Global Horizontal Irradiance
    using the DISC model.

    The DISC algorithm converts global horizontal irradiance to direct
    normal irradiance through empirical relationships between the global
    and direct clearness indices.

    This implementation limits the clearness index to 1 by default.

    The original report describing the DISC model [1]_ uses the relative
    airmass rather than the absolute (pressure-corrected) airmass. However,
    the NREL implementation of the DISC model [2]_ uses absolute airmass.
    PVLib Matlab also uses the absolute airmass. pvlib python defaults to
    absolute airmass, but the relative airmass can be used by supplying
    `pressure=None`.

    Note:
        [1] Maxwell, E. L., "A Quasi-Physical Model for Converting Hourly
        Global Horizontal to Direct Normal Insolation", Technical Report
        No. SERI/TR-215-3087, Golden, CO: Solar Energy Research Institute,
        1987.

        [2] Maxwell, E. "DISC Model", Excel Worksheet.
        https://www.nrel.gov/grid/solar-resource/disc.html

    Args:
        ghi : numeric
            Global horizontal irradiance in W/m^2.
        altitude : numeric
            True (not refraction-corrected) solar altitude angles in
            decimal degrees.
        doy : An integer representing the day of the year.
        pressure : None or numeric, default 101325
            Site pressure in Pascal. If None, relative airmass is used
            instead of absolute (pressure-corrected) airmass.
        min_sin_altitude : numeric, default 0.065
            Minimum value of sin(altitude) to allow when calculating global
            clearness index `kt`. Equivalent to altitude = 3.727 degrees.
        min_altitude : numeric, default 3
            Minimum value of altitude to allow in DNI calculation. DNI will
            be set to 0 for times with altitude values smaller than
            `min_altitude`.
        max_airmass : numeric, default 12
            Maximum value of the airmass to allow in Kn calculation.
            Default value (12) comes from range over which Kn was fit to
            airmass in the original paper.

    Returns:
        dni: The modeled direct normal irradiance in W/m^2 provided by the
            Direct Insolation Simulation Code (DISC) model.
        kt: Ratio of global to extraterrestrial irradiance on a horizontal
            plane.
        am: Airmass
    """
    if altitude > min_altitude and ghi > 0:
        # this is the I0 calculation from the reference
        # SSC uses solar constant = 1367.0 (checked 2018 08 15)
        I0 = get_extra_radiation(doy, 1370.)

        kt = clearness_index(ghi, altitude, I0,
                             min_sin_altitude=min_sin_altitude,
                             max_clearness_index=1)

        am = get_relative_airmass(altitude, model='kasten1966')
        if pressure is not None:
            am = get_absolute_airmass(am, pressure)

        Kn, am = _disc_kn(kt, am, max_airmass=max_airmass)
        dni = Kn * I0

        dni = max(dni, 0)

        return dni, kt, am
    else:
        return 0, 0, None
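A hypothetical single-point call; the helper functions the model relies on (get_extra_radiation, clearness_index, get_relative_airmass, get_absolute_airmass, _disc_kn) are assumed importable alongside it:
python
# Clear mid-morning sun on day 180, default sea-level pressure.
dni, kt, am = disc(ghi=400.0, altitude=30.0, doy=180)
print(dni, kt, am)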
java
public static boolean checkMmul(INDArray first, INDArray second, double maxRelativeDifference,
                                double minAbsDifference) {
    if (first.size(1) != second.size(0))
        throw new IllegalArgumentException("first.columns != second.rows");
    RealMatrix rmFirst = convertToApacheMatrix(first);
    RealMatrix rmSecond = convertToApacheMatrix(second);

    INDArray result = first.mmul(second);
    RealMatrix rmResult = rmFirst.multiply(rmSecond);

    if (!checkShape(rmResult, result))
        return false;

    boolean ok = checkEntries(rmResult, result, maxRelativeDifference, minAbsDifference);
    if (!ok) {
        INDArray onCopies = Shape.toOffsetZeroCopy(first).mmul(Shape.toOffsetZeroCopy(second));
        printFailureDetails(first, second, rmResult, result, onCopies, "mmul");
    }
    return ok;
}
java
@Override
public boolean subsumes(Atomic atomic) {
    if (!atomic.isAtom()) return false;
    Atom parent = (Atom) atomic;
    MultiUnifier multiUnifier = this.getMultiUnifier(parent, UnifierType.SUBSUMPTIVE);
    if (multiUnifier.isEmpty()) return false;

    MultiUnifier inverse = multiUnifier.inverse();
    // check whether propagated answers would be complete
    return !inverse.isEmpty()
            && inverse.stream().allMatch(u -> u.values().containsAll(this.getVarNames()))
            && !parent.getPredicates(NeqPredicate.class).findFirst().isPresent()
            && !this.getPredicates(NeqPredicate.class).findFirst().isPresent();
}
java
void undoAdd(Link newLink) throws ObjectManagerException {
    final String methodName = "undoAdd";
    if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
        trace.entry(this, cclass, methodName, new Object[] { newLink });

    // Detach this Link from the list.
    if (newLink.next == null) { // Are we the tail of list?
        tail = newLink.previous;
    } else { // Not tail of the list.
        // Join up the backwards Link.
        Link nextLink = (Link) newLink.next.getManagedObject();
        nextLink.previous = newLink.previous;
    } // if at tail of list.

    if (newLink.previous == null) { // Are we the head of list?
        head = newLink.next;
    } else { // Not head of the list.
        // Join up the forwards Link.
        Link previousLink = (Link) newLink.previous.getManagedObject();
        previousLink.next = newLink.next;
    } // if at head of list.

    // Reset the availableHead.
    availableHead = head;
    skipToBeDeleted();

    decrementSize(); // Adjust list length.

    // Give back all of the remaining space.
    owningToken.objectStore.reserve((int) -reservedSpaceInStore, false);

    if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
        trace.exit(this, cclass, methodName);
}
java
private void verifyFrameOpcode(WebSocketFrame frame) throws WebSocketException {
    switch (frame.getOpcode()) {
        case CONTINUATION:
        case TEXT:
        case BINARY:
        case CLOSE:
        case PING:
        case PONG:
            // Known opcode
            return;

        default:
            break;
    }

    // If extended use of web socket frames is allowed.
    if (mWebSocket.isExtended()) {
        // Allow the unknown opcode.
        return;
    }

    // A frame has an unknown opcode.
    throw new WebSocketException(
        WebSocketError.UNKNOWN_OPCODE,
        "A frame has an unknown opcode: 0x" + Integer.toHexString(frame.getOpcode()));
}
java
static Set<String> parseParametersToCheck(final String initParamValue) {
    final Set<String> parameterNames = new HashSet<String>();

    if (null == initParamValue) {
        return parameterNames;
    }

    final String[] tokens = initParamValue.split("\\s+");
    if (0 == tokens.length) {
        throw new IllegalArgumentException(
            "[" + initParamValue + "] had no tokens but should have had at least one token.");
    }

    if (1 == tokens.length && "*".equals(tokens[0])) {
        return parameterNames;
    }

    for (final String parameterName : tokens) {
        if ("*".equals(parameterName)) {
            throw new IllegalArgumentException(
                "Star token encountered among other tokens in parsing [" + initParamValue + "]");
        }
        parameterNames.add(parameterName);
    }

    return parameterNames;
}
python
def find(self):
    '''
    Find records matching the keyword.
    '''
    keyword = self.get_argument('keyword').strip()
    kwd = {
        'pager': '',
        'title': 'Searching Result',
    }
    self.render('user/info_list/find_list.html',
                userinfo=self.userinfo,
                kwd=kwd,
                recs=MPost.get_by_keyword(keyword))
python
def plot_roc(evaluation, class_index=None, title=None, key_loc="lower right", outfile=None, wait=True):
    """
    Plots the ROC (receiver operator characteristics) curve for the given predictions.
    TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html

    :param evaluation: the evaluation to obtain the predictions from
    :type evaluation: Evaluation
    :param class_index: the list of 0-based indices of the class-labels to create the plot for
    :type class_index: list
    :param title: an optional title
    :type title: str
    :param key_loc: the position string for the key
    :type key_loc: str
    :param outfile: the output file, ignored if None
    :type outfile: str
    :param wait: whether to wait for the user to close the plot
    :type wait: bool
    """
    if not plot.matplotlib_available:
        logger.error("Matplotlib is not installed, plotting unavailable!")
        return
    if class_index is None:
        class_index = [0]
    ax = None
    for cindex in class_index:
        data = generate_thresholdcurve_data(evaluation, cindex)
        head = evaluation.header
        area = get_auc(data)
        x, y = get_thresholdcurve_data(data, "False Positive Rate", "True Positive Rate")
        if ax is None:
            fig, ax = plt.subplots()
            ax.set_xlabel("False Positive Rate")
            ax.set_ylabel("True Positive Rate")
            if title is None:
                title = "ROC"
            ax.set_title(title)
            ax.grid(True)
            fig.canvas.set_window_title(title)
            plt.xlim([-0.05, 1.05])
            plt.ylim([-0.05, 1.05])
        plot_label = head.class_attribute.value(cindex) + " (AUC: %0.4f)" % area
        ax.plot(x, y, label=plot_label)
    ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c="0.3")
    plt.draw()
    plt.legend(loc=key_loc, shadow=True)
    if outfile is not None:
        plt.savefig(outfile)
    if wait:
        plt.show()
python
def send_frame(self, frame):
    '''
    Queue a frame for sending. Will send immediately if there are no
    pending synchronous transactions on this connection.
    '''
    if self.closed:
        if self.close_info and len(self.close_info['reply_text']) > 0:
            raise ChannelClosed(
                "channel %d is closed: %s : %s",
                self.channel_id,
                self.close_info['reply_code'],
                self.close_info['reply_text'])
        raise ChannelClosed()

    # If there's any pending event at all, then it means that when the
    # current dispatch loop started, all possible frames were flushed
    # and the remaining item(s) starts with a sync callback. After careful
    # consideration, it seems that it's safe to assume the len>0 means to
    # buffer the frame. The other advantage here is
    if not len(self._pending_events):
        if not self._active and \
                isinstance(frame, (ContentFrame, HeaderFrame)):
            raise Channel.Inactive(
                "Channel %d flow control activated", self.channel_id)
        self._connection.send_frame(frame)
    else:
        self._pending_events.append(frame)
python
def _subproc(scons_env, cmd, error='ignore', **kw):
    """Do common setup for a subprocess.Popen() call

    This function is still in draft mode. We're going to need something
    like it in the long run as more and more places use subprocess, but
    I'm sure it'll have to be tweaked to get the full desired functionality.
    one special arg (so far?), 'error', to tell what to do with exceptions.
    """
    # allow std{in,out,err} to be "'devnull'"
    io = kw.get('stdin')
    if is_String(io) and io == 'devnull':
        kw['stdin'] = open(os.devnull)
    io = kw.get('stdout')
    if is_String(io) and io == 'devnull':
        kw['stdout'] = open(os.devnull, 'w')
    io = kw.get('stderr')
    if is_String(io) and io == 'devnull':
        kw['stderr'] = open(os.devnull, 'w')

    # Figure out what shell environment to use
    ENV = kw.get('env', None)
    if ENV is None:
        ENV = get_default_ENV(scons_env)

    # Ensure that the ENV values are all strings:
    new_env = {}
    for key, value in ENV.items():
        if is_List(value):
            # If the value is a list, then we assume it is a path list,
            # because that's a pretty common list-like value to stick
            # in an environment variable:
            value = SCons.Util.flatten_sequence(value)
            new_env[key] = os.pathsep.join(map(str, value))
        else:
            # It's either a string or something else. If it's a string,
            # we still want to call str() because it might be a *Unicode*
            # string, which makes subprocess.Popen() gag. If it isn't a
            # string or a list, then we just coerce it to a string, which
            # is the proper way to handle Dir and File instances and will
            # produce something reasonable for just about everything else:
            new_env[key] = str(value)
    kw['env'] = new_env

    try:
        return subprocess.Popen(cmd, **kw)
    except EnvironmentError as e:
        if error == 'raise':
            raise

        # return a dummy Popen instance that only returns error
        class dummyPopen(object):
            def __init__(self, e):
                self.exception = e

            def communicate(self, input=None):
                return ('', '')

            def wait(self):
                return -self.exception.errno

            stdin = None

            class f(object):
                def read(self):
                    return ''

                def readline(self):
                    return ''

                def __iter__(self):
                    return iter(())

            stdout = stderr = f()

        return dummyPopen(e)
python
def get_key_value(parts, key_field_num, ignore_missing_keys, seen_keys, output_type):
    """
    get the key value from the line and check it's not a dup. or missing.
    fields with only whitespace are considered empty (missing).

    :param parts: the already-split fields of the input line.
    :param key_field_num: zero-based index of the key field within `parts`.
    :param ignore_missing_keys: if True, return None for missing keys. If
                                false, missing keys cause an exception
                                (MissingKeyError).
    :param seen_keys: a set of keys already seen.
    :param output_type: duplicate keys raise DuplicateKeyError when this is
                        OutputType.error_on_dups.
    :return: the key value, or None if the field was empty.
    """
    key_val = parts[key_field_num]
    if key_val.strip() == "":
        if not ignore_missing_keys:
            raise MissingKeyError("missing key value")
        else:
            return None
    if key_val in seen_keys and \
       output_type is OutputType.error_on_dups:
        raise DuplicateKeyError(key_val + " appears multiple times as key")
    return key_val
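A hypothetical call, with `parts` standing in for the split fields of one input line (the caller is responsible for adding returned keys to `seen_keys`):
python
seen = set()
parts = ["gene1", "chr2", "100"]
key = get_key_value(parts, 1, ignore_missing_keys=False,
                    seen_keys=seen, output_type=None)
seen.add(key)
print(key)  # chr2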
python
def format(self, version=0x10, wipe=None):
    """Format a FeliCa Lite Tag for NDEF.
    """
    return super(FelicaLite, self).format(version, wipe)
java
public Optional<Tag> getTag() {
    AttributeType tagAttributeType = annotationType.getTagAttribute();
    if (tagAttributeType == null) {
        return Optional.ofNullable(get(Types.TAG).as(Tag.class));
    }
    return Optional.ofNullable(get(tagAttributeType).as(Tag.class));
}
java
public static final CurrencySymbolPosition parseCurrencySymbolPosition(String value) {
    CurrencySymbolPosition result = CurrencySymbolPosition.BEFORE;

    switch (NumberHelper.getInt(value)) {
        case 0: {
            result = CurrencySymbolPosition.BEFORE;
            break;
        }
        case 1: {
            result = CurrencySymbolPosition.AFTER;
            break;
        }
        case 2: {
            result = CurrencySymbolPosition.BEFORE_WITH_SPACE;
            break;
        }
        case 3: {
            result = CurrencySymbolPosition.AFTER_WITH_SPACE;
            break;
        }
    }

    return result;
}
python
def sync_to(self, destination):
    """
    Sync an RPM from a REMOTE to a LOCAL path.

    Returns True if the item required a sync, False if it already
    existed locally.

    TODO: Remove dupe code in Cart.py:sync_remotes()
    """
    rpm = RPM(self.path)
    rpm.sync(destination)
    if rpm.modified:
        juicer.utils.Log.log_debug("Source RPM modified. New 'path': %s" % rpm)
        self.update(rpm.path)
        return True
    return False
java
boolean runResumeReadWrite() {
    boolean ret = false;
    if (anyAreSet(state, FLAG_SHOULD_RESUME_WRITES)) {
        responseChannel.runResume();
        ret = true;
    }
    if (anyAreSet(state, FLAG_SHOULD_RESUME_READS)) {
        requestChannel.runResume();
        ret = true;
    }
    return ret;
}
java
public void updateCachingAndArtifacts(ParsedDeployment parsedDeployment) {
    CommandContext commandContext = Context.getCommandContext();
    final ProcessEngineConfigurationImpl processEngineConfiguration = Context.getProcessEngineConfiguration();
    DeploymentCache<ProcessDefinitionCacheEntry> processDefinitionCache =
        processEngineConfiguration.getDeploymentManager().getProcessDefinitionCache();
    DeploymentEntity deployment = parsedDeployment.getDeployment();

    for (ProcessDefinitionEntity processDefinition : parsedDeployment.getAllProcessDefinitions()) {
        BpmnModel bpmnModel = parsedDeployment.getBpmnModelForProcessDefinition(processDefinition);
        Process process = parsedDeployment.getProcessModelForProcessDefinition(processDefinition);
        ProcessDefinitionCacheEntry cacheEntry = new ProcessDefinitionCacheEntry(processDefinition, bpmnModel, process);
        processDefinitionCache.add(processDefinition.getId(), cacheEntry);
        addDefinitionInfoToCache(processDefinition, processEngineConfiguration, commandContext);

        // Add to deployment for further usage
        deployment.addDeployedArtifact(processDefinition);
    }
}
python
def __skeleton_difference(graph, image, boundary_term, spacing):
    """
    A skeleton for the calculation of intensity difference based boundary terms.

    Iterates over the images dimensions and generates for each an array of absolute
    neighbouring voxel :math:`(p, q)` intensity differences :math:`|I_p, I_q|`. These
    are then passed to the supplied function :math:`g(\cdot)` for boundary term
    computation. Finally the returned edge weights are added to the graph.

    Formally for each edge :math:`(p, q)` of the image, their edge weight is computed as

    .. math::

        w(p,q) = g(|I_p - I_q|)

    ,where :math:`g(\cdot)` is the supplied boundary term function.

    The boundary term function has to take an array of intensity differences as only
    parameter and return an array of the same shape containing the edge weights. For
    the implemented function the condition :math:`g(\cdot)\in(0, 1]` must hold true,
    i.e., it has to be strictly positive with :math:`1` as the upper limit.

    @note the underlying neighbourhood connectivity is 4 for 2D, 6 for 3D, etc.

    @note This function is able to work with images of arbitrary dimensions, but was
    only tested for 2D and 3D cases.

    @param graph An initialized graph.GCGraph object
    @type graph.GCGraph
    @param image The image to compute on
    @type image numpy.ndarray
    @param boundary_term A function to compute the boundary term over an array of
                         absolute intensity differences
    @type boundary_term function
    @param spacing A sequence containing the slice spacing used for weighting the
                   computed neighbourhood weight value for different dimensions. If
                   False, no distance based weighting of the graph edges is performed.
    @param spacing sequence | False
    """
    def intensity_difference(neighbour_one, neighbour_two):
        """
        Takes two voxel arrays constituting neighbours and computes the absolute
        intensity differences.
        """
        return scipy.absolute(neighbour_one - neighbour_two)

    __skeleton_base(graph, image, boundary_term, intensity_difference, spacing)
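A sketch of a compatible boundary term function: any g(.) mapping the difference array into (0, 1] works, and the Gaussian falloff and sigma value here are purely illustrative:
python
import numpy


def boundary_term_gaussian(intensity_differences, sigma=10.0):
    """Map |I_p - I_q| into (0, 1]: identical neighbours get weight 1."""
    return numpy.exp(-intensity_differences ** 2 / (2.0 * sigma ** 2))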
python
def process(self, sched, coro):
    """This is called when the operation is to be processed by the
    scheduler. Code here modifies the scheduler and is usually very
    crafty. Subclasses usually override this method and call it from
    the superclass."""
    if self.prio == priority.DEFAULT:
        self.prio = sched.default_priority
python
def get_historical_price(self, crypto, fiat, at_time):
    """
    Using the quandl.com API, get the historical price (by day).
    The CRYPTOCHART source claims to be from multiple exchange sources
    for price (they say best exchange is most volume).
    """
    # represents the 'width' of the quandl data returned (one day)
    # if quandl ever supports data hourly or something, this can be changed
    interval = datetime.timedelta(hours=48)

    crypto = crypto.lower()
    fiat = fiat.lower()
    at_time = arrow.get(at_time).datetime

    data = crypto_data[crypto]
    name, date_created = data['name'], data['genesis_date']

    if date_created.replace(tzinfo=pytz.utc) > at_time:
        raise Exception("%s (%s) did not exist on %s" % (name, crypto, at_time))

    if crypto == 'btc':
        # Bitcoin to fiat
        if fiat == 'usd':
            if at_time < datetime.datetime(2013, 2, 1, tzinfo=pytz.utc):
                exchange = 'MtGox'
            else:
                exchange = "Bitstamp"
        else:
            exchange = quandl_exchange_btc_to_fiat[fiat.upper()]
        source = "BITCOIN/%s%s" % (exchange.upper(), fiat.upper())
        price_index = 1
    else:
        # some altcoin to bitcoin
        if fiat != 'btc':
            raise Exception("Altcoins are only available via BTC base fiat")
        if crypto == 'ltc':
            source, price_index = ['BTCE/BTCLTC', 4]
        else:
            source, price_index = ['CRYPTOCHART/' + crypto.upper(), 1]

    url = "https://www.quandl.com/api/v1/datasets/%s.json" % source
    trim = "?trim_start={0:%Y-%m-%d}&trim_end={1:%Y-%m-%d}".format(
        at_time - interval, at_time + interval
    )

    response = self.get_url(url + trim).json()

    closest_distance = interval
    best_price = None
    for line in response['data']:
        date = line[0]
        price = line[price_index]

        tick_date = arrow.get(date).datetime
        distance = at_time - tick_date

        if distance.total_seconds() == 0:
            return price, source, tick_date

        if distance < closest_distance:
            closest_distance = distance
            best_price = price
            best_date = tick_date

    if not best_price:
        msg = "Quandl's data source is incomplete. Could not get best price for %s/%s on %s." % (
            crypto, fiat, at_time
        )
        raise NoData(msg)

    return best_price, source, best_date
python
def insert_before(self, obj, value, recursive=True):
    """Insert *value* immediately before *obj*.

    *obj* can be either a string, a :class:`.Node`, or another
    :class:`.Wikicode` object (as created by :meth:`get_sections`, for
    example). If *obj* is a string, we will operate on all instances of
    that string within the code, otherwise only on the specific instance
    given. *value* can be anything parsable by :func:`.parse_anything`.
    If *recursive* is ``True``, we will try to find *obj* within our child
    nodes even if it is not a direct descendant of this
    :class:`.Wikicode` object. If *obj* is not found,
    :exc:`ValueError` is raised.
    """
    if isinstance(obj, (Node, Wikicode)):
        context, index = self._do_strong_search(obj, recursive)
        context.insert(index.start, value)
    else:
        for exact, context, index in self._do_weak_search(obj, recursive):
            if exact:
                context.insert(index.start, value)
            else:
                obj = str(obj)
                self._slice_replace(context, index, obj, str(value) + obj)
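A short usage sketch, assuming this is mwparserfromhell's `Wikicode.insert_before`:
python
import mwparserfromhell

code = mwparserfromhell.parse("foo [[bar]] baz")
link = code.filter_wikilinks()[0]
code.insert_before(link, "NEW ")
print(code)  # foo NEW [[bar]] baz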
java
public synchronized XAttribute put(String key, XAttribute value) {
    if (backingStore == null) {
        try {
            backingStore = backingStoreClass.newInstance();
        } catch (Exception e) {
            // Instantiation failed; this should not happen.
            e.printStackTrace();
        }
    }
    return backingStore.put(key, value);
}
java
public static Map<String, Object> createFormParamRefreshTokenGrantType(String refreshToken, String scope, String clientId) {
    if (refreshToken == null) {
        throw new IllegalArgumentException("Missing the required parameter 'refresh_token'");
    }
    Map<String, Object> formParams = new HashMap<>();
    formParams.put("grant_type", "refresh_token");
    formParams.put("refresh_token", refreshToken);
    if (scope != null) {
        formParams.put("scope", scope);
    }
    if (clientId != null) {
        formParams.put("client_id", clientId);
    }
    return formParams;
}
python
def clipPolygons(self, polygons):
    """
    Recursively remove all polygons in `polygons` that are inside this
    BSP tree.
    """
    if not self.plane:
        return polygons[:]

    front = []
    back = []
    for poly in polygons:
        self.plane.splitPolygon(poly, front, back, front, back)

    if self.front:
        front = self.front.clipPolygons(front)

    if self.back:
        back = self.back.clipPolygons(back)
    else:
        back = []

    front.extend(back)
    return front
python
def _set_annotation_to_str(annotation_data: Mapping[str, Mapping[str, bool]], key: str) -> str:
    """Return a set annotation string."""
    value = annotation_data[key]

    if len(value) == 1:
        return 'SET {} = "{}"'.format(key, list(value)[0])

    x = ('"{}"'.format(v) for v in sorted(value))

    return 'SET {} = {{{}}}'.format(key, ', '.join(x))
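Illustrative input and output (the annotation map is hypothetical); a one-element set collapses to a plain quoted value, and multi-element sets sort lexicographically as strings:
python
data = {"Confidence": {"High": True},
        "Species": {"9606": True, "10090": True}}
print(_set_annotation_to_str(data, "Confidence"))  # SET Confidence = "High"
print(_set_annotation_to_str(data, "Species"))     # SET Species = {"10090", "9606"}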
java
public static Environment declareThisWithin(Decl.FunctionOrMethod decl, Environment environment) {
    if (decl instanceof Decl.Method) {
        Decl.Method method = (Decl.Method) decl;
        environment = environment.declareWithin("this", method.getLifetimes());
    }
    return environment;
}
python
def has_object_permission(checker_name, user, obj):
    """Check if a user has permission to perform an action on an object."""
    if user and user.is_superuser:
        return True

    checker = PermissionsManager.retrieve_checker(checker_name)
    user_roles = get_user_roles(user)

    if not user_roles:
        user_roles = [None]

    return any([checker(user_role, user, obj) for user_role in user_roles])
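A sketch of the kind of checker the function above invokes, as might be registered under a hypothetical name like "edit_document" (the model fields are illustrative):
python
def can_edit_document(user_role, user, obj):
    # Owners may edit regardless of role; everyone else may not.
    return obj.owner_id == user.id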
java
public String getArrayClassName(Class<?> clazz) {
    if (clazz.isArray()) {
        return getArrayClassName(clazz.getComponentType()) + "[]";
    }
    return clazz.getName();
}
python
def create_cloudwatch_log_event(app_name, env, region, rules):
    """Create cloudwatch log event for lambda from rules.

    Args:
        app_name (str): name of the lambda function
        env (str): Environment/Account for lambda function
        region (str): AWS region of the lambda function
        rules (str): Trigger rules from the settings
    """
    session = boto3.Session(profile_name=env, region_name=region)
    cloudwatch_client = session.client('logs')

    log_group = rules.get('log_group')
    filter_name = rules.get('filter_name')
    filter_pattern = rules.get('filter_pattern')

    if not log_group:
        LOG.critical('Log group is required and no "log_group" is defined!')
        raise InvalidEventConfiguration('Log group is required and no "log_group" is defined!')

    if not filter_name:
        LOG.critical('Filter name is required and no filter_name is defined!')
        raise InvalidEventConfiguration('Filter name is required and no filter_name is defined!')

    if filter_pattern is None:
        LOG.critical('Filter pattern is required and no filter_pattern is defined!')
        raise InvalidEventConfiguration('Filter pattern is required and no filter_pattern is defined!')

    lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)
    statement_id = '{}_cloudwatchlog_{}'.format(app_name, filter_name.replace(" ", "_"))
    principal = 'logs.{}.amazonaws.com'.format(region)
    account_id = get_env_credential(env=env)['accountId']
    source_arn = "arn:aws:logs:{0}:{1}:log-group:{2}:*".format(region, account_id, log_group)
    add_lambda_permissions(
        function=lambda_alias_arn,
        statement_id=statement_id,
        action='lambda:InvokeFunction',
        principal=principal,
        source_arn=source_arn,
        env=env,
        region=region)

    cloudwatch_client.put_subscription_filter(
        logGroupName=log_group,
        filterName=filter_name,
        filterPattern=filter_pattern,
        destinationArn=lambda_alias_arn)

    LOG.info("Created Cloudwatch log event with filter: %s", filter_pattern)
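A hypothetical `rules` mapping accepted by the function above; the call itself is commented out because it talks to AWS:
python
rules = {
    "log_group": "/aws/lambda/myapp",
    "filter_name": "error filter",
    "filter_pattern": "?ERROR ?Exception",
}
# create_cloudwatch_log_event("myapp", "dev", "us-east-1", rules)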
java
@Override
public void reconnectFailedConnection(SearchFilter searchFilter) throws SQLException {
    proxy.lock.lock();
    try {
        if (!searchFilter.isInitialConnection()
                && (isExplicitClosed() || !isMasterHostFail())) {
            return;
        }

        currentConnectionAttempts.incrementAndGet();
        resetOldsBlackListHosts();

        List<HostAddress> loopAddress = new LinkedList<>(urlParser.getHostAddresses());
        if (HaMode.FAILOVER.equals(mode)) {
            //put the list in the following order
            // - random order not connected host
            // - random order blacklist host
            // - random order connected host
            loopAddress.removeAll(getBlacklistKeys());
            Collections.shuffle(loopAddress);
            List<HostAddress> blacklistShuffle = new LinkedList<>(getBlacklistKeys());
            blacklistShuffle.retainAll(urlParser.getHostAddresses());
            Collections.shuffle(blacklistShuffle);
            loopAddress.addAll(blacklistShuffle);
        } else {
            //order in sequence
            loopAddress.removeAll(getBlacklistKeys());
            loopAddress.addAll(getBlacklistKeys());
            loopAddress.retainAll(urlParser.getHostAddresses());
        }

        //put connected at end
        if (currentProtocol != null && !isMasterHostFail()) {
            loopAddress.remove(currentProtocol.getHostAddress());
            //loopAddress.add(currentProtocol.getHostAddress());
        }

        MasterProtocol.loop(this, globalInfo, loopAddress, searchFilter);

        //close loop if all connection are retrieved
        if (!isMasterHostFail()) {
            FailoverLoop.removeListener(this);
        }

        //if no error, reset failover variables
        resetMasterFailoverData();
    } finally {
        proxy.lock.unlock();
    }
}
python
def filter_from_url_arg(model_cls, query, arg, query_operator=and_, arg_types=None):
    """
    Parse filter URL argument ``arg`` and apply to ``query``

    Example: 'column1<=value,column2==value' ->
        query.filter(Model.column1 <= value, Model.column2 == value)
    """
    fields = arg.split(',')
    mapper = class_mapper(model_cls)

    if not arg_types:
        arg_types = {}

    exprs = []
    joins = set()
    for expr in fields:
        if expr == "":
            continue

        e_mapper = mapper
        e_model_cls = model_cls

        operator = None
        method = None
        for op in operator_order:
            if op in expr:
                operator = op
                method = operator_to_method[op]
                break
        if operator is None:
            raise Exception('No operator in expression "{0}".'.format(expr))

        (column_names, value) = expr.split(operator)
        column_names = column_names.split('__')
        value = value.strip()

        for column_name in column_names:
            if column_name in arg_types:
                typed_value = arg_types[column_name](value)
            else:
                typed_value = value

            if column_name in e_mapper.relationships:
                joins.add(column_name)
                e_model_cls = e_mapper.attrs[column_name].mapper.class_
                e_mapper = class_mapper(e_model_cls)

        if hasattr(e_model_cls, column_name):
            column = getattr(e_model_cls, column_name)
            exprs.append(getattr(column, method)(typed_value))
        else:
            raise Exception('Invalid property {0} in class {1}.'.format(column_name, e_model_cls))

    return query.join(*joins).filter(query_operator(*exprs))
java
public void moveTo(float offsetX, float offsetY) {
    if (swipeVertical) {
        // Check X offset
        if (toCurrentScale(optimalPageWidth) < getWidth()) {
            offsetX = getWidth() / 2 - toCurrentScale(optimalPageWidth) / 2;
        } else {
            if (offsetX > 0) {
                offsetX = 0;
            } else if (offsetX + toCurrentScale(optimalPageWidth) < getWidth()) {
                offsetX = getWidth() - toCurrentScale(optimalPageWidth);
            }
        }

        // Check Y offset
        if (isZooming()) {
            if (toCurrentScale(optimalPageHeight) < getHeight()) {
                miniMapRequired = false;
                offsetY = getHeight() / 2 - toCurrentScale((currentFilteredPage + 0.5f) * optimalPageHeight);
            } else {
                miniMapRequired = true;
                if (offsetY + toCurrentScale(currentFilteredPage * optimalPageHeight) > 0) {
                    offsetY = -toCurrentScale(currentFilteredPage * optimalPageHeight);
                } else if (offsetY + toCurrentScale((currentFilteredPage + 1) * optimalPageHeight) < getHeight()) {
                    offsetY = getHeight() - toCurrentScale((currentFilteredPage + 1) * optimalPageHeight);
                }
            }
        } else {
            float maxY = calculateCenterOffsetForPage(currentFilteredPage + 1);
            float minY = calculateCenterOffsetForPage(currentFilteredPage - 1);
            if (offsetY < maxY) {
                offsetY = maxY;
            } else if (offsetY > minY) {
                offsetY = minY;
            }
        }
    } else {
        // Check Y offset
        if (toCurrentScale(optimalPageHeight) < getHeight()) {
            offsetY = getHeight() / 2 - toCurrentScale(optimalPageHeight) / 2;
        } else {
            if (offsetY > 0) {
                offsetY = 0;
            } else if (offsetY + toCurrentScale(optimalPageHeight) < getHeight()) {
                offsetY = getHeight() - toCurrentScale(optimalPageHeight);
            }
        }

        // Check X offset
        if (isZooming()) {
            if (toCurrentScale(optimalPageWidth) < getWidth()) {
                miniMapRequired = false;
                offsetX = getWidth() / 2 - toCurrentScale((currentFilteredPage + 0.5f) * optimalPageWidth);
            } else {
                miniMapRequired = true;
                if (offsetX + toCurrentScale(currentFilteredPage * optimalPageWidth) > 0) {
                    offsetX = -toCurrentScale(currentFilteredPage * optimalPageWidth);
                } else if (offsetX + toCurrentScale((currentFilteredPage + 1) * optimalPageWidth) < getWidth()) {
                    offsetX = getWidth() - toCurrentScale((currentFilteredPage + 1) * optimalPageWidth);
                }
            }
        } else {
            float maxX = calculateCenterOffsetForPage(currentFilteredPage + 1);
            float minX = calculateCenterOffsetForPage(currentFilteredPage - 1);
            if (offsetX < maxX) {
                offsetX = maxX;
            } else if (offsetX > minX) {
                offsetX = minX;
            }
        }
    }

    currentXOffset = offsetX;
    currentYOffset = offsetY;
    calculateMinimapAreaBounds();
    invalidate();
}
python
def _get_sorting_key_values(self, array1, array2):
    """return the sorting key values as a series"""
    concat_arrays = numpy.concatenate([array1, array2])
    unique_values = numpy.unique(concat_arrays)
    return numpy.sort(unique_values)
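The same technique on plain arrays (illustrative values): concatenate, deduplicate, and sort to get a stable key order.
python
import numpy

a = numpy.array([3, 1, 2])
b = numpy.array([2, 5])
keys = numpy.sort(numpy.unique(numpy.concatenate([a, b])))
print(keys)  # [1 2 3 5]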
python
def get_computer_desc():
    '''
    Get PRETTY_HOSTNAME value stored in /etc/machine-info
    If this file doesn't exist or the variable doesn't exist, return False.

    :return: Value of PRETTY_HOSTNAME, or False if it does not exist.
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_computer_desc
    '''
    hostname_cmd = salt.utils.path.which('hostnamectl')
    if hostname_cmd:
        desc = __salt__['cmd.run'](
            [hostname_cmd, 'status', '--pretty'],
            python_shell=False
        )
    else:
        desc = None
        pattern = re.compile(r'^\s*PRETTY_HOSTNAME=(.*)$')
        try:
            with salt.utils.files.fopen('/etc/machine-info', 'r') as mach_info:
                for line in mach_info.readlines():
                    line = salt.utils.stringutils.to_unicode(line)
                    match = pattern.match(line)
                    if match:
                        # get rid of whitespace then strip off quotes
                        desc = _strip_quotes(match.group(1).strip())
                        # no break so we get the last occurrence
        except IOError:
            pass
        if desc is None:
            return False

    return desc.replace(r'\"', r'"').replace(r'\n', '\n').replace(r'\t', '\t')
java
@Override
protected AssetInformation extractInformationFromAsset(File archive, final ArtifactMetadata metadata)
        throws PrivilegedActionException, ProductInfoParseException, IOException {
    // Create the asset information
    AssetInformation assetInformtion = new AssetInformation();
    ZipFile zipFile = null;
    try {
        zipFile = AccessController.doPrivileged(new PrivilegedExceptionAction<ZipFile>() {
            @Override
            public ZipFile run() throws IOException {
                return new ZipFile(metadata.getArchive());
            }
        });
        assetInformtion.addProductInfos(zipFile, "wlp/", archive);
        assetInformtion.type = ResourceType.INSTALL;
        assetInformtion.provideFeature = metadata.getProperty("provideFeature");
        assetInformtion.laLocation = "wlp/lafiles_text/LA";
        assetInformtion.liLocation = "wlp/lafiles_text/LI";
        assetInformtion.fileWithLicensesIn = metadata.getArchive();
    } finally {
        if (zipFile != null) {
            zipFile.close();
        }
    }
    return assetInformtion;
}
python
def replace_product_by_id(cls, product_id, product, **kwargs):
    """Replace Product

    Replace all attributes of Product

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.replace_product_by_id(product_id, product, async=True)
    >>> result = thread.get()

    :param async bool
    :param str product_id: ID of product to replace (required)
    :param Product product: Attributes of product to replace (required)
    :return: Product
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._replace_product_by_id_with_http_info(product_id, product, **kwargs)
    else:
        (data) = cls._replace_product_by_id_with_http_info(product_id, product, **kwargs)
        return data
java
protected void initRequest() {
    _hostHeader = null;
    _xForwardedHostHeader = null;
    _expect100Continue = false;

    _cookies.clear();

    _contentLengthIn = -1;
    _hasReadStream = false;
    _readEncoding = null;

    //_request = request;
    //_requestFacade = getHttp().createFacade(this);

    _startTime = -1;
    _expireTime = -1;

    _isUpgrade = false;

    _statusCode = 200;
    _statusMessage = "OK";

    _headerKeysOut.clear();
    _headerValuesOut.clear();

    _contentTypeOut = null;
    _contentEncodingOut = null;

    _contentLengthOut = -1;

    _footerKeys.clear();
    _footerValues.clear();

    out().start();

    _isHeaderWritten = false;
    _isClosed = false;

    //_serverHeader = http().serverHeader();

    _isKeepalive = true;
}
python
def update(self):
    """Get all events and process them by calling update_on_event()."""
    events = pygame.event.get()
    for e in events:
        self.update_on_event(e)
    for wid, cond in self._widgets:
        if cond():
            wid.update(events)
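A minimal sketch of the main loop that typically drives an update() method like this one; the window size and frame rate are arbitrary assumptions, and the loop polls events the same way update() does:

import pygame

pygame.init()
window = pygame.display.set_mode((320, 240))
clock = pygame.time.Clock()
running = True
while running:
    # the same pygame.event.get() polling used by update() above
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    pygame.display.flip()
    clock.tick(30)
pygame.quit()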
python
def create(klass, account, name):
    """
    Creates a new tailored audience.
    """
    audience = klass(account)
    getattr(audience, '__create_audience__')(name)
    try:
        return audience.reload()
    except BadRequest as e:
        audience.delete()
        raise e
python
def _init_map(self):
    """stub"""
    super(IRTItemFormRecord, self)._init_map()
    self.my_osid_object_form._my_map['decimalValues']['difficulty'] = \
        self._decimal_value_metadata['default_decimal_values'][1]
    self.my_osid_object_form._my_map['decimalValues']['discrimination'] = \
        self._decimal_value_metadata['default_decimal_values'][1]
    self.my_osid_object_form._my_map['decimalValues']['pseudoGuessing'] = \
        self._decimal_value_metadata['default_decimal_values'][1]
java
public Observable<ServiceResponse<RefreshIndex>> refreshIndexMethodWithServiceResponseAsync(String listId, String language) {
    if (this.client.baseUrl() == null) {
        throw new IllegalArgumentException("Parameter this.client.baseUrl() is required and cannot be null.");
    }
    if (listId == null) {
        throw new IllegalArgumentException("Parameter listId is required and cannot be null.");
    }
    if (language == null) {
        throw new IllegalArgumentException("Parameter language is required and cannot be null.");
    }
    String parameterizedHost = Joiner.on(", ").join("{baseUrl}", this.client.baseUrl());
    return service.refreshIndexMethod(listId, language, this.client.acceptLanguage(), parameterizedHost, this.client.userAgent())
        .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<RefreshIndex>>>() {
            @Override
            public Observable<ServiceResponse<RefreshIndex>> call(Response<ResponseBody> response) {
                try {
                    ServiceResponse<RefreshIndex> clientResponse = refreshIndexMethodDelegate(response);
                    return Observable.just(clientResponse);
                } catch (Throwable t) {
                    return Observable.error(t);
                }
            }
        });
}
python
def download(self, id, directory):
    """
    Download the file with given identifier to the given directory

    :param id: Identifier of the lot to download
    :param directory: Directory where the downloaded ZIP should be stored
    :return: None
    """
    url = (self._base + 'lot/{0}/download').format(id)
    r = requests.get(url, auth=self._auth, stream=True)
    file_name = r.headers['Content-Disposition'].split('"')[-2]
    if len(directory) < 1:
        return
    with open(os.path.join(directory, file_name), 'wb') as file:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                file.write(chunk)
                file.flush()
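A standalone illustration of the filename extraction used above, assuming a Content-Disposition header of the common quoted form:

header = 'attachment; filename="lot_42.zip"'  # hypothetical header value
file_name = header.split('"')[-2]
print(file_name)  # lot_42.zip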
python
def variables(self):
    """
    Return a sorted list of templatable variables present in the file.

    Templating is accomplished by creating a bracketed object in the same
    way that Python performs `string formatting`_. The editor is able to
    replace the placeholder value of the template. Integer templates are
    positional arguments.

    .. _string formatting: https://docs.python.org/3.6/library/string.html
    """
    string = str(self)
    constants = [match[1:-1] for match in re.findall('{{[A-z0-9]}}', string)]
    variables = re.findall('{[A-z0-9]*}', string)
    return sorted(set(variables).difference(constants))
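The interplay of the two regexes can be checked in isolation; here the double-braced '{{z}}' is treated as a constant and excluded from the result:

import re

string = "coords {x} {y} fixed {{z}}"
constants = [m[1:-1] for m in re.findall('{{[A-z0-9]}}', string)]
variables = re.findall('{[A-z0-9]*}', string)
print(sorted(set(variables).difference(constants)))  # ['{x}', '{y}']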
python
def examples():
    """Prints examples of using the script to the console using colored output.
    """
    script = "Continuous Integration Automation Server"
    explain = ("For complex codes with many collaborators, it is often difficult to maintain "
               "a pristine code that everyone can rely on. If every developer has power to "
               "commit to master, unintentional mistakes happen that can cripple those who "
               "rely on the code for day-to-day business. One way to overcome this is to isolate "
               "the master branch and require collaborators to work on separate forks/branches. "
               "When they are ready to commit their changes to master, they create a pull request "
               "that summarizes the changes and why they want to merge them into the master branch.\n\n"
               "A continuous integration server monitors repositories for new pull requests. When a new "
               "request is made, the proposed changes are downloaded to a local sandbox and tested "
               "against all the existing code. If the master branch has a rich suite of unit tests, "
               "this will detect any bugs in the proposed merger. If all the tests pass, then the "
               "owner of the master branch can have confidence that the merger will be okay.")
    contents = [(("Configure this machine to be a CI server. Unfortunately, this step requires "
                  "sudo authority because the API accesses the crontab for arbitrary users."),
                 "sudo ci.py -setup",
                 ("Before this setup can proceed, you need to make sure the global configuration "
                  "XML file has been created and the environment variable to its path has been set:\n"
                  "\texport PYCI_XML='~/path/to/global.xml'.\nSee also: -rollback")),
                (("Remove the cron tab from the server, delete the list of installed repositories "
                  "and undo anything else that the script did when -setup was used."),
                 "sudo ci.py -rollback",
                 ("This action deletes the files specified in 'ARCHFILE' and 'DATAFILE' in 'global.xml'. "
                  "Also, the crontab is removed, which is why sudo privileges are needed. See also -setup.")),
                (("Install the repository described by myrepo.xml onto the CI server so that "
                  "its pull requests are monitored and unit tested."),
                 "ci.py -install myrepo.xml",
                 ("After installation, you can query the repository immediately by running the "
                  "script with -cron. You can install a list of repositories with a single command. "
                  "See also -uninstall.")),
                (("Run the routines that check for new pull requests, run the unit tests, and post "
                  "the results to the media wiki."),
                 "ci.py -cron", "")]
    required = ("REQUIRED:\n\t-'repo.xml' file for *each* repository that gets installed on the server.\n"
                "\t-'global.xml' file with configuration settings for *all* repositories.\n"
                "\t- git user and API key with push access for *each* repository installed.")
    output = ("RETURNS: prints status information to stdout.")
    details = ("This script installs a continuous integration server on the local machine by "
               "configuring a cron to call this script every couple of minutes. The script interacts "
               "with github using an API to monitor the pull requests. When new ones are found, the "
               "list of tests specified in the 'repo.xml' file is executed and the results are posted "
               "to a media wiki page associated with the specific pull request. For more details, see "
               "the online repo at https://github.com/rosenbrockc/ci.")
    outputfmt = ("")

    from pyci.msg import example
    example(script, explain, contents, required, output, outputfmt, details)
java
public PagedList<VulnerabilityAssessmentScanRecordInner> listByDatabase(final String resourceGroupName, final String serverName, final String databaseName) {
    ServiceResponse<Page<VulnerabilityAssessmentScanRecordInner>> response = listByDatabaseSinglePageAsync(resourceGroupName, serverName, databaseName).toBlocking().single();
    return new PagedList<VulnerabilityAssessmentScanRecordInner>(response.body()) {
        @Override
        public Page<VulnerabilityAssessmentScanRecordInner> nextPage(String nextPageLink) {
            return listByDatabaseNextSinglePageAsync(nextPageLink).toBlocking().single().body();
        }
    };
}
python
def _safe_release_connection(self):
    """Try to release a connection. If an exception is hit, log and return
    the error string.
    """
    try:
        self.adapter.release_connection()
    except Exception as exc:
        logger.debug(
            'Error releasing connection for node {}: {!s}\n{}'
            .format(self.node.name, exc, traceback.format_exc())
        )
        return dbt.compat.to_string(exc)

    return None
python
def _set_factory_context(factory_class, bundle_context):
    # type: (type, Optional[BundleContext]) -> Optional[FactoryContext]
    """
    Transforms the context data dictionary into its FactoryContext object form.

    :param factory_class: A manipulated class
    :param bundle_context: The class bundle context
    :return: The factory context, None on error
    """
    try:
        # Try to get the factory context (built using decorators)
        context = getattr(factory_class, constants.IPOPO_FACTORY_CONTEXT)
    except AttributeError:
        # The class has not been manipulated, or too badly
        return None

    if not context.completed:
        # Partial context (class not manipulated)
        return None

    # Associate the factory to the bundle context
    context.set_bundle_context(bundle_context)
    return context
python
def is_balance_proof_usable_onchain(
        received_balance_proof: BalanceProofSignedState,
        channel_state: NettingChannelState,
        sender_state: NettingChannelEndState,
) -> SuccessOrError:
    """ Checks the balance proof can be used on-chain.

    For a balance proof to be valid it must be newer than the previous one,
    i.e. the nonce must increase, the signature must tie the balance proof to
    the correct channel, and the values must not result in an under/overflow
    onchain.

    Important: This predicate does not validate all the message fields. The
    fields locksroot, transferred_amount, and locked_amount **MUST** be
    validated elsewhere based on the message type.
    """
    expected_nonce = get_next_nonce(sender_state)

    is_valid_signature_, signature_msg = is_valid_signature(
        received_balance_proof,
        sender_state.address,
    )

    result: SuccessOrError

    # TODO: Accept unlock messages if the node has not yet sent a transaction
    # with the balance proof to the blockchain, this will save one call to
    # unlock on-chain for the non-closing party.
    if get_status(channel_state) != CHANNEL_STATE_OPENED:
        # The channel must be opened, otherwise if receiver is the closer, the
        # balance proof cannot be used onchain.
        msg = f'The channel is already closed.'
        result = (False, msg)

    elif received_balance_proof.channel_identifier != channel_state.identifier:
        # Informational message, the channel_identifier **validated by the
        # signature** must match for the balance_proof to be valid.
        msg = (
            f"channel_identifier does not match. "
            f"expected: {channel_state.identifier} "
            f"got: {received_balance_proof.channel_identifier}."
        )
        result = (False, msg)

    elif received_balance_proof.token_network_identifier != channel_state.token_network_identifier:
        # Informational message, the token_network_identifier **validated by
        # the signature** must match for the balance_proof to be valid.
        msg = (
            f"token_network_identifier does not match. "
            f"expected: {channel_state.token_network_identifier} "
            f"got: {received_balance_proof.token_network_identifier}."
        )
        result = (False, msg)

    elif received_balance_proof.chain_id != channel_state.chain_id:
        # Informational message, the chain_id **validated by the signature**
        # must match for the balance_proof to be valid.
        msg = (
            f"chain_id does not match channel's "
            f"chain_id. expected: {channel_state.chain_id} "
            f"got: {received_balance_proof.chain_id}."
        )
        result = (False, msg)

    elif not is_balance_proof_safe_for_onchain_operations(received_balance_proof):
        transferred_amount_after_unlock = (
            received_balance_proof.transferred_amount +
            received_balance_proof.locked_amount
        )
        msg = (
            f"Balance proof total transferred amount would overflow onchain. "
            f"max: {UINT256_MAX} result would be: {transferred_amount_after_unlock}"
        )
        result = (False, msg)

    elif received_balance_proof.nonce != expected_nonce:
        # The nonces must increase sequentially, otherwise there is a
        # synchronization problem.
        msg = (
            f'Nonce did not change sequentially, expected: {expected_nonce} '
            f'got: {received_balance_proof.nonce}.'
        )
        result = (False, msg)

    elif not is_valid_signature_:
        # The signature must be valid, otherwise the balance proof cannot be
        # used onchain.
        result = (False, signature_msg)

    else:
        result = (True, None)

    return result
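The helper is_balance_proof_safe_for_onchain_operations is not shown here; a plausible minimal sketch of the overflow condition it guards against, under the assumption that the onchain sum must fit in a uint256, is:

UINT256_MAX = 2 ** 256 - 1

def is_safe_for_onchain_sketch(transferred_amount, locked_amount):
    # hypothetical simplification of the predicate referenced above
    return transferred_amount + locked_amount <= UINT256_MAX

print(is_safe_for_onchain_sketch(UINT256_MAX, 1))  # False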
python
def write(self, path=None, *args, **kwargs):
    """
    Perform formatting and write the formatted string to a file or stdout.

    Optional arguments can be used to format the editor's contents. If no
    file path is given, prints to standard output.

    Args:
        path (str): Full file path (default None, prints to stdout)
        *args: Positional arguments to format the editor with
        **kwargs: Keyword arguments to format the editor with
    """
    if path is None:
        print(self.format(*args, **kwargs))
    else:
        with io.open(path, 'w', newline="") as f:
            f.write(self.format(*args, **kwargs))
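A standalone note on the newline="" argument used above: it stops Python from translating line endings, so the formatted string is written byte-for-byte (the file name here is illustrative):

import io

with io.open('demo.txt', 'w', newline="") as f:
    # '\r\n' stays '\r\n' instead of being rewritten for the platform
    f.write("line1\r\nline2\n")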
java
private void free(IoSession session) {
    IoBuffer buf = buffersMap.remove(session);
    if (buf != null) {
        buf.free();
    }
}
python
def html5_parse_simple_color(input):
    """
    Apply the simple color parsing algorithm from section 2.4.6 of HTML5.
    """
    # 1. Let input be the string being parsed.
    #
    # 2. If input is not exactly seven characters long, then return an
    #    error.
    if not isinstance(input, unicode) or len(input) != 7:
        raise ValueError(
            u"An HTML5 simple color must be a Unicode string "
            u"exactly seven characters long."
        )

    # 3. If the first character in input is not a U+0023 NUMBER SIGN
    #    character (#), then return an error.
    if not input.startswith('#'):
        raise ValueError(
            u"An HTML5 simple color must begin with the "
            u"character '#' (U+0023)."
        )

    # 4. If the last six characters of input are not all ASCII hex
    #    digits, then return an error.
    if not all(c in string.hexdigits for c in input[1:]):
        raise ValueError(
            u"An HTML5 simple color must contain exactly six ASCII hex digits."
        )

    # 5. Let result be a simple color.
    #
    # 6. Interpret the second and third characters as a hexadecimal
    #    number and let the result be the red component of result.
    #
    # 7. Interpret the fourth and fifth characters as a hexadecimal
    #    number and let the result be the green component of result.
    #
    # 8. Interpret the sixth and seventh characters as a hexadecimal
    #    number and let the result be the blue component of result.
    #
    # 9. Return result.
    return HTML5SimpleColor(
        int(input[1:3], 16),
        int(input[3:5], 16),
        int(input[5:7], 16)
    )
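The same parsing steps, condensed into a standalone function for experimentation; the plain tuple return stands in for the HTML5SimpleColor named tuple:

import string

def parse_hex7(value):
    # condensed version of the checks above
    if len(value) != 7 or not value.startswith('#'):
        raise ValueError("expected a '#rrggbb' string")
    if not all(c in string.hexdigits for c in value[1:]):
        raise ValueError("expected six ASCII hex digits")
    return (int(value[1:3], 16), int(value[3:5], 16), int(value[5:7], 16))

print(parse_hex7('#0099cc'))  # (0, 153, 204)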
java
@Override
public void lifecycleEvent(LifecycleEvent event) { // called several times
    super.lifecycleEvent(event);
    if (!alreadyFirstLifecycle) { // ContextConfig is not thread-safe so no care
        alreadyFirstLifecycle = true;
        if (isWebFragmentsSelectorEnabled()) {
            final JarScanner jarScanner = extractJarScanner(); // not null
            final JarScanFilter jarScanFilter = jarScanner.getJarScanFilter(); // not null
            jarScanner.setJarScanFilter(createSelectableJarScanFilter(jarScanFilter));
        }
    }
}
java
@Override
public Request<RequestSpotFleetRequest> getDryRunRequest() {
    Request<RequestSpotFleetRequest> request = new RequestSpotFleetRequestMarshaller().marshall(this);
    request.addParameter("DryRun", Boolean.toString(true));
    return request;
}
python
def get_function(pkgpath):
    """Take a full path to a python method or class, for example
    mypkg.subpkg.method, and return the method or class (after importing the
    required packages).
    """
    # Extract the module and function name from pkgpath
    elems = pkgpath.split('.')
    if len(elems) <= 1:
        raise PyMacaronCoreException("Path %s is too short. Should be at least module.func." % pkgpath)
    func_name = elems[-1]
    func_module = '.'.join(elems[0:-1])

    # Load the function's module and get the function
    try:
        m = import_module(func_module)
        f = getattr(m, func_name)
        return f
    except Exception as e:
        t = traceback.format_exc()
        raise PyMacaronCoreException("Failed to import %s: %s\nTrace:\n%s" % (pkgpath, str(e), t))
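A minimal sketch of the same dotted-path lookup without the custom exception type, using only the standard library:

from importlib import import_module

def get_function_sketch(pkgpath):
    # split "pkg.mod.attr" into module path and attribute name
    module_name, _, func_name = pkgpath.rpartition('.')
    return getattr(import_module(module_name), func_name)

print(get_function_sketch('os.path.join')('a', 'b'))  # a/b on POSIX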
python
def consume_arguments(self, argument_list):
    """
    Takes arguments from a list while there are parameters that can
    accept them
    """
    while True:
        argument_count = len(argument_list)
        for parameter in self.values():
            argument_list = parameter.consume_arguments(argument_list)
        if len(argument_list) == argument_count:
            return argument_list
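The fixpoint pattern above, written standalone: keep applying consumers until a full pass removes nothing. The consumer here is a toy stand-in for the Parameter.consume_arguments calls in the original:

def consume_all(consumers, items):
    while True:
        before = len(items)
        for consume in consumers:
            items = consume(items)
        if len(items) == before:
            return items

take_ints = lambda xs: [x for x in xs if not isinstance(x, int)]
print(consume_all([take_ints], ['a', 1, 'b', 2]))  # ['a', 'b']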
java
@Override
public Map<URI, MatchResult> findOperationsClassifiedBySome(Set<URI> modelReferences) {
    return findServicesClassifiedBySome(modelReferences, LogicConceptMatchType.Subsume);
}
python
def sort_def_dict(def_dict: Dict[str, List[str]]) -> Dict[str, List[str]]:
    """Sort the value lists of a defaultdict(list) in place and return it."""
    for _, dd_list in def_dict.items():
        dd_list.sort()
    return def_dict
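Usage is straightforward; inlining the loop from sort_def_dict above, sorting happens in place and the same mapping comes back:

from collections import defaultdict

dd = defaultdict(list)
dd['fruit'].extend(['pear', 'apple'])
dd['veg'].extend(['onion', 'bean'])
for _, dd_list in dd.items():
    dd_list.sort()
print(dict(dd))  # {'fruit': ['apple', 'pear'], 'veg': ['bean', 'onion']}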
java
@SuppressWarnings("unchecked") public void addItemEventListener(@SuppressWarnings("rawtypes") ItemEventListener listener) { StanzaListener conListener = new ItemEventTranslator(listener); itemEventToListenerMap.put(listener, conListener); pubSubManager.getConnection().addSyncStanzaListener(conListener, new EventContentFilter(EventElementType.items.toString(), "item")); }
java
@Override
public CPRuleUserSegmentRel[] findByCPRuleId_PrevAndNext(
        long CPRuleUserSegmentRelId, long CPRuleId,
        OrderByComparator<CPRuleUserSegmentRel> orderByComparator)
        throws NoSuchCPRuleUserSegmentRelException {
    CPRuleUserSegmentRel cpRuleUserSegmentRel = findByPrimaryKey(CPRuleUserSegmentRelId);

    Session session = null;

    try {
        session = openSession();

        CPRuleUserSegmentRel[] array = new CPRuleUserSegmentRelImpl[3];

        array[0] = getByCPRuleId_PrevAndNext(session, cpRuleUserSegmentRel,
                CPRuleId, orderByComparator, true);

        array[1] = cpRuleUserSegmentRel;

        array[2] = getByCPRuleId_PrevAndNext(session, cpRuleUserSegmentRel,
                CPRuleId, orderByComparator, false);

        return array;
    }
    catch (Exception e) {
        throw processException(e);
    }
    finally {
        closeSession(session);
    }
}
python
def getInstructions(self):
    '''
    The same as calling ``client.getInstructions(build.setID)``.

    :returns: A list of instructions.
    :rtype: list
    '''
    self._instructions = self._client.getInstructions(self.setID)
    return self._instructions
python
def individual(self, ind_id=None):
    """Return an individual object

    Args:
        ind_id (str): An individual id

    Returns:
        individual (puzzle.models.individual)
    """
    for ind_obj in self.individual_objs:
        if ind_obj.ind_id == ind_id:
            return ind_obj
    return None
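The same linear lookup written standalone with a generator expression; the _Ind class is a toy stand-in for puzzle's individual model:

class _Ind:
    def __init__(self, ind_id):
        self.ind_id = ind_id

individual_objs = [_Ind('fam1-1'), _Ind('fam1-2')]
found = next((ind for ind in individual_objs if ind.ind_id == 'fam1-2'), None)
print(found.ind_id if found else None)  # fam1-2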
java
public Optional<KeyTransaction> show(long transactionId) {
    return HTTP.GET(String.format("/v2/key_transactions/%d.json", transactionId), KEY_TRANSACTION);
}
python
def parse_endnotes(document, xmlcontent):
    """Parse endnotes document.

    Endnotes are defined in file 'endnotes.xml'.
    """
    endnotes = etree.fromstring(xmlcontent)

    document.endnotes = {}

    for note in endnotes.xpath('.//w:endnote', namespaces=NAMESPACES):
        paragraphs = [parse_paragraph(document, para)
                      for para in note.xpath('.//w:p', namespaces=NAMESPACES)]
        document.endnotes[note.attrib[_name('{{{w}}}id')]] = paragraphs
java
public static URL constructResourceURL(Class<?> c, String resName) {
    String strUrl = "";
    if (resName.startsWith("/"))
        // Resource name starts with "/": resolve it from the root of the
        // class path containing the class
        strUrl = getClassRootPath(c) + resName.substring(1);
    else
        // Resource name does not start with "/": resolve it relative to
        // the directory containing the class
        strUrl = getClassPath(c) + resName;
    try {
        return new URL(strUrl);
    } catch (Exception e) {
        return null;
    }
}
python
def Convert(self, metadata, config, token=None):
    """Converts DNSClientConfiguration to ExportedDNSClientConfiguration."""
    result = ExportedDNSClientConfiguration(
        metadata=metadata,
        dns_servers=" ".join(config.dns_server),
        dns_suffixes=" ".join(config.dns_suffix))
    yield result
python
def prepare_labels(label_type, org_xml_dir=ORG_XML_DIR, label_dir=LABEL_DIR):
    """ Prepare the neural network output targets."""
    if not os.path.exists(os.path.join(label_dir, "TEXT")):
        os.makedirs(os.path.join(label_dir, "TEXT"))
    if not os.path.exists(os.path.join(label_dir, "WORDLIST")):
        os.makedirs(os.path.join(label_dir, "WORDLIST"))

    for path in Path(org_xml_dir).glob("*.xml"):
        fn = path.name
        prefix, _ = os.path.splitext(fn)
        rec_type, sents, _, _ = pangloss.get_sents_times_and_translations(str(path))

        # Write the sentence transcriptions to file
        sents = [preprocess_na(sent, label_type) for sent in sents]
        for i, sent in enumerate(sents):
            if sent.strip() == "":
                # Then there's no transcription, so ignore this.
                continue
            out_fn = "%s.%d.%s" % (prefix, i, label_type)
            sent_path = os.path.join(label_dir, rec_type, out_fn)
            with open(sent_path, "w") as sent_f:
                print(sent, file=sent_f)
python
def tar_open(f):
    """Open either a filename or a file-like object as a TarFile.

    Parameters
    ----------
    f : str or file-like object
        The filename or file-like object from which to read.

    Returns
    -------
    TarFile
        A `TarFile` instance.
    """
    if isinstance(f, six.string_types):
        return tarfile.open(name=f)
    else:
        return tarfile.open(fileobj=f)
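The two branches map onto the two tarfile.open call forms; an in-memory round trip exercises the file-like branch:

import io
import tarfile

buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode='w'):
    pass  # create an empty archive in memory
buf.seek(0)
print(tarfile.open(fileobj=buf).getnames())  # []
# tarfile.open(name='archive.tar') would exercise the filename branch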
python
def wait(value, must_be_child=False):
    '''Wait for a possible asynchronous value to complete.
    '''
    current = getcurrent()
    parent = current.parent
    if must_be_child and not parent:
        raise MustBeInChildGreenlet('Cannot wait on main greenlet')
    return parent.switch(value) if parent else value
python
def register_converter(self, converter, conv_type, conv_format=None, *, name=None):
    """Register custom path parameter converter.

    :param BaseConverter converter: Converter
        Subclass of werkzeug's BaseConverter
    :param str conv_type: Parameter type
    :param str conv_format: Parameter format (optional)
    :param str name: Name of the converter. If not None, this name is used
        to register the converter in the Flask app.

    Example::

        api.register_converter(
            UUIDConverter, 'string', 'UUID', name='uuid')

        @blp.route('/pets/<uuid:pet_id>')
        # ...
        api.register_blueprint(blp)

    This registers the converter in the Flask app and in the internal
    APISpec instance.

    Once the converter is registered, all paths using it will have the
    corresponding path parameter documented with the right type and format.

    The `name` parameter need not be passed if the converter is already
    registered in the app, for instance if it belongs to a Flask extension
    that already registers it in the app.
    """
    if name:
        self.app.url_map.converters[name] = converter
    self.spec.register_converter(converter, conv_type, conv_format)
java
public void marshall(DescribeGlobalTableRequest describeGlobalTableRequest, ProtocolMarshaller protocolMarshaller) {
    if (describeGlobalTableRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }

    try {
        protocolMarshaller.marshall(describeGlobalTableRequest.getGlobalTableName(), GLOBALTABLENAME_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
java
public final hqlParser.updateStatement_return updateStatement() throws RecognitionException {
    hqlParser.updateStatement_return retval = new hqlParser.updateStatement_return();
    retval.start = input.LT(1);

    CommonTree root_0 = null;

    Token UPDATE6=null;
    Token VERSIONED7=null;
    ParserRuleReturnScope optionalFromTokenFromClause8 =null;
    ParserRuleReturnScope setClause9 =null;
    ParserRuleReturnScope whereClause10 =null;

    CommonTree UPDATE6_tree=null;
    CommonTree VERSIONED7_tree=null;

    try {
        // hql.g:165:2: ( UPDATE ^ ( VERSIONED )? optionalFromTokenFromClause setClause ( whereClause )? )
        // hql.g:165:4: UPDATE ^ ( VERSIONED )? optionalFromTokenFromClause setClause ( whereClause )?
        {
            root_0 = (CommonTree)adaptor.nil();

            UPDATE6=(Token)match(input,UPDATE,FOLLOW_UPDATE_in_updateStatement627);
            UPDATE6_tree = (CommonTree)adaptor.create(UPDATE6);
            root_0 = (CommonTree)adaptor.becomeRoot(UPDATE6_tree, root_0);

            // hql.g:165:12: ( VERSIONED )?
            int alt2=2;
            int LA2_0 = input.LA(1);
            if ( (LA2_0==VERSIONED) ) {
                alt2=1;
            }
            switch (alt2) {
                case 1 :
                    // hql.g:165:13: VERSIONED
                    {
                        VERSIONED7=(Token)match(input,VERSIONED,FOLLOW_VERSIONED_in_updateStatement631);
                        VERSIONED7_tree = (CommonTree)adaptor.create(VERSIONED7);
                        adaptor.addChild(root_0, VERSIONED7_tree);
                    }
                    break;
            }

            pushFollow(FOLLOW_optionalFromTokenFromClause_in_updateStatement637);
            optionalFromTokenFromClause8=optionalFromTokenFromClause();
            state._fsp--;
            adaptor.addChild(root_0, optionalFromTokenFromClause8.getTree());

            pushFollow(FOLLOW_setClause_in_updateStatement641);
            setClause9=setClause();
            state._fsp--;
            adaptor.addChild(root_0, setClause9.getTree());

            // hql.g:168:3: ( whereClause )?
            int alt3=2;
            int LA3_0 = input.LA(1);
            if ( (LA3_0==WHERE) ) {
                alt3=1;
            }
            switch (alt3) {
                case 1 :
                    // hql.g:168:4: whereClause
                    {
                        pushFollow(FOLLOW_whereClause_in_updateStatement646);
                        whereClause10=whereClause();
                        state._fsp--;
                        adaptor.addChild(root_0, whereClause10.getTree());
                    }
                    break;
            }
        }

        retval.stop = input.LT(-1);
        retval.tree = (CommonTree)adaptor.rulePostProcessing(root_0);
        adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop);
    }
    catch (RecognitionException re) {
        reportError(re);
        recover(input,re);
        retval.tree = (CommonTree)adaptor.errorNode(input, retval.start, input.LT(-1), re);
    }
    finally {
        // do for sure before leaving
    }
    return retval;
}
java
public static long extractTimestamp64Ascii(String id64ascii) throws NumberFormatException {
    return extractTimestamp64(Long.parseLong(id64ascii, Character.MAX_RADIX));
}
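Character.MAX_RADIX in Java is 36, so the decode step corresponds to a base-36 parse; the equivalent in Python (the id value below is illustrative, not from the original source):

id64ascii = '1z141z4'  # hypothetical base-36 id
value = int(id64ascii, 36)
print(value)  # the 64-bit integer from which the timestamp bits are extracted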