language: stringclasses (2 values)
func_code_string: stringlengths (63 to 466k)
python
def get_provisioned_table_read_units(table_name):
    """ Returns the number of provisioned read units for the table

    :type table_name: str
    :param table_name: Name of the DynamoDB table
    :returns: int -- Number of read units
    """
    try:
        desc = DYNAMODB_CONNECTION.describe_table(table_name)
    except JSONResponseError:
        raise

    read_units = int(
        desc[u'Table'][u'ProvisionedThroughput'][u'ReadCapacityUnits'])

    logger.debug('{0} - Currently provisioned read units: {1:d}'.format(
        table_name, read_units))
    return read_units
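A minimal sketch of how this function could be exercised offline. DYNAMODB_CONNECTION, JSONResponseError and logger are module-level globals in the original (boto-style); the stub below is hypothetical and only mirrors the expected response shape, assuming the globals live in the same module as the function.

import logging

logger = logging.getLogger(__name__)

class _StubDynamo(object):
    # sketch: mirrors the shape of boto's describe_table response
    def describe_table(self, table_name):
        return {u'Table': {u'ProvisionedThroughput': {u'ReadCapacityUnits': 10}}}

DYNAMODB_CONNECTION = _StubDynamo()
# With these globals in scope:
# get_provisioned_table_read_units('my-table')  -> 10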
java
public List<CmsSitemapTreeNodeData> getChildren(CmsSitemapTreeNodeData nodeData) {
    CmsClientSitemapEntry entry = nodeData.getClientEntry();
    List<CmsSitemapTreeNodeData> result = Lists.newArrayList();
    try {
        CmsVfsSitemapService svc = getSitemapService();
        CmsClientSitemapEntry ent = svc.getChildren(m_root.getRootPath(), entry.getId(), 1);
        for (CmsClientSitemapEntry subEnt : ent.getSubEntries()) {
            if (subEnt.isInNavigation()
                && ((subEnt.getDefaultFileId() != null) || subEnt.isNavigationLevelType())) {
                try {
                    CmsUUID idToRead = subEnt.getId();
                    if (subEnt.getDefaultFileId() != null) {
                        idToRead = subEnt.getDefaultFileId();
                    }
                    Locale l1 = OpenCms.getLocaleManager().getDefaultLocale(
                        svc.getCmsObject(),
                        svc.getCmsObject().readResource(idToRead));
                    Locale l2 = OpenCms.getLocaleManager().getDefaultLocale(
                        svc.getCmsObject(),
                        svc.getCmsObject().readResource(ent.getId(), CmsResourceFilter.IGNORE_EXPIRATION));
                    if (!l1.equals(l2)) {
                        continue;
                    }
                } catch (Exception e) {
                    LOG.error(e.getLocalizedMessage(), e);
                }
                CmsSitemapTreeNodeData data = new CmsSitemapTreeNodeData(
                    m_localeContext.getRootLocale(),
                    m_localeContext.getComparisonLocale());
                if (m_foldersWithNoChildFolders.contains(subEnt.getId())) {
                    data.setHasNoChildren(true);
                }
                data.setClientEntry(subEnt);
                try {
                    data.initialize(m_cms);
                    result.add(data);
                } catch (Exception e) {
                    LOG.error(e.getLocalizedMessage());
                }
            }
        }
    } catch (CmsException e) {
        LOG.error(e.getLocalizedMessage(), e);
    } catch (CmsRpcException e) {
        LOG.error(e.getLocalizedMessage(), e);
    }
    return result;
}
java
public void marshall(BatchReadException batchReadException, ProtocolMarshaller protocolMarshaller) {
    if (batchReadException == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(batchReadException.getType(), TYPE_BINDING);
        protocolMarshaller.marshall(batchReadException.getMessage(), MESSAGE_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
java
public void setCommerceNotificationTemplateUserSegmentRelLocalService(
    com.liferay.commerce.notification.service.CommerceNotificationTemplateUserSegmentRelLocalService
        commerceNotificationTemplateUserSegmentRelLocalService) {
    this.commerceNotificationTemplateUserSegmentRelLocalService =
        commerceNotificationTemplateUserSegmentRelLocalService;
}
java
@Override
public ListBonusPaymentsResult listBonusPayments(ListBonusPaymentsRequest request) {
    request = beforeClientExecution(request);
    return executeListBonusPayments(request);
}
java
public static void main(String[] pArgs) throws IOException {
    if (pArgs.length == 1) {
        System.out.println(encode(pArgs[0].getBytes()));
    } else if (pArgs.length == 2 && ("-d".equals(pArgs[0]) || "--decode".equals(pArgs[0]))) {
        System.out.println(new String(decode(pArgs[1])));
    } else {
        System.err.println("BASE64 [ -d | --decode ] arg");
        System.err.println("Encodes or decodes a given string");
        System.exit(5);
    }
}
java
public static void checkStringNotNullOrEmpty(String parameterName, String value) {
    if (TextUtils.isEmpty(value)) {
        throw Exceptions.IllegalArgument("Current input string %s is %s.",
            parameterName, value == null ? "null" : "empty");
    }
}
python
def daylight_saving_end_day(self, value=None):
    """Corresponds to IDD Field `daylight_saving_end_day`

    Args:
        value (str): value for IDD Field `daylight_saving_end_day`
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type str '
                'for field `daylight_saving_end_day`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma '
                             'for field `daylight_saving_end_day`')
    self._daylight_saving_end_day = value
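The validation in this setter boils down to two checks: the value must be coercible to str, and must not contain a comma. A standalone sketch of just that logic (the helper name `_validate_day` is hypothetical):

def _validate_day(value):
    # sketch of the checks performed in daylight_saving_end_day
    if value is None:
        return None
    value = str(value)
    if ',' in value:
        raise ValueError('value should not contain a comma')
    return value

print(_validate_day('4/30'))  # '4/30'
# _validate_day('4,30')       # raises ValueError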
python
def update(self, **values):
    """
    Performs an update on the row selected by the queryset. Include
    values to update in the update like so:

    .. code-block:: python

        Model.objects(key=n).update(value='x')

    Passing in updates for columns which are not part of the model will
    raise a ValidationError.

    Per column validation will be performed, but instance level
    validation will not (i.e., `Model.validate` is not called). This is
    sometimes referred to as a blind update.

    For example:

    .. code-block:: python

        class User(Model):
            id = Integer(primary_key=True)
            name = Text()

        setup(["localhost"], "test")
        sync_table(User)

        u = User.create(id=1, name="jon")

        User.objects(id=1).update(name="Steve")

        # sets name to null
        User.objects(id=1).update(name=None)

    Also supported is blindly adding and removing elements from container
    columns, without loading a model instance from Cassandra.

    Using the syntax `.update(column_name={x, y, z})` will overwrite the
    contents of the container, like updating a non container column.
    However, adding `__<operation>` to the end of the keyword arg makes
    the update call add or remove items from the collection, without
    overwriting the entire column.

    Given the model below, here are the operations that can be performed
    on the different container columns:

    .. code-block:: python

        class Row(Model):
            row_id = columns.Integer(primary_key=True)
            set_column = columns.Set(Integer)
            list_column = columns.List(Integer)
            map_column = columns.Map(Integer, Integer)

    :class:`~cqlengine.columns.Set`

    - `add`: adds the elements of the given set to the column
    - `remove`: removes the elements of the given set from the column

    .. code-block:: python

        # add elements to a set
        Row.objects(row_id=5).update(set_column__add={6})

        # remove elements from a set
        Row.objects(row_id=5).update(set_column__remove={4})

    :class:`~cqlengine.columns.List`

    - `append`: appends the elements of the given list to the end of the column
    - `prepend`: prepends the elements of the given list to the beginning of the column

    .. code-block:: python

        # append items to a list
        Row.objects(row_id=5).update(list_column__append=[6, 7])

        # prepend items to a list
        Row.objects(row_id=5).update(list_column__prepend=[1, 2])

    :class:`~cqlengine.columns.Map`

    - `update`: adds the given keys/values to the column, creating new
      entries if they didn't exist, and overwriting old ones if they did

    .. code-block:: python

        # add items to a map
        Row.objects(row_id=5).update(map_column__update={1: 2, 3: 4})

        # remove items from a map
        Row.objects(row_id=5).update(map_column__remove={1, 2})
    """
    if not values:
        return

    nulled_columns = set()
    updated_columns = set()
    us = UpdateStatement(self.column_family_name, where=self._where,
                         ttl=self._ttl, timestamp=self._timestamp,
                         conditionals=self._conditional,
                         if_exists=self._if_exists)
    for name, val in values.items():
        col_name, col_op = self._parse_filter_arg(name)
        col = self.model._columns.get(col_name)
        # check for nonexistent columns
        if col is None:
            raise ValidationError(
                "{0}.{1} has no column named: {2}".format(
                    self.__module__, self.model.__name__, col_name))
        # check for primary key update attempts
        if col.is_primary_key:
            raise ValidationError(
                "Cannot apply update to primary key '{0}' for {1}.{2}".format(
                    col_name, self.__module__, self.model.__name__))

        if col_op == 'remove' and isinstance(col, columns.Map):
            if not isinstance(val, set):
                raise ValidationError(
                    "Cannot apply update operation '{0}' on column '{1}' "
                    "with value '{2}'. A set is required.".format(
                        col_op, col_name, val))
            val = {v: None for v in val}
        else:
            # we should not provide default values in this use case.
            val = col.validate(val)

        if val is None:
            nulled_columns.add(col_name)
            continue

        us.add_update(col, val, operation=col_op)
        updated_columns.add(col_name)

    if us.assignments:
        self._execute(us)

    if nulled_columns:
        delete_conditional = ([condition for condition in self._conditional
                               if condition.field not in updated_columns]
                              if self._conditional else None)
        ds = DeleteStatement(self.column_family_name, fields=nulled_columns,
                             where=self._where,
                             conditionals=delete_conditional,
                             if_exists=self._if_exists)
        self._execute(ds)
java
@SuppressWarnings("unchecked") public static FieldSpec<XMLAnn>[] getXMLSpecs(Class<? extends Model> cls) { FieldSpec<XMLAnn>[] specs = XML_SPECS.get(cls); if (specs == null) { ArrayList<FieldSpec<XMLAnn>> list = new ArrayList<FieldSpec<XMLAnn>>(); for (Field field : getFieldHierarchy(cls)) { XMLAnn ann = getXMLAnn(field); if (ann != null) { ann.tag = getName(ann.tag, field); list.add(new FieldSpec<XMLAnn>(field, ann)); } } specs = list.toArray(new FieldSpec[list.size()]); XML_SPECS.put(cls, specs); } return specs; }
python
def add_parser_from_field(self, name, field_cls):
    """Register a new parser method with name ``name``, given a
    marshmallow ``Field``."""
    self.__parser_map__[name] = _field2method(field_cls, method_name=name)
java
public void getNewOptions(final String clientId, final I_CmsSimpleCallback<CmsDialogOptionsAndInfo> callback) {
    CmsRpcAction<CmsDialogOptionsAndInfo> action = new CmsRpcAction<CmsDialogOptionsAndInfo>() {

        @Override
        public void execute() {
            getContainerpageService().getNewOptions(
                clientId,
                getData().getRpcContext().getPageStructureId(),
                getData().getRequestParams(),
                this);
        }

        @Override
        protected void onResponse(CmsDialogOptionsAndInfo result) {
            callback.execute(result);
        }
    };
    action.execute();
}
java
public final EObject entryRuleAbstractMetamodelDeclaration() throws RecognitionException {
    EObject current = null;
    EObject iv_ruleAbstractMetamodelDeclaration = null;

    try {
        // InternalXtext.g:343:69: (iv_ruleAbstractMetamodelDeclaration= ruleAbstractMetamodelDeclaration EOF )
        // InternalXtext.g:344:2: iv_ruleAbstractMetamodelDeclaration= ruleAbstractMetamodelDeclaration EOF
        {
            newCompositeNode(grammarAccess.getAbstractMetamodelDeclarationRule());
            pushFollow(FollowSets000.FOLLOW_1);
            iv_ruleAbstractMetamodelDeclaration = ruleAbstractMetamodelDeclaration();
            state._fsp--;
            current = iv_ruleAbstractMetamodelDeclaration;
            match(input, EOF, FollowSets000.FOLLOW_2);
        }
    } catch (RecognitionException re) {
        recover(input, re);
        appendSkippedTokens();
    } finally {
    }
    return current;
}
java
Stream<TreeNode> getFiltered(Stream<TreeNode> delegateNodes) {
    return delegateNodes
        .filter(delegateNode -> filter.acceptNode(this, delegateNode))
        .map(delegateNode -> delegateToThis.get(delegateNode))
        .filter(filteredNode -> filteredNode != null);
}
python
def holtWintersConfidenceArea(requestContext, seriesList, delta=3):
    """
    Performs a Holt-Winters forecast using the series as input data and
    plots the area between the upper and lower bands of the predicted
    forecast deviations.
    """
    bands = holtWintersConfidenceBands(requestContext, seriesList, delta)
    results = areaBetween(requestContext, bands)
    for series in results:
        series.name = series.name.replace('areaBetween', 'holtWintersConfidenceArea')
    return results
python
def _get_mu_tensor(self):
    """Get the min mu which minimizes the surrogate.

    Returns:
      The mu_t.
    """
    root = self._get_cubic_root()
    dr = self._h_max / self._h_min
    mu = tf.maximum(
        root**2, ((tf.sqrt(dr) - 1) / (tf.sqrt(dr) + 1))**2)
    return mu
python
def group(*blueprints, url_prefix=""):
    """
    Create a list of blueprints, optionally grouping them under a
    general URL prefix.

    :param blueprints: blueprints to be registered as a group
    :param url_prefix: URL route to be prepended to all sub-prefixes
    """

    def chain(nested):
        """itertools.chain() but leaves strings untouched"""
        for i in nested:
            if isinstance(i, (list, tuple)):
                yield from chain(i)
            elif isinstance(i, BlueprintGroup):
                yield from i.blueprints
            else:
                yield i

    bps = BlueprintGroup(url_prefix=url_prefix)
    for bp in chain(blueprints):
        if bp.url_prefix is None:
            bp.url_prefix = ""
        bp.url_prefix = url_prefix + bp.url_prefix
        bps.append(bp)
    return bps
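A minimal usage sketch, assuming this is Sanic's blueprint grouping (exposed there as `Blueprint.group`) and that sanic is installed; the key effect is the in-place mutation of each child's url_prefix:

from sanic import Blueprint  # assumption: sanic is available

bp1 = Blueprint("bp1", url_prefix="/one")
bp2 = Blueprint("bp2", url_prefix="/two")
api = Blueprint.group(bp1, bp2, url_prefix="/api")
# Each child's prefix now carries the group prefix:
# bp1.url_prefix == "/api/one", bp2.url_prefix == "/api/two"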
python
def pop(self, option, default=None):
    '''Just like `dict.pop`'''
    val = self[option]
    del self[option]
    return (val is None and default) or val
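The docstring says "just like `dict.pop`", but the `(val is None and default) or val` idiom and the unguarded `self[option]` lookup make the semantics subtly different. A standalone sketch (the `Options` subclass is hypothetical) demonstrating the edge cases:

class Options(dict):
    def pop(self, option, default=None):
        val = self[option]          # unlike dict.pop, a missing key raises KeyError
        del self[option]
        return (val is None and default) or val

o = Options(a=None, b=0)
print(o.pop('a', 'fallback'))  # 'fallback': None is replaced by the default
print(o.pop('b', 'fallback'))  # 0: falsy but not None, returned unchanged
# Quirk of the and/or idiom: a stored None with a falsy default (e.g. 0)
# still returns None.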
python
def tag_sites(self, scaled_positions, symprec=1e-3):
    """Returns an integer array of the same length as *scaled_positions*,
    tagging all equivalent atoms with the same index.

    Example:

    >>> from ase.lattice.spacegroup import Spacegroup
    >>> sg = Spacegroup(225)  # fcc
    >>> sg.tag_sites([[0.0, 0.0, 0.0],
    ...               [0.5, 0.5, 0.0],
    ...               [1.0, 0.0, 0.0],
    ...               [0.5, 0.0, 0.0]])
    array([0, 0, 0, 1])
    """
    scaled = np.array(scaled_positions, ndmin=2)
    scaled %= 1.0
    scaled %= 1.0
    tags = -np.ones((len(scaled), ), dtype=int)
    mask = np.ones((len(scaled), ), dtype=np.bool)
    rot, trans = self.get_op()
    i = 0
    while mask.any():
        pos = scaled[mask][0]
        sympos = np.dot(rot, pos) + trans
        # Must be done twice, see the scaled_positions.py test
        sympos %= 1.0
        sympos %= 1.0
        m = ~np.all(np.any(np.abs(scaled[np.newaxis, :, :] -
                                  sympos[:, np.newaxis, :]) > symprec,
                           axis=2), axis=0)
        assert not np.any((~mask) & m)
        tags[m] = i
        mask &= ~m
        i += 1
    return tags
python
def _execute_example_group(self):
    "Handles the execution of Example Group"
    for example in self.example:
        runner = self.__class__(example, self.formatter)
        runner.is_root_runner = False
        successes, failures, skipped = runner.run(self.context)
        self.num_successes += successes
        self.num_failures += failures
        self.num_skipped += skipped
python
def item_names(self):
    """Item names."""
    if "item_names" not in self.attrs.keys():
        self.attrs["item_names"] = np.array([], dtype="S")
    return tuple(n.decode() for n in self.attrs["item_names"])
java
public void setSectionsAlwaysVisible(final boolean VISIBLE) {
    if (null == sectionsAlwaysVisible) {
        _sectionsAlwaysVisible = VISIBLE;
        fireUpdateEvent(REDRAW_EVENT);
    } else {
        sectionsAlwaysVisible.set(VISIBLE);
    }
}
python
def lastOfferedMonth(self):
    '''
    Sometimes a Series is associated with a month other than the one in
    which the first class begins, so this returns a (year, month) tuple
    that can be used in admin instead.
    '''
    lastOfferedSeries = self.event_set.order_by('-startTime').first()
    return (lastOfferedSeries.year, lastOfferedSeries.month)
python
def export_certificate(ctx, slot, format, certificate):
    """
    Export an X.509 certificate.

    Reads a certificate from one of the slots on the YubiKey.

    \b
    SLOT            PIV slot to read certificate from.
    CERTIFICATE     File to write certificate to. Use '-' to use stdout.
    """
    controller = ctx.obj['controller']
    try:
        cert = controller.read_certificate(slot)
    except APDUError as e:
        if e.sw == SW.NOT_FOUND:
            ctx.fail('No certificate found.')
        else:
            logger.error('Failed to read certificate from slot %s', slot,
                         exc_info=e)
    certificate.write(cert.public_bytes(encoding=format))
python
def windows_k_distinct(x, k):
    """Find all largest windows containing exactly k distinct elements

    :param x: list or string
    :param k: positive integer
    :yields: largest intervals [i, j) with len(set(x[i:j])) == k
    :complexity: `O(|x|)`
    """
    dist, i, j = 0, 0, 0           # dist = |{x[i], ..., x[j-1]}|
    occ = {xi: 0 for xi in x}      # number of occurrences in x[i:j]
    while j < len(x):
        while dist == k:           # move start of interval
            occ[x[i]] -= 1         # update counters
            if occ[x[i]] == 0:
                dist -= 1
            i += 1
        while j < len(x) and (dist < k or occ[x[j]]):
            if occ[x[j]] == 0:     # update counters
                dist += 1
            occ[x[j]] += 1
            j += 1                 # move end of interval
        if dist == k:
            yield (i, j)
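A quick usage check, assuming the generator above is in scope; on "aabbcc" with k=2 it yields the two maximal two-letter windows:

print(list(windows_k_distinct("aabbcc", 2)))
# [(0, 4), (2, 6)]  ->  "aabb" and "bbcc"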
python
def _gotitem(self, key, ndim, subset=None):
    """
    Sub-classes to define. Return a sliced object.

    Parameters
    ----------
    key : str / list of selections
    ndim : 1, 2
        requested ndim of result
    subset : object, default None
        subset to act on
    """
    # create a new object to prevent aliasing
    if subset is None:
        subset = self.obj

    self = self._shallow_copy(subset)
    self._reset_cache()
    if subset.ndim == 2:
        if is_scalar(key) and key in subset or is_list_like(key):
            self._selection = key
    return self
python
def concatenate_children(node, concatenate_with, strategy):
    """
    Concatenate children of node according to
    https://ocr-d.github.io/page#consistency-of-text-results-on-different-levels
    """
    _, _, getter, concatenate_with = [x for x in _HIERARCHY if isinstance(node, x[0])][0]
    tokens = [get_text(x, strategy) for x in getattr(node, getter)()]
    return concatenate_with.join(tokens).strip()
python
def lowpass_fir(self, frequency, order, beta=5.0, remove_corrupted=True):
    """Lowpass filter the time series using an FIR filter generated from
    the ideal response passed through a kaiser window (beta = 5.0).

    Parameters
    ----------
    Time Series: TimeSeries
        The time series to be low-passed.
    frequency: float
        The frequency below which is suppressed.
    order: int
        Number of corrupted samples on each side of the time series.
    beta: float
        Beta parameter of the kaiser window that sets the side lobe
        attenuation.
    remove_corrupted : {True, boolean}
        If True, the region of the time series corrupted by the filtering
        is excised before returning. If False, the corrupted regions are
        not excised and the full time series is returned.
    """
    from pycbc.filter import lowpass_fir
    ts = lowpass_fir(self, frequency, order, beta=beta)
    if remove_corrupted:
        ts = ts[order:len(ts) - order]
    return ts
java
@Override
public void visitClassContext(ClassContext classContext) {
    try {
        javaClass = classContext.getJavaClass();
        if (javaClass.getMajor() >= Const.MAJOR_1_5) {
            javaClass.accept(this);
        }
    } finally {
        javaClass = null;
    }
}
python
def add(cls, module_name, msg=''):
    '''
    Until the guard is dropped again, disallow imports of the module
    given by ``module_name``. If the module is imported while the guard
    is in place an ``ImportGuard`` is raised.

    An additional message on why the module cannot be imported can
    optionally be specified using the parameter ``msg``.

    If multiple guards are placed on the same module, all these guards
    have to be dropped before the module can be imported again.
    '''
    if module_name in sys.modules:
        raise ImportGuard(
            'Module to guard has already been imported: ' + module_name
        )
    cls._guards.setdefault(module_name, [])
    cls._guards[module_name].append(msg)
    cls._num_entries += 1
    cls._install()
python
def triggerItem(self, item):
    """
    Triggers the item by calling its action's toggled state to display
    or hide the dock panel.

    :param item | <QtGui.QTreeWidgetItem>
    """
    if not item:
        return

    # emit the trigger action
    self._triggerText = item.text(0)
    self._completer.hide()
    self._completer.setCurrentItem(None)
    self.parent().hide()

    # trigger the action
    unwrapVariant(item.data(0, Qt.UserRole)).trigger()
java
private ParseTree parseStatementStandard() {
    switch (peekType()) {
        case OPEN_CURLY:
            return parseBlock();
        case CONST:
        case VAR:
            return parseVariableStatement();
        case SEMI_COLON:
            return parseEmptyStatement();
        case IF:
            return parseIfStatement();
        case DO:
            return parseDoWhileStatement();
        case WHILE:
            return parseWhileStatement();
        case FOR:
            return parseForStatement();
        case CONTINUE:
            return parseContinueStatement();
        case BREAK:
            return parseBreakStatement();
        case RETURN:
            return parseReturnStatement();
        case WITH:
            return parseWithStatement();
        case SWITCH:
            return parseSwitchStatement();
        case THROW:
            return parseThrowStatement();
        case TRY:
            return parseTryStatement();
        case DEBUGGER:
            return parseDebuggerStatement();
        default:
            if (peekLabelledStatement()) {
                return parseLabelledStatement();
            }
            return parseExpressionStatement();
    }
}
java
@Override
public void tick(long now) {
    if (now - this.lastM3UAMessageTime >= this.aspFactoryImpl.m3UAManagementImpl.getHeartbeatTime()) {
        this.lastM3UAMessageTime = now;
        this.heartBeatAckMissed++;
        this.aspFactoryImpl.write(HEART_BEAT);
    }

    if (this.heartBeatAckMissed > HEART_BEAT_ACK_MISSED_ALLOWED) {
        logger.warn(String.format(
            "HEART_BEAT ACK missed %d is greater than configured %d for AspFactory %s. "
                + "Underlying Association will be stopped and started again",
            this.heartBeatAckMissed, HEART_BEAT_ACK_MISSED_ALLOWED, this.aspFactoryImpl.getName()));

        try {
            this.aspFactoryImpl.transportManagement.stopAssociation(this.aspFactoryImpl.associationName);
        } catch (Exception e) {
            logger.warn(String.format("Error while trying to stop underlying Association for AspFactory=%s",
                this.aspFactoryImpl.getName()), e);
        }

        try {
            this.aspFactoryImpl.transportManagement.startAssociation(this.aspFactoryImpl.associationName);
        } catch (Exception e) {
            logger.error(String.format("Error while trying to start underlying Association for AspFactory=%s",
                this.aspFactoryImpl.getName()), e);
        }

        // finally cancel
        this.cancel();
    }
}
java
private static String getPluralCaseOpenString(SoyMsgPluralCaseSpec pluralCaseSpec) {
    String icuCaseName = (pluralCaseSpec.getType() == SoyMsgPluralCaseSpec.Type.EXPLICIT)
        ? "=" + pluralCaseSpec.getExplicitValue()
        : pluralCaseSpec.getType().name().toLowerCase();
    return icuCaseName + "{";
}
python
def cli(ctx, ids, query, filters, details, interval):
    """Watch for new alerts."""
    if details:
        display = 'details'
    else:
        display = 'compact'

    from_date = None
    auto_refresh = True
    while auto_refresh:
        try:
            auto_refresh, from_date = ctx.invoke(
                query_cmd, ids=ids, query=query, filters=filters,
                display=display, from_date=from_date)
            time.sleep(interval)
        except (KeyboardInterrupt, SystemExit) as e:
            sys.exit(e)
java
public boolean markMigrated(VectorClock vectorClock) {
    stateWriteLock.lock();
    try {
        if (stateVectorClock.equals(vectorClock)) {
            migrated = true;
        }
        return migrated;
    } finally {
        stateWriteLock.unlock();
    }
}
java
public void updateBigDecimal(String columnLabel, BigDecimal value) throws SQLException {
    updateBigDecimal(findColumn(columnLabel), value);
}
python
def num(self, num, h=None, to_x=None, format="%.2f",
        change_style=None, change_size=None):
    """print number (int/float) right aligned before defined (to_x) position"""
    self.txt(format % num, h=h, to_x=to_x,
             change_style=change_style, change_size=change_size)
java
@Override
public String parse(final String threshold, final RangeConfig tc) throws RangeException {
    if (threshold == null) {
        throw new RangeException("Range can't be null");
    }
    Stage currentStage = this;
    String parsedThreshold = threshold;
    boolean stageParsed;
    while (parsedThreshold.length() != 0) {
        stageParsed = false;
        for (String transitionName : currentStage.getTransitionNames()) {
            Stage transition = currentStage.getTransition(transitionName);
            if (transition.canParse(parsedThreshold)) {
                parsedThreshold = transition.parse(parsedThreshold, tc);
                currentStage = transition;
                stageParsed = true;
                break;
            }
        }
        if (!stageParsed) {
            throw new InvalidRangeSyntaxException(currentStage, parsedThreshold, threshold);
        }
    }
    if (!currentStage.isLeaf()) {
        throw new PrematureEndOfRangeException(currentStage, threshold);
    }
    return parsedThreshold;
}
python
def _check_enclosing_characters(string, opener, closer):
    """
    Makes sure that the enclosing characters for a definition set make sense

    1) There is only one set
    2) They are in the right order (opening, then closing)
    """
    opener_count = string.count(opener)
    closer_count = string.count(closer)
    total = opener_count + closer_count
    if total > 2:
        msg = MORE_THAN_ONE_SET.format(opener, closer)
        raise ValueError(msg)
    elif total == 1:
        msg = INCOMPLETE_SET.format(opener, closer)
        raise ValueError(msg)
    elif opener_count > 1:
        msg = INCORRECT_SET_CONSTITUENT.format(opener)
        raise ValueError(msg)
    elif closer_count > 1:
        msg = INCORRECT_SET_CONSTITUENT.format(closer)
        raise ValueError(msg)
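A small exercise of the branches, assuming the function above is in scope. The message constants are not shown in the source, so hypothetical stand-ins are defined here:

# Hypothetical message templates -- the real constants live elsewhere:
MORE_THAN_ONE_SET = "more than one '{0}...{1}' set"
INCOMPLETE_SET = "unmatched '{0}' / '{1}'"
INCORRECT_SET_CONSTITUENT = "repeated '{0}'"

_check_enclosing_characters("f(x, y)", "(", ")")    # passes silently
# _check_enclosing_characters("f(x", "(", ")")      # ValueError: unmatched set
# _check_enclosing_characters("((x))", "(", ")")    # ValueError: more than one set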
java
public void onDoubleClick(DoubleClickEvent event) {
    tempLength = 0;
    mapWidget.unregisterWorldPaintable(distanceLine);
    mapWidget.unregisterWorldPaintable(lineSegment);
    distanceLine.setGeometry(null);
    lineSegment.setGeometry(null);
    if (panel != null) {
        panel.destroy();
    }
    dispatchState(State.STOP);
}
python
def connect(url=None, schema=None, sql_path=None, multiprocessing=False):
    """Open a new connection to postgres via psycopg2/sqlalchemy"""
    if url is None:
        url = os.environ.get("DATABASE_URL")
    return Database(url, schema, sql_path=sql_path,
                    multiprocessing=multiprocessing)
python
def q_scan(data, mismatch=DEFAULT_MISMATCH, qrange=DEFAULT_QRANGE,
           frange=DEFAULT_FRANGE, duration=None, sampling=None, **kwargs):
    """Transform data by scanning over a `QTiling`

    This utility is provided mainly to allow direct manipulation of the
    `QTiling.transform` output. Most users probably just want to use
    :meth:`~gwpy.timeseries.TimeSeries.q_transform`, which wraps around
    this.

    Parameters
    ----------
    data : `~gwpy.timeseries.TimeSeries` or `ndarray`
        the time- or frequency-domain input data

    mismatch : `float`, optional
        maximum allowed fractional mismatch between neighbouring tiles

    qrange : `tuple` of `float`, optional
        `(low, high)` range of Qs to scan

    frange : `tuple` of `float`, optional
        `(low, high)` range of frequencies to scan

    duration : `float`, optional
        duration (seconds) of input, required if `data` is not a
        `TimeSeries`

    sampling : `float`, optional
        sample rate (Hertz) of input, required if `data` is not a
        `TimeSeries`

    **kwargs
        other keyword arguments to be passed to :meth:`QTiling.transform`,
        including ``'epoch'`` and ``'search'``

    Returns
    -------
    qgram : `QGram`
        the raw output of :meth:`QTiling.transform`

    far : `float`
        expected false alarm rate (Hertz) of white Gaussian noise with
        the same peak energy and total duration as `qgram`
    """
    from gwpy.timeseries import TimeSeries

    # prepare input
    if isinstance(data, TimeSeries):
        duration = abs(data.span)
        sampling = data.sample_rate.to('Hz').value
        kwargs.update({'epoch': data.t0.value})
        data = data.fft().value

    # return a raw Q-transform and its significance
    qgram, N = QTiling(duration, sampling, mismatch=mismatch,
                       qrange=qrange, frange=frange).transform(data, **kwargs)
    far = 1.5 * N * numpy.exp(-qgram.peak['energy']) / duration
    return (qgram, far)
java
@Override
public List<GoogleCloudStorageItemInfo> listObjectInfo(
    String bucketName, String objectNamePrefix, String delimiter, long maxResults)
    throws IOException {
    List<GoogleCloudStorageItemInfo> result;
    if (options.isListCachingEnabled()) {
        result = cache.getList(bucketName, objectNamePrefix);
        if (result == null) {
            result = super.listObjectInfo(bucketName, objectNamePrefix, null);
            cache.putList(bucketName, objectNamePrefix, result);
        }
        filter(result, bucketName, objectNamePrefix, delimiter);
        if (maxResults > 0 && result.size() > maxResults) {
            result = result.subList(0, (int) maxResults);
        }
    } else {
        result = super.listObjectInfo(bucketName, objectNamePrefix, delimiter, maxResults);
        for (GoogleCloudStorageItemInfo item : result) {
            cache.putItem(item);
        }
    }
    return result;
}
java
boolean scanSpecialIdentifier(String identifier) {
    int length = identifier.length();
    if (limit - currentPosition < length) {
        return false;
    }
    for (int i = 0; i < length; i++) {
        int character = identifier.charAt(i);
        if (character == sqlString.charAt(currentPosition + i)) {
            continue;
        }
        if (character == Character.toUpperCase(sqlString.charAt(currentPosition + i))) {
            continue;
        }
        return false;
    }
    currentPosition += length;
    return true;
}
java
public void start() throws Throwable {
    Context context = new InitialContext();
    context.bind(JNDI_NAME, this);
    context.close();
}
java
public SslContextBuilder trustManager(InputStream trustCertCollectionInputStream) {
    try {
        return trustManager(SslContext.toX509Certificates(trustCertCollectionInputStream));
    } catch (Exception e) {
        throw new IllegalArgumentException("Input stream does not contain valid certificates.", e);
    }
}
python
def _set_fe_access_check(self, v, load=False):
    """
    Setter method for fe_access_check, mapped from YANG variable
    /sysmon/fe_access_check (container)

    If this variable is read-only (config: false) in the source YANG
    file, then _set_fe_access_check is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_fe_access_check() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=fe_access_check.fe_access_check, is_container='container', presence=False, yang_name="fe-access-check", rest_name="fe-access-check", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Fe Access Check', u'callpoint': u'feAccessCheck', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-sysmon', defining_module='brocade-sysmon', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """fe_access_check must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=fe_access_check.fe_access_check, is_container='container', presence=False, yang_name="fe-access-check", rest_name="fe-access-check", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Fe Access Check', u'callpoint': u'feAccessCheck', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-sysmon', defining_module='brocade-sysmon', yang_type='container', is_config=True)""",
        })

    self.__fe_access_check = t
    if hasattr(self, '_set'):
        self._set()
python
def _get_default_dependencies(self):
    '''
    Get default dependencies for archive

    Get default dependencies from requirements file or (if no
    requirements file) from the previous version.
    '''
    # Get default dependencies from requirements file
    default_dependencies = {
        k: v for k, v in self.api.default_versions.items()
        if k != self.archive_name}

    # If there is no requirements file or it is empty:
    if len(default_dependencies) == 0:
        # Retrieve dependencies from last archive record
        history = self.get_history()
        if len(history) > 0:
            default_dependencies = history[-1].get('dependencies', {})

    return default_dependencies
java
public final ListCryptoKeyVersionsPagedResponse listCryptoKeyVersions(CryptoKeyName parent) {
    ListCryptoKeyVersionsRequest request =
        ListCryptoKeyVersionsRequest.newBuilder()
            .setParent(parent == null ? null : parent.toString())
            .build();
    return listCryptoKeyVersions(request);
}
python
def construct_datapipeline(env='', generated=None, previous_env=None,
                           region='us-east-1', settings=None,
                           pipeline_data=None):
    """Create the Pipeline JSON from template.

    This handles the common repeatable patterns in a pipeline, such as
    judgement, infrastructure, tagger and qe.

    Args:
        env (str): Deploy environment name, e.g. dev, stage, prod.
        generated (gogoutils.Generator): Gogo Application name generator.
        previous_env (str): The previous deploy environment to use as
            Trigger.
        region (str): AWS Region to deploy to.
        settings (dict): Environment settings from configurations.

    Returns:
        dict: Pipeline JSON template rendered with configurations.
    """
    LOG.info('%s block for [%s].', env, region)

    if env.startswith('prod'):
        template_name = 'pipeline/pipeline_{}_datapipeline.json.j2'.format(env)
    else:
        template_name = 'pipeline/pipeline_stages_datapipeline.json.j2'

    LOG.debug('%s info:\n%s', env, pformat(settings))

    gen_app_name = generated.app_name()

    data = copy.deepcopy(settings)

    data['app'].update({
        'appname': gen_app_name,
        'repo_name': generated.repo,
        'group_name': generated.project,
        'environment': env,
        'region': region,
        'previous_env': previous_env,
        'promote_restrict': pipeline_data['promote_restrict'],
        'owner_email': pipeline_data['owner_email'],
    })

    LOG.debug('Block data:\n%s', pformat(data))

    pipeline_json = get_template(template_file=template_name, data=data,
                                 formats=generated)
    return pipeline_json
java
private void processTasks() throws SQLException {
    List<Row> bars = getTable("BAR");
    List<Row> expandedTasks = getTable("EXPANDED_TASK");
    List<Row> tasks = getTable("TASK");
    List<Row> milestones = getTable("MILESTONE");
    m_reader.processTasks(bars, expandedTasks, tasks, milestones);
}
java
private TokenResponse executeLeniently() throws IOException {
    // must set clientAuthentication as last execute interceptor in case it
    // needs to sign request
    HttpRequestFactory requestFactory =
        getTransport().createRequestFactory(new HttpRequestInitializer() {
            public void initialize(HttpRequest request) throws IOException {
                if (getRequestInitializer() != null) {
                    getRequestInitializer().initialize(request);
                }
                final HttpExecuteInterceptor interceptor = request.getInterceptor();
                request.setInterceptor(new HttpExecuteInterceptor() {
                    public void intercept(HttpRequest request) throws IOException {
                        if (interceptor != null) {
                            interceptor.intercept(request);
                        }
                        if (getClientAuthentication() != null) {
                            getClientAuthentication().intercept(request);
                        }
                    }
                });
            }
        });

    // make request
    HttpRequest request =
        requestFactory.buildPostRequest(getTokenServerUrl(), new UrlEncodedContent(this));
    request.setParser(new JsonObjectParser(getJsonFactory()));
    request.setThrowExceptionOnExecuteError(false);
    HttpResponse response = request.execute();

    if (response.isSuccessStatusCode()) {
        if (!HttpResponseUtils.hasMessageBody(response)) {
            return null;
        }
        // check and see if status code is 200 but has error response
        String responseContent = HttpResponseUtils.parseAsStringWithoutClosing(response);
        TokenResponse tokenResponse = response
            .getRequest()
            .getParser()
            .parseAndClose(new StringReader(responseContent), TokenResponse.class);
        if (tokenResponse.containsKey("error")) {
            throw LenientTokenResponseException.from(getJsonFactory(), response, responseContent);
        }
        return response.getRequest().getParser()
            .parseAndClose(new StringReader(responseContent), TokenResponse.class);
    }
    throw TokenResponseException.from(getJsonFactory(), response);
}
python
def get_validate_upload_form_kwargs(self):
    """
    Return the keyword arguments for instantiating the form for
    validating the upload.
    """
    kwargs = {
        'storage': self.get_storage(),
        'upload_to': self.get_upload_to(),
        'content_type_prefix': self.get_content_type_prefix(),
        'process_to': self.get_process_to(),
        'processed_key_generator': self.get_processed_key_generator(),
    }

    # ``data`` may be provided by a POST from the JavaScript if using a
    # DropZone form, or as querystrings on a redirect GET request from
    # Amazon if not.
    data = {
        'bucket_name': self._get_bucket_name(),
        'key_name': self._get_key_name(),
        'etag': self._get_etag(),
    }
    kwargs.update({'data': data})
    return kwargs
java
@Override
public void renderTemplate(final String templateName, final Map<String, Object> context,
        final Map<String, WComponent> taggedComponents, final Writer writer,
        final Map<String, Object> options) {
    LOG.debug("Rendering handlebars template " + templateName);
    try {
        // Map the tagged components to be used in the replace writer
        Map<String, WComponent> componentsByKey = TemplateUtil.mapTaggedComponents(context, taggedComponents);

        // Get Engine
        Handlebars handlebars = getHandlebarsEngine(options);

        // Load template (Handlebars loader makes the template name "absolute")
        Template template = handlebars.compile(templateName);

        // Setup handlebars context
        Context handlebarsContext = createContext(context);

        // Render
        writeTemplate(template, handlebarsContext, componentsByKey, writer);
    } catch (FileNotFoundException e) {
        throw new SystemException("Could not find handlebars template [" + templateName + "]. " + e.getMessage(), e);
    } catch (Exception e) {
        throw new SystemException("Problems with handlebars template [" + templateName + "]. " + e.getMessage(), e);
    }
}
python
def _newer_tags_get(current_highest, versions):
    """
    Returns versions from `versions` which are greater than the highest
    version in each major. If a newer major is present in `versions`
    which is not present in `current_highest`, an exception will be
    raised.

    @param current_highest: as returned by
        VersionsFile.highest_version_major()
    @param versions: a list of versions.
    @return: a list of versions.
    @raise MissingMajorException: A new version from a newer major
        branch exists, but will not be downloaded due to it not being
        in majors.
    """
    newer = []
    for major in current_highest:
        highest_version = current_highest[major]
        for version in versions:
            version = version.lstrip('v')
            if version.startswith(major) and version_gt(version, highest_version):
                newer.append(version)

    _check_newer_major(current_highest, versions)
    return newer
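A standalone sketch of the "newer within each major" selection. The real module supplies its own version_gt and _check_newer_major; the naive tuple-comparison stand-in below is an assumption for illustration only:

def version_gt(a, b):
    # naive stand-in: compare dotted versions numerically
    return tuple(int(x) for x in a.split('.')) > tuple(int(x) for x in b.split('.'))

current_highest = {'7': '7.34', '8': '8.2'}
versions = ['v7.35', '8.1', '8.3', '7.20']

newer = []
for major, highest in current_highest.items():
    for v in (x.lstrip('v') for x in versions):
        if v.startswith(major) and version_gt(v, highest):
            newer.append(v)
print(newer)  # ['7.35', '8.3']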
python
def dcounts(self):
    """
    :return: a data frame with names and distinct counts and fractions
        for all columns in the database
    """
    print("WARNING: Distinct value count for all tables can take a long time...",
          file=sys.stderr)
    sys.stderr.flush()

    data = []
    for t in self.tables():
        for c in t.columns():
            data.append([t.name(), c.name(), c.dcount(),
                         t.size(), c.dcount() / float(t.size())])
    df = pd.DataFrame(data, columns=["table", "column", "distinct", "size", "fraction"])
    return df
python
def get_reference_lines(docbody, ref_sect_start_line, ref_sect_end_line,
                        ref_sect_title, ref_line_marker_ptn,
                        title_marker_same_line):
    """After the reference section of a document has been identified, and
    the first and last lines of the reference section have been recorded,
    this function is called to take the reference lines out of the
    document body. The document's reference lines are returned in a list
    of strings whereby each string is a reference line. Before this can
    be done however, the reference section is passed to another function
    that rebuilds any broken reference lines.

    @param docbody: (list) of strings - the entire document body.
    @param ref_sect_start_line: (integer) - the index in docbody of the
        first reference line.
    @param ref_sect_end_line: (integer) - the index in docbody of the
        last reference line.
    @param ref_sect_title: (string) - the title of the reference section
        (e.g. "References").
    @param ref_line_marker_ptn: (string) - the pattern used to match the
        marker for each reference line (e.g., could be used to match
        lines with markers of the form [1], [2], etc.)
    @param title_marker_same_line: (integer) - a flag to indicate whether
        or not the reference section title was on the same line as the
        first reference line's marker.
    @return: (list) of strings. Each string is a reference line,
        extracted from the document.
    """
    start_idx = ref_sect_start_line

    if title_marker_same_line:
        # Title on same line as 1st ref- take title out!
        title_start = docbody[start_idx].find(ref_sect_title)
        if title_start != -1:
            # Set the first line with no title
            docbody[start_idx] = docbody[start_idx][title_start +
                                                    len(ref_sect_title):]
    elif ref_sect_title is not None:
        # Set the start of the reference section to be after the title line
        start_idx += 1

    if ref_sect_end_line is not None:
        ref_lines = docbody[start_idx:ref_sect_end_line + 1]
    else:
        ref_lines = docbody[start_idx:]

    if ref_sect_title:
        ref_lines = strip_footer(ref_lines, ref_sect_title)

    # Now rebuild reference lines:
    # (Go through each raw reference line, and format them into a set
    # of properly ordered lines based on markers)
    return rebuild_reference_lines(ref_lines, ref_line_marker_ptn)
python
def CutAtClosestPoint(self, p):
    """
    Let x be the point on the polyline closest to p. Then
    CutAtClosestPoint returns two new polylines, one representing the
    polyline from the beginning up to x, and one representing x onwards
    to the end of the polyline. x is the first point returned in the
    second polyline.
    """
    (closest, i) = self.GetClosestPoint(p)
    tmp = [closest]
    tmp.extend(self._points[i + 1:])
    return (Poly(self._points[0:i + 1]), Poly(tmp))
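A standalone sketch of the slicing performed above, with plain tuples instead of the Poly class; the index i (the segment on which the closest point lies) is assumed to come from GetClosestPoint:

points = [(0, 0), (1, 0), (2, 0)]
closest, i = (1.5, 0), 1  # pretend the closest point lies on segment i=1
first = points[0:i + 1]                # beginning up to the split
second = [closest] + points[i + 1:]    # closest point onwards
print(first, second)  # [(0, 0), (1, 0)] [(1.5, 0), (2, 0)]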
python
def show_in_menus_custom(self, request=None, current_site=None,
                         menu_instance=None, original_menu_tag=''):
    """
    Return a boolean indicating whether this page should be included in
    menus being rendered.
    """
    if not self.show_in_menus:
        return False
    if self.link_page:
        return self.link_page_is_suitable_for_display()
    return True
python
def save(self, path=None, format=None, mode=None, partitionBy=None, **options):
    """Saves the contents of the :class:`DataFrame` to a data source.

    The data source is specified by the ``format`` and a set of
    ``options``. If ``format`` is not specified, the default data source
    configured by ``spark.sql.sources.default`` will be used.

    :param path: the path in a Hadoop supported file system
    :param format: the format used to save
    :param mode: specifies the behavior of the save operation when data
        already exists.

        * ``append``: Append contents of this :class:`DataFrame` to existing data.
        * ``overwrite``: Overwrite existing data.
        * ``ignore``: Silently ignore this operation if data already exists.
        * ``error`` or ``errorifexists`` (default case): Throw an exception if data already exists.
    :param partitionBy: names of partitioning columns
    :param options: all other string options

    >>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
    """
    self.mode(mode).options(**options)
    if partitionBy is not None:
        self.partitionBy(partitionBy)
    if format is not None:
        self.format(format)
    if path is None:
        self._jwrite.save()
    else:
        self._jwrite.save(path)
java
@Override
public MonitorInstancesResult monitorInstances(MonitorInstancesRequest request) {
    request = beforeClientExecution(request);
    return executeMonitorInstances(request);
}
java
private void scanEntityMappings() throws PersistenceUnitScannerException {
    /*
     * From the JPA 2.1 Specification:
     *
     * 8.2.1.6.2 Object/relational Mapping Files
     * An object/relational mapping XML file contains mapping information for the classes listed in it.
     *
     * An object/relational mapping XML file named orm.xml may be specified in the META-INF directory
     * in the root of the persistence unit or in the META-INF directory of any jar file referenced by the
     * persistence.xml. Alternatively, or in addition, one or more mapping files may be referenced by the
     * mapping-file elements of the persistence-unit element. These mapping files may be
     * present anywhere on the class path.
     *
     * An orm.xml mapping file or other mapping file is loaded as a resource by the persistence provider. If
     * a mapping file is specified, the classes and mapping information specified in the mapping file will be
     * used as described in Chapter 12. If multiple mapping files are specified (possibly including one or more
     * orm.xml files), the resulting mappings are obtained by combining the mappings from all of the files.
     * The result is undefined if multiple mapping files (including any orm.xml file) referenced within a single
     * persistence unit contain overlapping mapping information for any given class. The object/relational
     * mapping information contained in any mapping file referenced within the persistence unit must be disjoint
     * at the class-level from object/relational mapping information contained in any other such mapping
     * file.
     */
    final HashSet<URL> mappingFilesLocated = new HashSet<URL>();
    final HashSet<String> searchNames = new HashSet<String>();
    for (PersistenceUnitInfo pui : puiList) {
        try {
            mappingFilesLocated.clear();
            searchNames.clear();

            searchNames.add("META-INF/orm.xml");
            if (pui.getMappingFileNames() != null) {
                searchNames.addAll(pui.getMappingFileNames());
            }

            for (String mappingFile : searchNames) {
                mappingFilesLocated.addAll(findORMResources(pui, mappingFile));
            }

            final List<EntityMappingsDefinition> parsedOrmList = pu_ormFileParsed_map.get(pui);
            pu_ormFiles_map.get(pui).addAll(mappingFilesLocated);

            // Process discovered mapping files
            for (final URL mappingFileURL : mappingFilesLocated) {
                if (scanned_ormfile_map.containsKey(mappingFileURL)) {
                    // Already processed this ORM File, no need to process it again.
                    parsedOrmList.add(scanned_ormfile_map.get(mappingFileURL));
                    continue;
                }

                EntityMappingsDefinition emapdef = EntityMappingsFactory.parseEntityMappings(mappingFileURL);
                parsedOrmList.add(emapdef);
                scanned_ormfile_map.put(mappingFileURL, emapdef);
            }
        } catch (Exception e) {
            FFDCFilter.processException(e, PersistenceUnitScanner.class.getName() + ".scanEntityMappings", "460");
            throw new PersistenceUnitScannerException(e);
        }
    }
}
java
public void init(Record record, ScreenLocation itsLocation, BasePanel parentScreen,
        Converter fieldConverter, int iDisplayFieldDesc, Map<String, Object> properties) {
    this.setAppending(true);    // By default
    super.init(record, itsLocation, parentScreen, fieldConverter, iDisplayFieldDesc, properties);

    int iErrorCode = this.checkSecurity();
    if ((iErrorCode != DBConstants.NORMAL_RETURN) && (iErrorCode != Constants.READ_ACCESS)) {
        return;
    }
    // Put the grid table in front of this record, so grid operations will work.
    Record gridRecord = this.getMainRecord();
    // Even though grid table caches for me, I use the cache to take advantage of multiple reads.
    if (gridRecord != null) {
        gridRecord.setOpenMode(gridRecord.getOpenMode() | DBConstants.OPEN_CACHE_RECORDS);  // Cache recently used records.
    }
    if (!ScreenConstants.HTML_SCREEN_TYPE.equalsIgnoreCase(this.getViewFactory().getViewSubpackage())) {
        gridRecord.setupRecordListener(this, true, false);  // I need to listen for record changes
        BaseTable gridTable = gridRecord.getTable();
        if (!(gridTable instanceof GridTable)) {
            gridTable = new GridTable(null, gridRecord);
        }
        // The record should not be in refresh mode (the table model handles read-rewrites).
        gridRecord.setOpenMode(gridRecord.getOpenMode() & ~DBConstants.OPEN_REFRESH_AND_LOCK_ON_CHANGE_STRATEGY);   // Must have for GridTable to re-read.
        gridRecord.close();
    }
    if (gridRecord != null) {
        gridRecord.setDisplayOption(false); // Don't need to auto-display on change
    }
    if ((m_iDisplayFieldDesc & ScreenConstants.SELECT_MODE) == ScreenConstants.SELECT_MODE) {
        this.setEditing(false);     // Don't allow editing, if select mode
    }
    if ((gridRecord.getOpenMode() & DBConstants.OPEN_READ_ONLY) == DBConstants.OPEN_READ_ONLY) {
        this.setEditing(false);     // Don't allow editing, if read-only record
    }
    if (this.getEditing() == false) {
        this.setAppending(false);   // Don't allow appending if read-only (by default)
    }
    if ((gridRecord.getOpenMode() & DBConstants.OPEN_APPEND_ONLY) == DBConstants.OPEN_APPEND_ONLY) {
        this.setAppending(true);    // Do allow appending, if read-only record
    }
    // Overriding class must add the columns and resize to content
}
python
def _rapRperiAxiEq(R, E, L, pot):
    """The vr=0 equation that needs to be solved to find apo- and pericenter"""
    return E - potentialAxi(R, pot) - L**2. / 2. / R**2.
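The zero sought here is the radial turning-point condition (v_R = 0) for an orbit of energy E and angular momentum L in an axisymmetric potential \Phi; restated in LaTeX, it is the same expression the function returns:

E - \Phi(R) - \frac{L^{2}}{2R^{2}} = 0

Apo- and pericenter are the two roots R of this equation that bracket the current radius.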
java
@Override
public void init() throws ServletException {
    super.init();
    // GET
    operations.setOperation(ServletOperationSet.Method.GET, Extension.json,
        Operation.accessPolicies, new GetAccessPolicies());
    operations.setOperation(ServletOperationSet.Method.GET, Extension.json,
        Operation.allPolicies, new GetAllAccessPolicies());
    operations.setOperation(ServletOperationSet.Method.GET, Extension.html,
        Operation.allPolicies, new GetHtmlAccessRules());
    operations.setOperation(ServletOperationSet.Method.GET, Extension.json,
        Operation.supportedPrivileges, new SupportedPrivileges());
    operations.setOperation(ServletOperationSet.Method.GET, Extension.json,
        Operation.restrictionNames, new RestrictionNames());
    operations.setOperation(ServletOperationSet.Method.GET, Extension.json,
        Operation.principals, new GetPrincipals());

    // POST
    operations.setOperation(ServletOperationSet.Method.POST, Extension.json,
        Operation.reorder, new ReorderOperation());

    // PUT
    operations.setOperation(ServletOperationSet.Method.PUT, Extension.json,
        Operation.accessPolicy, new PutAccessPolicy());

    // DELETE
    operations.setOperation(ServletOperationSet.Method.DELETE, Extension.json,
        Operation.accessPolicy, new RemoveAccessPolicy());
}
java
boolean attachmentsAreStructurallyDifferent(List<AttachmentModel> firstAttachments,
        List<AttachmentModel> otherAttachments) {
    if (firstAttachments.size() != otherAttachments.size()) {
        return true;
    }
    for (int i = 0; i < firstAttachments.size(); i++) {
        if (attachmentIsStructurallyDifferent(firstAttachments.get(i), otherAttachments.get(i))) {
            return true;
        }
    }
    return false;
}
java
public String getPlainTextContext(int fromPos, int toPos, String contents) {
    String text = contents.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ');

    // calculate context region:
    int startContent = fromPos - contextSize;
    String prefix = "...";
    String postfix = "...";
    String markerPrefix = " ";
    if (startContent < 0) {
        prefix = "";
        markerPrefix = "";
        startContent = 0;
    }
    int endContent = toPos + contextSize;
    if (endContent > text.length()) {
        postfix = "";
        endContent = text.length();
    }
    StringBuilder marker = getMarker(fromPos, toPos, text.length() + prefix.length());

    // now build context string plus marker:
    return prefix + text.substring(startContent, endContent) + postfix + '\n'
        + markerPrefix + marker.substring(startContent, endContent);
}
python
def createExpenseItemsForVenueRental(request=None, datetimeTuple=None, rule=None, event=None):
    '''
    For each Location or Room-related Repeated Expense Rule, look for
    Events in the designated time window that do not already have
    expenses associated with them. For hourly rental expenses, then
    generate new expenses that are associated with this rule. For
    non-hourly expenses, generate new expenses based on the
    non-overlapping intervals of days, weeks or months for which there
    is not already an ExpenseItem associated with the rule in question.
    '''

    # These are used repeatedly, so they are put at the top
    submissionUser = getattr(request, 'user', None)
    rental_category = getConstant('financial__venueRentalExpenseCat')

    # Return the number of new expense items created
    generate_count = 0

    # First, construct the set of rules that need to be checked for
    # affiliated events
    rule_filters = Q(disabled=False) & Q(rentalRate__gt=0) & \
        (Q(locationrentalinfo__isnull=False) | Q(roomrentalinfo__isnull=False))
    if rule:
        rule_filters = rule_filters & Q(id=rule.id)
    rulesToCheck = RepeatedExpenseRule.objects.filter(rule_filters).distinct()

    # These are the filters placed on Events that overlap the window in
    # which expenses are being generated.
    event_timefilters = Q()

    if datetimeTuple and len(datetimeTuple) == 2:
        timelist = list(datetimeTuple)
        timelist.sort()
        event_timefilters = event_timefilters & (
            Q(startTime__gte=timelist[0]) & Q(startTime__lte=timelist[1])
        )
    if event:
        event_timefilters = event_timefilters & Q(id=event.id)

    # Now, we loop through the set of rules that need to be applied, then
    # loop through the Events in the window in question that occurred at
    # the location indicated by the rule.
    for rule in rulesToCheck:

        venue = (
            getattr(rule, 'room', None) if isinstance(rule, RoomRentalInfo)
            else getattr(rule, 'location', None)
        )
        loc = getattr(venue, 'location') if isinstance(venue, Room) else venue
        event_locfilter = Q(room=venue) if isinstance(venue, Room) else Q(location=venue)

        # Find or create the TransactionParty associated with the location.
        loc_party = TransactionParty.objects.get_or_create(
            location=loc, defaults={'name': loc.name}
        )[0]

        if rule.advanceDays:
            if rule.advanceDaysReference == RepeatedExpenseRule.MilestoneChoices.end:
                event_timefilters = event_timefilters & \
                    Q(endTime__lte=timezone.now() + timedelta(days=rule.advanceDays))
            elif rule.advanceDaysReference == RepeatedExpenseRule.MilestoneChoices.start:
                event_timefilters = event_timefilters & \
                    Q(startTime__lte=timezone.now() + timedelta(days=rule.advanceDays))
        if rule.priorDays:
            if rule.priorDaysReference == RepeatedExpenseRule.MilestoneChoices.end:
                event_timefilters = event_timefilters & \
                    Q(endTime__gte=timezone.now() - timedelta(days=rule.priorDays))
            elif rule.priorDaysReference == RepeatedExpenseRule.MilestoneChoices.start:
                event_timefilters = event_timefilters & \
                    Q(startTime__gte=timezone.now() - timedelta(days=rule.priorDays))
        if rule.startDate:
            event_timefilters = event_timefilters & Q(
                event__startTime__gte=timezone.now().replace(
                    year=rule.startDate.year, month=rule.startDate.month,
                    day=rule.startDate.day, hour=0, minute=0, second=0,
                    microsecond=0,
                )
            )
        if rule.endDate:
            event_timefilters = event_timefilters & Q(
                event__startTime__lte=timezone.now().replace(
                    year=rule.endDate.year, month=rule.endDate.month,
                    day=rule.endDate.day, hour=0, minute=0, second=0,
                    microsecond=0,
                )
            )

        # For construction of expense descriptions
        replacements = {
            'type': _('Event/Series venue rental'),
            'of': _('of'),
            'location': venue.name,
            'for': _('for'),
        }

        # Loop through Events for which there are not already directly
        # allocated expenses under this rule, and create new ExpenseItems
        # for them depending on whether the rule requires hourly expenses
        # or non-hourly ones to be generated.
        events = Event.objects.filter(event_locfilter & event_timefilters).exclude(
            Q(expenseitem__expenseRule=rule)).distinct()

        if rule.applyRateRule == rule.RateRuleChoices.hourly:
            for this_event in events:
                # Hourly expenses are always generated without checking for
                # overlapping windows, because the periods over which
                # hourly expenses are defined are disjoint. However, hourly
                # expenses are allocated directly to events, so we just
                # need to create expenses for any events that do not
                # already have an Expense Item generated under this rule.
                replacements['name'] = this_event.name
                replacements['dates'] = this_event.localStartTime.strftime('%Y-%m-%d')
                if (
                    this_event.localStartTime.strftime('%Y-%m-%d') !=
                    this_event.localEndTime.strftime('%Y-%m-%d')
                ):
                    replacements['dates'] += ' %s %s' % (
                        _('to'), this_event.localEndTime.strftime('%Y-%m-%d')
                    )

                ExpenseItem.objects.create(
                    event=this_event,
                    category=rental_category,
                    payTo=loc_party,
                    expenseRule=rule,
                    description='%(type)s %(of)s %(location)s %(for)s: %(name)s, %(dates)s' %
                        replacements,
                    submissionUser=submissionUser,
                    total=this_event.duration * rule.rentalRate,
                    accrualDate=this_event.startTime,
                )
                generate_count += 1
        else:
            # Non-hourly expenses are generated by constructing the time
            # intervals in which the occurrence occurs, and removing from
            # that interval any intervals in which an expense has already
            # been generated under this rule (so, for example, monthly
            # rentals will not show up multiple times). So, we just need
            # to construct the set of intervals for which to construct
            # expenses.
            intervals = [
                (x.localStartTime, x.localEndTime) for x in
                EventOccurrence.objects.filter(event__in=events)
            ]
            remaining_intervals = rule.getWindowsAndTotals(intervals)

            for startTime, endTime, total, description in remaining_intervals:
                replacements['when'] = description

                ExpenseItem.objects.create(
                    category=rental_category,
                    payTo=loc_party,
                    expenseRule=rule,
                    periodStart=startTime,
                    periodEnd=endTime,
                    description='%(type)s %(of)s %(location)s %(for)s %(when)s' % replacements,
                    submissionUser=submissionUser,
                    total=total,
                    accrualDate=startTime,
                )
                generate_count += 1
    rulesToCheck.update(lastRun=timezone.now())
    return generate_count
python
def get_rectangle(self):
    """Gets the coordinates of the rectangle in which the tree can be
    put.

    Returns:
        tuple: (x1, y1, x2, y2)
    """
    rec = [self.pos[0], self.pos[1]] * 2
    for age in self.nodes:
        for node in age:
            # Check max/min for x/y coords
            for i in range(2):
                if rec[0 + i] > node.pos[i]:
                    rec[0 + i] = node.pos[i]
                elif rec[2 + i] < node.pos[i]:
                    rec[2 + i] = node.pos[i]
    return tuple(rec)
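The accumulation pattern above (seed the rectangle with the root position, then widen per axis) works without the tree classes; a standalone sketch with plain point tuples (`bounding_rect` is a hypothetical helper name):

def bounding_rect(points, origin):
    rec = [origin[0], origin[1]] * 2  # [min_x, min_y, max_x, max_y]
    for pos in points:
        for i in range(2):
            if rec[0 + i] > pos[i]:
                rec[0 + i] = pos[i]
            elif rec[2 + i] < pos[i]:
                rec[2 + i] = pos[i]
    return tuple(rec)

print(bounding_rect([(1, 2), (-3, 5)], (0, 0)))  # (-3, 0, 1, 5)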
python
def retrieve(self, id):
    """
    Retrieve a single reason

    Returns a single loss reason available to the user by the provided
    id. If a loss reason with the supplied unique identifier does not
    exist, it returns an error.

    :calls: ``get /loss_reasons/{id}``
    :param int id: Unique identifier of a LossReason.
    :return: Dictionary that supports attribute-style access and
        represents a LossReason resource.
    :rtype: dict
    """
    _, _, loss_reason = self.http_client.get("/loss_reasons/{id}".format(id=id))
    return loss_reason
python
def _get_random_fp_raw():
    '''
    Generate a random raw fingerprint list
    '''
    fp_list = []
    fp_list.append(get_random_ua())  # ua
    fp_list.append('zh-CN')  # language
    fp_list.append('24')  # color depth
    fp_list.append(__get_random_screen_resolution())
    fp_list.append('-480')  # time zone offset
    fp_list.append('true')  # session storage
    fp_list.append('true')  # local storage
    fp_list.append('true')  # indexed db
    fp_list.append('')  # add behavior
    fp_list.append('function')  # open database
    fp_list.append('')  # cpu class
    fp_list.append('MacIntel')  # platform
    fp_list.append('')  # do not track
    fp_list.append(
        'Widevine Content Decryption Module::Enables Widevine \
licenses for playback of HTML audio/video content. \
(version: 1.4.8.962)::application/x-ppapi-widevine-cdm~;'
    )  # plugin string
    return fp_list
python
def _ParseInsserv(self, data):
    """/etc/insserv.conf* entries define system facilities.

    Full format details are in man 8 insserv, but the basic structure is:

      $variable          facility1 facility2
      $second_variable   facility3 $variable

    Any init script that specifies Required-Start: $second_variable needs
    to be expanded to facility1 facility2 facility3.

    Args:
      data: A string of insserv definitions.
    """
    p = config_file.FieldParser()
    entries = p.ParseEntries(data)
    raw = {e[0]: e[1:] for e in entries}

    # Now expand out the facilities to services.
    facilities = {}
    for k, v in iteritems(raw):
        # Remove interactive tags.
        k = k.replace("<", "").replace(">", "")
        facilities[k] = v
    for k, vals in iteritems(facilities):
        self.insserv[k] = []
        for v in vals:
            self.insserv[k].extend(self._InsservExpander(facilities, v))
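The expansion itself is delegated to _InsservExpander, which is not shown; a standalone sketch of the idea it implements (recursively replacing $-prefixed names with their facility lists) under that assumption:

facilities = {"$variable": ["facility1", "facility2"],
              "$second_variable": ["facility3", "$variable"]}

def expand(facilities, name):
    # sketch: recursive expansion of $-prefixed facility names
    out = []
    for v in facilities.get(name, [name]):
        if v.startswith("$"):
            out.extend(expand(facilities, v))
        else:
            out.append(v)
    return out

print(expand(facilities, "$second_variable"))
# ['facility3', 'facility1', 'facility2']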
python
def get_rfu():
    """
    Returns a list of all "regular file urls" for all plugins.
    """
    global _rfu
    if _rfu:
        return _rfu

    plugins = plugins_base_get()
    rfu = []
    for plugin in plugins:
        if isinstance(plugin.regular_file_url, str):
            rfu.append(plugin.regular_file_url)
        else:
            rfu += plugin.regular_file_url
    _rfu = rfu
    return rfu
python
def FetchRequestsAndResponses(self, session_id, timestamp=None):
  """Fetches all outstanding requests and responses for this flow.

  We first cache all requests and responses for this flow in memory to
  prevent round trips.

  Args:
    session_id: The session_id to get the requests/responses for.
    timestamp: Tuple (start, end) with a time range. Fetched requests and
        responses will have timestamps in this range.

  Yields:
    A tuple (request protobuf, list of response messages) in ascending
    order of request ids.

  Raises:
    MoreDataException: When there is more data available than was read by
        the limited query.
  """
  if timestamp is None:
    timestamp = (0, self.frozen_timestamp or rdfvalue.RDFDatetime.Now())

  num_requests = 0
  for request, responses in self.data_store.ReadRequestsAndResponses(
      session_id,
      timestamp=timestamp,
      request_limit=self.request_limit,
      response_limit=self.response_limit):
    yield (request, responses)
    num_requests += 1

  if num_requests >= self.request_limit:
    raise MoreDataException()
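The yield-then-check pattern above is easy to reproduce in isolation; a minimal sketch showing how a caller distinguishes a complete read from a truncated one (names are illustrative, not GRR APIs):

class MoreDataException(Exception):
    """Signals that the limited query may have left data unread."""

def fetch_limited(items, limit):
    """Yield at most `limit` items, then signal that more may remain."""
    count = 0
    for item in items[:limit]:
        yield item
        count += 1
    if count >= limit:
        raise MoreDataException()

try:
    for item in fetch_limited(list(range(10)), limit=3):
        print(item)
except MoreDataException:
    print('hit the limit; schedule another pass')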
java
private Buffer consumeUntil(final Buffer name) {
    try {
        while (this.params.hasReadableBytes()) {
            SipParser.consumeSEMI(this.params);
            final Buffer[] keyValue = SipParser.consumeGenericParam(this.params);
            ensureParamsMap();
            final Buffer value = keyValue[1] == null ? Buffers.EMPTY_BUFFER : keyValue[1];
            this.paramMap.put(keyValue[0], value);
            if (name != null && name.equals(keyValue[0])) {
                return value;
            }
        }
        return null;
    } catch (final IndexOutOfBoundsException e) {
        throw new SipParseException(this.params.getReaderIndex(),
                "Unable to process the value due to an IndexOutOfBoundsException", e);
    } catch (final IOException e) {
        throw new SipParseException(this.params.getReaderIndex(),
                "Could not read from the underlying stream while parsing the value", e);
    }
}
java
public static <T> T max(Collection<? extends T> coll, Comparator<? super T> comp) { return Collections.max(coll, comp); }
python
def export(self, path, epoch=0):
    """Export HybridBlock to json format that can be loaded by
    `SymbolBlock.imports`, `mxnet.mod.Module` or the C++ interface.

    .. note:: When there is only one input, it will be named `data`. When there
              is more than one input, they will be named `data0`, `data1`, etc.

    Parameters
    ----------
    path : str
        Path to save model. Two files `path-symbol.json` and `path-xxxx.params`
        will be created, where xxxx is the four-digit epoch number.
    epoch : int
        Epoch number of saved model.
    """
    if not self._cached_graph:
        raise RuntimeError(
            "Please first call block.hybridize() and then run forward with "
            "this block at least once before calling export.")
    sym = self._cached_graph[1]
    sym.save('%s-symbol.json'%path)

    arg_names = set(sym.list_arguments())
    aux_names = set(sym.list_auxiliary_states())
    arg_dict = {}
    for name, param in self.collect_params().items():
        if name in arg_names:
            arg_dict['arg:%s'%name] = param._reduce()
        else:
            assert name in aux_names
            arg_dict['aux:%s'%name] = param._reduce()
    ndarray.save('%s-%04d.params'%(path, epoch), arg_dict)
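A typical call sequence implied by the docstring and the RuntimeError above: hybridize, run one forward pass so the cached graph exists, then export. A sketch assuming an installed mxnet; the layer and output path are illustrative:

import mxnet as mx
from mxnet.gluon import nn

net = nn.HybridSequential()
net.add(nn.Dense(10))
net.initialize()
net.hybridize()                 # build the cached graph on the next forward
net(mx.nd.ones((1, 4)))         # run forward once so _cached_graph is set
net.export('model', epoch=0)    # writes model-symbol.json, model-0000.params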
java
public FacesMessage getFacesMessage() { FacesMessage result = this.message; if (null == result && null != this.messages && !this.messages.isEmpty()) { result = messages.iterator().next(); } return result; }
python
def new(self, array):
    """
    Convert an array of compatible length into a DictArray:

    >>> d = DictArray({'PGA': [0.01, 0.02, 0.04], 'PGV': [0.1, 0.2]})
    >>> d.new(numpy.arange(0, 5, 1))  # array of length 5 = 3 + 2
    <DictArray
    PGA: [0 1 2]
    PGV: [3 4]>
    """
    assert len(self.array) == len(array)
    arr = object.__new__(self.__class__)
    arr.dt = self.dt
    arr.slicedic = self.slicedic
    arr.array = array
    return arr
java
private void read(int bit , int row , int col ) { int value = gridReader.readBit(row,col); if( value == -1 ) { // The requested region is outside the image. A partial QR code can be read so let's just // assign it a value of zero and let error correction handle this value = 0; } bits.set(bit,value); }
java
public static void doFile(File f) throws Exception {
    // If this is a directory, walk each file/dir in that directory
    if (f.isDirectory()) {
        String[] files = f.list();
        for (int i = 0; i < files.length; i++)
            doFile(new File(f, files[i]));
    }
    // otherwise, if this is a groovy file, parse it!
    else if (f.getName().endsWith(".groovy")) {
        System.err.println(" --- "+f.getAbsolutePath());
        // parseFile(f.getName(), new FileInputStream(f));
        SourceBuffer sourceBuffer = new SourceBuffer();
        UnicodeEscapingReader unicodeReader = new UnicodeEscapingReader(new FileReader(f), sourceBuffer);
        GroovyLexer lexer = new GroovyLexer(unicodeReader);
        unicodeReader.setLexer(lexer);
        parseFile(f.getName(), lexer, sourceBuffer);
    }
}

// Here's where we do the real work...
public static void parseFile(String f, GroovyLexer l, SourceBuffer sourceBuffer) throws Exception {
    try {
        // Create a parser that reads from the scanner
        GroovyRecognizer parser = GroovyRecognizer.make(l);
        parser.setSourceBuffer(sourceBuffer);
        parser.setFilename(f);

        if (whitespaceIncluded) {
            GroovyLexer lexer = parser.getLexer();
            lexer.setWhitespaceIncluded(true);
            while (true) {
                Token t = lexer.nextToken();
                System.out.println(t);
                if (t == null || t.getType() == Token.EOF_TYPE) break;
            }
            return;
        }

        // start parsing at the compilationUnit rule
        parser.compilationUnit();
        System.out.println("parseFile "+f+" => "+parser.getAST());

        // do something with the tree
        doTreeAction(f, parser.getAST(), parser.getTokenNames());
    }
    catch (Exception e) {
        // report parse errors but keep walking the remaining files
        System.err.println("parser exception: "+e);
        e.printStackTrace();
    }
}
java
public void setXCSizeD(Integer newXCSizeD) { Integer oldXCSizeD = xcSizeD; xcSizeD = newXCSizeD; if (eNotificationRequired()) eNotify(new ENotificationImpl(this, Notification.SET, AfplibPackage.IID__XC_SIZE_D, oldXCSizeD, xcSizeD)); }
python
async def _request( self, method: str, endpoint: str, *, headers: dict = None, params: dict = None, json: dict = None, ssl: bool = True) -> dict: """Wrap the generic request method to add access token, etc.""" return await self._client_request( method, '{0}/{1}'.format(self._host, endpoint), access_token=self._access_token, access_token_expiration=self._access_token_expiration, headers=headers, params=params, json=json, ssl=ssl)
java
public String convertIfcSanitaryTerminalTypeEnumToString(EDataType eDataType, Object instanceValue) { return instanceValue == null ? null : instanceValue.toString(); }
python
def readStoredSms(self, index, memory=None): """ Reads and returns the SMS message at the specified index :param index: The index of the SMS message in the specified memory :type index: int :param memory: The memory type to read from. If None, use the current default SMS read memory :type memory: str or None :raise CommandError: if unable to read the stored message :return: The SMS message :rtype: subclass of gsmmodem.modem.Sms (either ReceivedSms or StatusReport) """ # Switch to the correct memory type if required self._setSmsMemory(readDelete=memory) msgData = self.write('AT+CMGR={0}'.format(index)) # Parse meta information if self._smsTextMode: cmgrMatch = self.CMGR_SM_DELIVER_REGEX_TEXT.match(msgData[0]) if cmgrMatch: msgStatus, number, msgTime = cmgrMatch.groups() msgText = '\n'.join(msgData[1:-1]) return ReceivedSms(self, Sms.TEXT_MODE_STATUS_MAP[msgStatus], number, parseTextModeTimeStr(msgTime), msgText) else: # Try parsing status report cmgrMatch = self.CMGR_SM_REPORT_REGEXT_TEXT.match(msgData[0]) if cmgrMatch: msgStatus, reference, number, sentTime, deliverTime, deliverStatus = cmgrMatch.groups() if msgStatus.startswith('"'): msgStatus = msgStatus[1:-1] if len(msgStatus) == 0: msgStatus = "REC UNREAD" return StatusReport(self, Sms.TEXT_MODE_STATUS_MAP[msgStatus], int(reference), number, parseTextModeTimeStr(sentTime), parseTextModeTimeStr(deliverTime), int(deliverStatus)) else: raise CommandError('Failed to parse text-mode SMS message +CMGR response: {0}'.format(msgData)) else: cmgrMatch = self.CMGR_REGEX_PDU.match(msgData[0]) if not cmgrMatch: raise CommandError('Failed to parse PDU-mode SMS message +CMGR response: {0}'.format(msgData)) stat, alpha, length = cmgrMatch.groups() try: stat = int(stat) except Exception: # Some modems (ZTE) do not always read return status - default to RECEIVED UNREAD stat = Sms.STATUS_RECEIVED_UNREAD pdu = msgData[1] smsDict = decodeSmsPdu(pdu) if smsDict['type'] == 'SMS-DELIVER': return ReceivedSms(self, int(stat), smsDict['number'], smsDict['time'], smsDict['text'], smsDict['smsc']) elif smsDict['type'] == 'SMS-STATUS-REPORT': return StatusReport(self, int(stat), smsDict['reference'], smsDict['number'], smsDict['time'], smsDict['discharge'], smsDict['status']) else: raise CommandError('Invalid PDU type for readStoredSms(): {0}'.format(smsDict['type']))
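A hedged usage sketch for the method above, assuming the python-gsmmodem GsmModem class; the serial port, baud rate, and slot index are placeholders:

from gsmmodem.modem import GsmModem

modem = GsmModem('/dev/ttyUSB0', 115200)  # port and baud rate are assumptions
modem.connect()
try:
    sms = modem.readStoredSms(1)  # read slot 1 from the default memory
    # For a ReceivedSms; a StatusReport carries delivery metadata instead.
    print(sms.number, sms.time, sms.text)
finally:
    modem.close()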
java
public void setColumnClassName(final String columnClassName) throws ClassNotFoundException { if (columnClassName.equals("byte[]")) { setColumnClass(Object.class); } else { setColumnClass(Class.forName(columnClassName)); } this.columnClassName = columnClassName; }
java
public void handleConnect(HttpServletRequest request, HttpServletResponse response) throws IOException { String uri = request.getRequestURI(); context.log("CONNECT: "+uri); InetAddrPort addrPort=new InetAddrPort(uri); //if (isForbidden(HttpMessage.__SSL_SCHEME,addrPort.getHost(),addrPort.getPort(),false)) //{ // sendForbid(request,response,uri); //} //else { InputStream in=request.getInputStream(); OutputStream out=response.getOutputStream(); Socket socket = new Socket(addrPort.getInetAddress(),addrPort.getPort()); context.log("Socket: "+socket); response.setStatus(200); response.setHeader("Connection","close"); response.flushBuffer(); System.err.println(response); context.log("out<-in"); IO.copyThread(socket.getInputStream(),out); context.log("in->out"); IO.copy(in,socket.getOutputStream()); } }
python
def root_and_children_to_graph(self, root):
    """Take a root node and its children and make them into a single graph"""
    g = Graph()
    g.add_node(root)
    edges = []
    edges += self.get_node_edges(root, "outgoing")
    for c in self.get_children(root):
        g.add_node(c)
        edges += self.get_node_edges(c, "outgoing")
    for e in edges:
        g.add_edge(e)
    return g
python
def summarize(self, geom, stat=None): """Returns a new RasterQuerySet with subsetted/summarized ndarrays. Arguments: geom -- geometry for masking or spatial subsetting Keyword args: stat -- any numpy summary stat method as str (min/max/mean/etc) """ if not hasattr(geom, 'num_coords'): raise TypeError('Need OGR or GEOS geometry, %s found' % type(geom)) clone = self._clone() for obj in clone: arr = obj.array(geom) if arr is not None: if stat: arr = agg_dims(arr, stat) try: arr = arr.squeeze() except ValueError: pass obj.image = arr return clone
java
public static String replaceNorwegianLetters(CharSequence text) { Pattern norwegianLettersPattern = Pattern.compile("\u00F8|\u00D8|\u00E6|\u00C6|\u00E5|\u00C5"); Matcher norwegianLetterMatcher = norwegianLettersPattern.matcher(text); StringBuffer replacedText = new StringBuffer(); while (norwegianLetterMatcher.find()) { norwegianLetterMatcher.appendReplacement(replacedText, replacements.get(norwegianLetterMatcher.group())); } norwegianLetterMatcher.appendTail(replacedText); return replacedText.toString(); }
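The same scan-and-map replacement reads compactly in Python with re.sub and a callback; the ASCII transliterations below are assumptions, since the Java `replacements` map is defined elsewhere:

import re

REPLACEMENTS = {'ø': 'o', 'Ø': 'O', 'æ': 'ae', 'Æ': 'AE', 'å': 'a', 'Å': 'A'}

def replace_norwegian_letters(text):
    """Substitute each Norwegian letter via a lookup in REPLACEMENTS."""
    return re.sub('[øØæÆåÅ]', lambda m: REPLACEMENTS[m.group()], text)

print(replace_norwegian_letters('Blåbærsyltetøy'))  # Blabaersyltetoy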
java
public static <T> QueueFactory<T> boundedNonBlockingQueue(final int queueSize, final WaitStrategy<T> strategy) { return () -> new Queue<T>( new ManyToOneConcurrentArrayQueue<>( queueSize), strategy, strategy); }
python
def create_tcp_monitor(self, topics, batch_size=1, batch_duration=0,
                       compression='gzip', format_type='json'):
    """Creates a TCP Monitor instance in Device Cloud for a given list of topics

    :param topics: a string list of topics (e.g. ['DeviceCore[U]',
        'FileDataCore']).
    :param batch_size: How many Msgs received before sending data.
    :param batch_duration: How long to wait before sending batch if it
        does not exceed batch_size.
    :param compression: Compression value (i.e. 'gzip').
    :param format_type: What format server should send data in (i.e. 'xml' or 'json').

    Returns an object of the created Monitor
    """
    monitor_xml = """\
    <Monitor>
        <monTopic>{topics}</monTopic>
        <monBatchSize>{batch_size}</monBatchSize>
        <monBatchDuration>{batch_duration}</monBatchDuration>
        <monFormatType>{format_type}</monFormatType>
        <monTransportType>tcp</monTransportType>
        <monCompression>{compression}</monCompression>
    </Monitor>
    """.format(
        topics=','.join(topics),
        batch_size=batch_size,
        batch_duration=batch_duration,
        format_type=format_type,
        compression=compression,
    )
    monitor_xml = textwrap.dedent(monitor_xml)

    response = self._conn.post("/ws/Monitor", monitor_xml)
    location = ET.fromstring(response.text).find('.//location').text
    monitor_id = int(location.split('/')[-1])
    return TCPDeviceCloudMonitor(self._conn, monitor_id, self._tcp_client_manager)
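A hedged usage sketch, assuming a devicecloud-style client that exposes this monitor API (credentials and topics are placeholders):

from devicecloud import DeviceCloud

dc = DeviceCloud('user', 'password')  # placeholder credentials
# Batch up to 10 messages, or flush after 60 seconds, whichever comes first.
monitor = dc.monitor.create_tcp_monitor(['DeviceCore[U]', 'FileDataCore'],
                                        batch_size=10, batch_duration=60)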
python
def _build_url(url, _params): """Build the actual URL to use.""" # Support for unicode domain names and paths. scheme, netloc, path, params, query, fragment = urlparse(url) netloc = netloc.encode('idna').decode('utf-8') if not path: path = '/' if six.PY2: if isinstance(scheme, six.text_type): scheme = scheme.encode('utf-8') if isinstance(netloc, six.text_type): netloc = netloc.encode('utf-8') if isinstance(path, six.text_type): path = path.encode('utf-8') if isinstance(params, six.text_type): params = params.encode('utf-8') if isinstance(query, six.text_type): query = query.encode('utf-8') if isinstance(fragment, six.text_type): fragment = fragment.encode('utf-8') enc_params = _encode_params(_params) if enc_params: if query: query = '%s&%s' % (query, enc_params) else: query = enc_params url = (urlunparse([scheme, netloc, path, params, query, fragment])) return url
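The IDNA round-trip applied to the netloc is standard-library behaviour and can be verified in isolation:

# Unicode hostnames are punycode-encoded before being reassembled into the URL.
netloc = 'bücher.example'
print(netloc.encode('idna').decode('utf-8'))  # xn--bcher-kva.example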
java
protected final Table createBlankTable(HsqlName name) { Table table = new Table(database, name, TableBase.SYSTEM_TABLE); return table; }
java
public static ZoneTransferIn newAXFR(Name zone, String host, int port, TSIG key) throws UnknownHostException { if (port == 0) port = SimpleResolver.DEFAULT_PORT; return newAXFR(zone, new InetSocketAddress(host, port), key); }
java
private void handleStorageException( final StorageException exs, final CloudStorageRetryHandler retryHandler) throws IOException { boolean shouldReopen = retryHandler.handleStorageException(exs); if (shouldReopen) { // these errors aren't marked as retryable since the channel is closed; // but here at this higher level we can retry them. innerOpen(); } }
java
private RelationType getRelationType(int value)
{
   RelationType result = null;
   // Both bounds must hold for a valid array index, so use && rather than ||.
   if (value >= 0 && value < RELATION_TYPES.length)
   {
      result = RELATION_TYPES[value];
   }
   if (result == null)
   {
      result = RelationType.FINISH_START;
   }
   return result;
}
java
private void updateFederatedManagerService() { if (!activated) { return; } if (profileManager.getReference() != null) { Tr.info(tc, "FEDERATED_MANAGER_SERVICE_READY"); } else { if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) { Tr.debug(tc, "Some required federated manager services are not available."); } } }
python
def get_common_name(self): ''' Get a flower's common name ''' name = random.choice(self.common_first) if random.randint(0, 1) == 1: name += ' ' + random.choice(self.common_first).lower() name += ' ' + random.choice(self.common_second).lower() return name