Columns: language (stringclasses, 2 values) · func_code_string (stringlengths, 63 to 466k)
python
def present(name,
            type,
            url,
            access=None,
            user=None,
            password=None,
            database=None,
            basic_auth=None,
            basic_auth_user=None,
            basic_auth_password=None,
            tls_auth=None,
            json_data=None,
            is_default=None,
            with_credentials=None,
            type_logo_url=None,
            orgname=None,
            profile='grafana'):
    '''
    Ensure that a data source is present.

    name
        Name of the data source.

    type
        Type of the datasource ('graphite', 'influxdb' etc.).

    access
        Use proxy or direct. Default: proxy

    url
        The URL to the data source API.

    user
        Optional - user to authenticate with the data source.

    password
        Optional - password to authenticate with the data source.

    database
        Optional - database to use with the data source.

    basic_auth
        Optional - set to True to use HTTP basic auth to authenticate with the
        data source.

    basic_auth_user
        Optional - HTTP basic auth username.

    basic_auth_password
        Optional - HTTP basic auth password.

    json_data
        Optional - additional json data to post (eg. "timeInterval").

    is_default
        Optional - set data source as default.

    with_credentials
        Optional - Whether credentials such as cookies or auth headers should
        be sent with cross-site requests.

    type_logo_url
        Optional - Logo to use for this datasource.

    orgname
        Name of the organization in which the data source should be present.

    profile
        Configuration profile used to connect to the Grafana instance.
        Default is 'grafana'.
    '''
    if isinstance(profile, string_types):
        profile = __salt__['config.option'](profile)

    ret = {'name': name, 'result': None, 'comment': None, 'changes': {}}
    datasource = __salt__['grafana4.get_datasource'](name, orgname, profile)
    data = _get_json_data(
        name=name,
        type=type,
        url=url,
        access=access,
        user=user,
        password=password,
        database=database,
        basicAuth=basic_auth,
        basicAuthUser=basic_auth_user,
        basicAuthPassword=basic_auth_password,
        tlsAuth=tls_auth,
        jsonData=json_data,
        isDefault=is_default,
        withCredentials=with_credentials,
        typeLogoUrl=type_logo_url,
        defaults=datasource)

    if not datasource:
        if __opts__['test']:
            ret['comment'] = 'Datasource {0} will be created'.format(name)
            return ret
        __salt__['grafana4.create_datasource'](profile=profile, **data)
        datasource = __salt__['grafana4.get_datasource'](name, profile=profile)
        ret['result'] = True
        ret['comment'] = 'New data source {0} added'.format(name)
        ret['changes'] = data
        return ret

    # At this stage, the datasource exists; however, the object provided by
    # Grafana may lack some null keys compared to our "data" dict:
    for key in data:
        if key not in datasource:
            datasource[key] = None

    if data == datasource:
        ret['comment'] = 'Data source {0} already up-to-date'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'Datasource {0} will be updated'.format(name)
        return ret
    __salt__['grafana4.update_datasource'](
        datasource['id'], profile=profile, **data)
    ret['result'] = True
    ret['changes'] = deep_diff(
        datasource, data, ignore=['id', 'orgId', 'readOnly'])
    ret['comment'] = 'Data source {0} updated'.format(name)
    return ret
java
public static <T> Iterator<T> drop(final Iterator<T> iterator, final int count) {
    if (iterator == null)
        throw new NullPointerException("iterator");
    if (count == 0)
        return iterator;
    if (count < 0)
        throw new IllegalArgumentException(
                "Cannot drop a negative number of elements. Argument 'count' was: " + count);

    return new AbstractIterator<T>() {
        {
            int i = count;
            while (i > 0 && iterator.hasNext()) {
                iterator.next();
                i--;
            }
        }

        @Override
        protected T computeNext() {
            if (!iterator.hasNext())
                return endOfData();
            return iterator.next();
        }
    };
}
java
private void resize(Object[] oldTable, int newSize) {
    int oldCapacity = oldTable.length;
    int end = oldCapacity - 1;
    Object last = arrayAt(oldTable, end);
    if (this.size() < ((end * 3) >> 2) && last == RESIZE_SENTINEL) {
        return;
    }
    if (oldCapacity >= MAXIMUM_CAPACITY) {
        throw new RuntimeException("max capacity of map exceeded");
    }
    ResizeContainer resizeContainer = null;
    boolean ownResize = false;
    if (last == null || last == RESIZE_SENTINEL) {
        // allocating a new array is too expensive to make this an atomic operation
        synchronized (oldTable) {
            if (arrayAt(oldTable, end) == null) {
                setArrayAt(oldTable, end, RESIZE_SENTINEL);
                resizeContainer = new ResizeContainer(allocateTable(newSize), oldTable.length - 2);
                setArrayAt(oldTable, end, resizeContainer);
                ownResize = true;
            }
        }
    }
    if (ownResize) {
        this.transfer(oldTable, resizeContainer);
        Object[] src = this.table;
        while (!TABLE_UPDATER.compareAndSet(this, oldTable, resizeContainer.nextArray)) {
            // we're in a double resize situation; we'll have to go help until it's our turn to set the table
            if (src != oldTable) {
                this.helpWithResize(src);
            }
        }
    } else {
        this.helpWithResize(oldTable);
    }
}
python
def raw_clean(self, datas):
    """
    Apply a cleaning on raw datas.
    """
    datas = strip_tags(datas)             # Remove HTML
    datas = STOP_WORDS.rebase(datas, '')  # Remove STOP WORDS
    datas = PUNCTUATION.sub('', datas)    # Remove punctuation
    datas = datas.lower()
    return [d for d in datas.split() if len(d) > 1]
python
def _safe_string(self, source, encoding='utf-8'):
    """Convert unicode to string as gnomekeyring barfs on unicode"""
    if not isinstance(source, str):
        return source.encode(encoding)
    return str(source)
python
def pause(name):
    '''
    Pauses a container

    name
        Container name or ID

    **RETURN DATA**

    A dictionary will be returned, containing the following keys:

    - ``status`` - A dictionary showing the prior state of the container as
      well as the new state
    - ``result`` - A boolean noting whether or not the action was successful
    - ``comment`` - Only present if the container cannot be paused

    CLI Example:

    .. code-block:: bash

        salt myminion docker.pause mycontainer
    '''
    orig_state = state(name)
    if orig_state == 'stopped':
        return {'result': False,
                'state': {'old': orig_state, 'new': orig_state},
                'comment': ('Container \'{0}\' is stopped, cannot pause'
                            .format(name))}
    return _change_state(name, 'pause', 'paused')
python
def get_id(self, natural_key, date, enhancement=None):
    """
    Returns the technical ID for a natural key at a date or None if the
    given natural key is not valid.

    :param T natural_key: The natural key.
    :param str date: The date in ISO 8601 (YYYY-MM-DD) format.
    :param T enhancement: Enhancement data of the dimension row.

    :rtype: int|None
    """
    if not date:
        return None

    # If the natural key is known return the technical ID immediately.
    if natural_key in self._map:
        for row in self._map[natural_key]:
            if row[0] <= date <= row[1]:
                return row[2]

    # The natural key is not in the map of this dimension. Call a stored
    # procedure for translating the natural key to a technical key.
    self.pre_call_stored_procedure()
    success = False
    try:
        row = self.call_stored_procedure(natural_key, date, enhancement)
        # Convert dates to strings in ISO 8601 format.
        if isinstance(row[self._key_date_start], datetime.date):
            row[self._key_date_start] = row[self._key_date_start].isoformat()
        if isinstance(row[self._key_date_end], datetime.date):
            row[self._key_date_end] = row[self._key_date_end].isoformat()
        success = True
    finally:
        self.post_call_stored_procedure(success)

    # Make sure the natural key is in the map.
    if natural_key not in self._map:
        self._map[natural_key] = []

    if row[self._key_key]:
        self._map[natural_key].append((row[self._key_date_start],
                                       row[self._key_date_end],
                                       row[self._key_key]))
    else:
        self._map[natural_key].append((date, date, None))

    return row[self._key_key]
python
def get_next(cls, task, releasetype, typ, descriptor=None):
    """Returns a TaskFileInfo with the next available version and the
    provided info

    :param task: the task of the taskfile
    :type task: :class:`jukeboxcore.djadapter.models.Task`
    :param releasetype: the releasetype
    :type releasetype: str - :data:`jukeboxcore.djadapter.RELEASETYPES`
    :param typ: the file type, see :data:`TaskFileInfo.TYPES`
    :type typ: str
    :param descriptor: the descriptor, if the taskfile has one.
    :type descriptor: str|None
    :returns: taskfileinfo object with the next available version and the provided info
    :rtype: :class:`TaskFileInfo`
    :raises: None
    """
    qs = dj.taskfiles.filter(task=task, releasetype=releasetype,
                             descriptor=descriptor, typ=typ)
    if qs.exists():
        ver = qs.aggregate(Max('version'))['version__max'] + 1
    else:
        ver = 1
    return TaskFileInfo(task=task, version=ver, releasetype=releasetype,
                        typ=typ, descriptor=descriptor)
python
def read_config_environment(self, config_data=None, quiet=False):
    """read_config_environment is the second effort to get a username
    and key to authenticate to the Kaggle API. The environment keys
    are equivalent to the kaggle.json file, but with "KAGGLE_" prefix
    to define a unique namespace.

    Parameters
    ==========
    config_data: a partially loaded configuration dictionary (optional)
    quiet: suppress verbose print of output (default is False)
    """
    # Add all variables that start with KAGGLE_ to config data
    if config_data is None:
        config_data = {}
    for key, val in os.environ.items():
        if key.startswith('KAGGLE_'):
            config_key = key.replace('KAGGLE_', '', 1).lower()
            config_data[config_key] = val

    return config_data
java
@Trivial
void initForRepeatingTask(boolean isFixedRate, long initialDelay, long interval) {
    this.initialDelay = initialDelay;
    this.interval = interval;
    this.isFixedRate = isFixedRate;
}
python
def by_external_id_and_provider(cls, external_id, provider_name, db_session=None):
    """
    Returns ExternalIdentity instance based on search params

    :param external_id:
    :param provider_name:
    :param db_session:
    :return: ExternalIdentity
    """
    db_session = get_db_session(db_session)
    query = db_session.query(cls.model)
    query = query.filter(cls.model.external_id == external_id)
    query = query.filter(cls.model.provider_name == provider_name)
    return query.first()
python
def getImemb(self):
    """Gather membrane currents from PtrVector into imVec (does not need a loop!)"""
    self.imembPtr.gather(self.imembVec)
    return self.imembVec.as_numpy()
python
def yesterday(date=None):
    """yesterday once more"""
    if not date:
        return _date - datetime.timedelta(days=1)
    else:
        current_date = parse(date)
        return current_date - datetime.timedelta(days=1)
java
public void replaceStringChildren(List<String> strings, String parentId) {
    ArrayList<StringEntity> entities = new ArrayList<>();
    for (String string : strings) {
        if (string != null) {
            StringEntity entity = new StringEntity();
            entity.setParentId(parentId);
            entity.setValue(string);
            entities.add(entity);
        }
    }
    replaceChildren(entities, parentId);
}
java
@Override
protected void checkIfHeartbeatSkipped(String name, EventChannelStruct eventChannelStruct) {
    // Check if heartbeat has been skipped; can happen if
    //  1- the notifd is dead (if not ZMQ)
    //  2- the server is dead
    //  3- the network was down
    //  4- the server has been restarted on another host
    // long now = System.currentTimeMillis();
    // boolean heartbeat_skipped =
    //         ((now - eventChannelStruct.last_heartbeat) > KeepAliveThread.getHeartBeatPeriod());
    if (KeepAliveThread.heartbeatHasBeenSkipped(eventChannelStruct) ||
            eventChannelStruct.heartbeat_skipped || eventChannelStruct.notifd_failed) {
        eventChannelStruct.heartbeat_skipped = true;

        // Check notifd by trying to read an attribute of the event channel
        DevError dev_error = null;
        try {
            eventChannelStruct.eventChannel.MyFactory();
            // Check if DS is now running on another host
            if (checkIfHostHasChanged(eventChannelStruct))
                eventChannelStruct.notifd_failed = true;
        } catch (RuntimeException e1) {
            // MyFactory has failed
            dev_error = new DevError();
            dev_error.severity = ErrSeverity.ERR;
            dev_error.origin = "NotifdEventConsumer.checkIfHeartbeatSkipped()";
            dev_error.reason = "API_EventException";
            dev_error.desc = "Connection failed with notify daemon";
            // Try to add reason
            int pos = e1.toString().indexOf(":");
            if (pos > 0)
                dev_error.desc += " (" + e1.toString().substring(0, pos) + ")";
            eventChannelStruct.notifd_failed = true;

            // Reset the event import info stored in the DeviceProxy object.
            // Until today, this feature is used only by Astor (import with external info).
            try {
                DeviceProxyFactory.get(name,
                        eventChannelStruct.dbase.getUrl().getTangoHost()).set_evt_import_info(null);
            } catch (DevFailed e) {
                System.err.println("API received a DevFailed : " + e.errors[0].desc);
            }
        }

        // Force to reconnect if not using database
        if (!eventChannelStruct.use_db)
            eventChannelStruct.notifd_failed = true;

        // Check if notifd has closed the connection many times (network blank)
        if (!eventChannelStruct.notifd_failed &&
                eventChannelStruct.has_notifd_closed_the_connection >= 3)
            eventChannelStruct.notifd_failed = true;

        // If notifd_failed --> try to reconnect
        if (eventChannelStruct.notifd_failed) {
            eventChannelStruct.notifd_failed = !reconnect_to_channel(name);
            if (!eventChannelStruct.notifd_failed)
                reconnect_to_event(name);
        }

        Enumeration callback_structs = EventConsumer.getEventCallbackMap().elements();
        while (callback_structs.hasMoreElements()) {
            EventCallBackStruct callback_struct = (EventCallBackStruct) callback_structs.nextElement();
            if (callback_struct.channel_name.equals(name)) {
                // Push exception
                if (dev_error != null)
                    pushReceivedException(eventChannelStruct, callback_struct, dev_error);
                else
                    pushServerNotRespondingException(eventChannelStruct, callback_struct);

                // If reconnection done, try to re-subscribe
                // and read attribute in synchronous mode
                if (!callback_struct.event_name.equals(eventNames[DATA_READY_EVENT]))
                    if (!eventChannelStruct.notifd_failed)
                        if (eventChannelStruct.consumer.reSubscribe(eventChannelStruct, callback_struct))
                            readAttributeAndPush(eventChannelStruct, callback_struct);
            }
        }
    } // end if heartbeat_skipped
    else
        eventChannelStruct.has_notifd_closed_the_connection = 0;
}
python
def generate_regular_range(start, end, periods, freq):
    """
    Generate a range of dates with the spans between dates described by
    the given `freq` DateOffset.

    Parameters
    ----------
    start : Timestamp or None
        first point of produced date range
    end : Timestamp or None
        last point of produced date range
    periods : int
        number of periods in produced date range
    freq : DateOffset
        describes space between dates in produced date range

    Returns
    -------
    ndarray[np.int64] representing nanosecond unix timestamps
    """
    if isinstance(freq, Tick):
        stride = freq.nanos
        if periods is None:
            b = Timestamp(start).value
            # cannot just use e = Timestamp(end) + 1 because arange breaks
            # when stride is too large, see GH10887
            e = (b + (Timestamp(end).value - b) // stride * stride +
                 stride // 2 + 1)
            # end.tz == start.tz by this point due to _generate implementation
            tz = start.tz
        elif start is not None:
            b = Timestamp(start).value
            e = _generate_range_overflow_safe(b, periods, stride, side='start')
            tz = start.tz
        elif end is not None:
            e = Timestamp(end).value + stride
            b = _generate_range_overflow_safe(e, periods, stride, side='end')
            tz = end.tz
        else:
            raise ValueError("at least 'start' or 'end' should be specified "
                             "if a 'period' is given.")

        with np.errstate(over="raise"):
            # If the range is sufficiently large, np.arange may overflow
            # and incorrectly return an empty array if not caught.
            try:
                values = np.arange(b, e, stride, dtype=np.int64)
            except FloatingPointError:
                xdr = [b]
                while xdr[-1] != e:
                    xdr.append(xdr[-1] + stride)
                values = np.array(xdr[:-1], dtype=np.int64)
    else:
        tz = None
        # start and end should have the same timezone by this point
        if start is not None:
            tz = start.tz
        elif end is not None:
            tz = end.tz

        xdr = generate_range(start=start, end=end, periods=periods, offset=freq)
        values = np.array([x.value for x in xdr], dtype=np.int64)

    return values, tz
java
public OutlierResult run(Relation<V> relation) {
    final DBIDs ids = relation.getDBIDs();
    KNNQuery<V> knnQuery = QueryUtil.getKNNQuery(relation, getDistanceFunction(), k + 1);

    final int dim = RelationUtil.dimensionality(relation);
    if (k <= dim + 1) {
        LOG.warning("PCA is underspecified with a too low k! k should be much larger than " + dim);
    }

    WritableDoubleDataStore cop_score = DataStoreUtil.makeDoubleStorage(ids,
            DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_STATIC);
    WritableDataStore<double[]> cop_err_v = models
            ? DataStoreUtil.makeStorage(ids, DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_STATIC, double[].class)
            : null;
    WritableIntegerDataStore cop_dim = models
            ? DataStoreUtil.makeIntegerStorage(ids, DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_STATIC, -1)
            : null;

    // compute neighbors of each db object
    FiniteProgress prog = LOG.isVerbose()
            ? new FiniteProgress("Correlation Outlier Probabilities", relation.size(), LOG)
            : null;

    double[] centroid = new double[dim];
    double[] scores = new double[dim];
    ModifiableDBIDs nids = DBIDUtil.newHashSet(k + 10);
    for (DBIDIter id = ids.iter(); id.valid(); id.advance()) {
        nids.clear();
        nids.addDBIDs(knnQuery.getKNNForDBID(id, k + 1));
        nids.remove(id); // Do not use query object

        computeCentroid(centroid, relation, nids);

        PCAResult pcares = pca.processIds(nids, relation);
        double[][] tevecs = pcares.getEigenvectors();
        double[] evs = pcares.getEigenvalues();
        double[] projected = times(tevecs, minusEquals(relation.get(id).toArray(), centroid));

        if (dist == DistanceDist.CHISQUARED) {
            double sqdevs = 0;
            for (int d = 0; d < dim; d++) {
                double dev = projected[d];
                // Scale with variance and accumulate
                sqdevs += dev * dev / evs[d];
                scores[d] = 1 - ChiSquaredDistribution.cdf(sqdevs, d + 1);
            }
        } else {
            assert (dist == DistanceDist.GAMMA);
            double[][] dists = new double[dim][nids.size()];
            int j = 0;
            double[] srel = new double[dim];
            for (DBIDIter s = nids.iter(); s.valid() && j < nids.size(); s.advance(), j++) {
                V vec = relation.get(s);
                for (int d = 0; d < dim; d++) {
                    srel[d] = vec.doubleValue(d) - centroid[d];
                }
                double sqdist = 0.0;
                for (int d = 0; d < dim; d++) {
                    double serrd = transposeTimes(tevecs[d], srel);
                    dists[d][j] = (sqdist += serrd * serrd / evs[d]);
                }
            }
            double sqdevs = 0;
            for (int d = 0; d < dim; d++) {
                // Scale with Stddev
                final double dev = projected[d];
                // Accumulate
                sqdevs += dev * dev / evs[d];
                // Sort, so we can trim the top 15% below.
                Arrays.sort(dists[d]);
                // Evaluate
                scores[d] = 1 - GammaChoiWetteEstimator.STATIC.estimate(dists[d], SHORTENED_ARRAY).cdf(sqdevs);
            }
        }

        // Find best score
        double min = Double.POSITIVE_INFINITY;
        int vdim = dim - 1;
        for (int d = 0; d < dim; d++) {
            double v = scores[d];
            if (v < min) {
                min = v;
                vdim = d;
            }
        }

        // Normalize the value
        final double prob = expect * (1 - min) / (expect + min);
        cop_score.putDouble(id, prob);
        if (models) {
            // Construct the error vector:
            Arrays.fill(projected, vdim + 1, dim, 0.);
            cop_err_v.put(id, timesEquals(transposeTimes(tevecs, projected), -prob));
            cop_dim.putInt(id, dim - vdim);
        }
        LOG.incrementProcessed(prog);
    }
    LOG.ensureCompleted(prog);

    // combine results.
    DoubleRelation scoreResult = new MaterializedDoubleRelation("Correlation Outlier Probabilities",
            COP_SCORES, cop_score, ids);
    OutlierScoreMeta scoreMeta = new ProbabilisticOutlierScore();
    OutlierResult result = new OutlierResult(scoreMeta, scoreResult);
    if (models) {
        result.addChildResult(new MaterializedRelation<>("Local Dimensionality", COP_DIM, TypeUtil.INTEGER, cop_dim, ids));
        result.addChildResult(new MaterializedRelation<>("Error vectors", COP_ERRORVEC, TypeUtil.DOUBLE_ARRAY, cop_err_v, ids));
    }
    return result;
}
java
public void write(Object from, File target) throws IOException {
    OutputStream outputStream = new FileOutputStream(target);
    try {
        write(from, outputStream);
    } finally {
        outputStream.close();
    }
}
java
protected String generateCacheKey(
    CmsObject cms,
    String targetSiteRoot,
    String detailPagePart,
    String absoluteLink) {

    return cms.getRequestContext().getSiteRoot() + ":" + targetSiteRoot + ":" + detailPagePart + absoluteLink;
}
java
public Observable<ServiceResponse<Page<DomainOwnershipIdentifierInner>>> listOwnershipIdentifiersNextWithServiceResponseAsync(final String nextPageLink) {
    return listOwnershipIdentifiersNextSinglePageAsync(nextPageLink)
        .concatMap(new Func1<ServiceResponse<Page<DomainOwnershipIdentifierInner>>, Observable<ServiceResponse<Page<DomainOwnershipIdentifierInner>>>>() {
            @Override
            public Observable<ServiceResponse<Page<DomainOwnershipIdentifierInner>>> call(ServiceResponse<Page<DomainOwnershipIdentifierInner>> page) {
                String nextPageLink = page.body().nextPageLink();
                if (nextPageLink == null) {
                    return Observable.just(page);
                }
                return Observable.just(page).concatWith(listOwnershipIdentifiersNextWithServiceResponseAsync(nextPageLink));
            }
        });
}
python
def interpretValue(value, *args, **kwargs):
    """Interprets a passed value. In this order:

    - If it's callable, call it with the parameters provided
    - If it's a tuple/list/dict and we have a single, non-kwarg parameter,
      look up that parameter within the tuple/list/dict
    - Else, just return it
    """
    if callable(value):
        return value(*args, **kwargs)
    if isinstance(value, (tuple, list, dict)):
        if len(args) == 1 and kwargs == {}:
            return value[args[0]]
    return value
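A minimal usage sketch (values are hypothetical), exercising each branch described in the docstring above:

interpretValue(lambda x: x * 2, 21)    # callable -> called with the args: 42
interpretValue({'a': 1, 'b': 2}, 'b')  # dict + single positional arg -> lookup: 2
interpretValue(['x', 'y'], 1)          # sequence + single positional arg -> 'y'
interpretValue('plain')                # anything else -> returned unchanged: 'plain'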
java
public void putShortString(String value) {
    checkAvailable(value.length() + 1);
    Wire.putShortString(needle, value);
}
python
def logging_file_install(path):
    """
    Install logger that will write to file.
    If this function has already installed a handler, replace it.

    :param path: path to the log file. Use None for the default file location.
    """
    if path is None:
        path = configuration_get_default_folder() / LOGGING_DEFAULTNAME
    if not path.parent.exists():
        log.error('File logger installation FAILED!')
        log.error('The directory of the log file does not exist.')
        return
    formatter = logging.Formatter(LOGGING_FORMAT)

    logger = logging.getLogger()
    logger.removeHandler(LOGGING_HANDLERS['file'])
    logFileHandler = logging.handlers.RotatingFileHandler(filename=str(path),
                                                          mode='a',
                                                          maxBytes=LOGGING_MAXBYTES,
                                                          backupCount=LOGGING_BACKUPCOUNT)
    logFileHandler.setLevel(logging.DEBUG)
    logFileHandler.setFormatter(formatter)

    LOGGING_HANDLERS['file'] = logFileHandler
    logger.addHandler(logFileHandler)
java
private List<String> resolveVariable(String filter, String text) {
    List<String> ret = new ArrayList<>();
    Matcher m = PATTERN.matcher(text);
    while (m.find()) {
        ParsedStatement statement = ParsedStatement.fromMatcher(m);
        if (statement != null && statement.getVariable().startsWith(filter)) {
            ret.add(statement.getVariable());
        }
    }
    return ret;
}
python
def calculate_size(name, permits):
    """ Calculates the request payload size"""
    data_size = 0
    data_size += calculate_size_str(name)
    data_size += INT_SIZE_IN_BYTES
    return data_size
java
public void setFrom(String from) {
    try {
        this.from = new EndpointReference(new URI(from));
    } catch (URISyntaxException e) {
        throw new CitrusRuntimeException("Invalid from uri", e);
    }
}
java
protected String formatLogMessage(LogLevel level, Object[] message, Throwable throwable) {
    // get text
    String messageText = this.format(message);
    String throwableText = this.format(throwable);

    // init buffer
    StringBuilder buffer = new StringBuilder();

    // append prefix
    buffer.append("[fax4j][");
    buffer.append(level.getName());
    buffer.append("] ");

    if (messageText != null) {
        buffer.append(messageText);
        if (throwableText != null) {
            buffer.append(Logger.SYSTEM_EOL);
            buffer.append(throwableText);
        }
    } else if (throwableText != null) {
        buffer.append(throwableText);
    }

    // get text
    String text = buffer.toString();
    return text;
}
python
def conf_sets(self):
    '''The dictionary of configuration sets in this component, if any.'''
    with self._mutex:
        if not self._conf_sets:
            self._parse_configuration()
        return self._conf_sets
java
public void setupSFields() {
    this.getRecord(LogicFile.LOGIC_FILE_FILE).getField(LogicFile.SEQUENCE)
        .setupDefaultView(this.getNextLocation(ScreenConstants.NEXT_LOGICAL, ScreenConstants.ANCHOR_DEFAULT),
                this, ScreenConstants.DEFAULT_DISPLAY);
    this.getRecord(LogicFile.LOGIC_FILE_FILE).getField(LogicFile.METHOD_NAME)
        .setupDefaultView(this.getNextLocation(ScreenConstants.NEXT_LOGICAL, ScreenConstants.ANCHOR_DEFAULT),
                this, ScreenConstants.DEFAULT_DISPLAY);
}
java
public TableColumnVisibility convertTableColumnVisibility(final String tableIdentifier, final String json) {
    final String[] split = this.splitColumns(json);

    final List<String> visibleColumns = new ArrayList<>();
    final List<String> invisibleColumns = new ArrayList<>();

    for (String column : split) {
        final String[] attribute = this.splitAttributes(column);
        final String identifier = attribute[0].split(":")[1];
        final String visible = attribute[1].split(":")[1];
        if (Boolean.valueOf(visible)) {
            visibleColumns.add(identifier);
        } else {
            invisibleColumns.add(identifier);
        }
    }
    return new TableColumnVisibility(tableIdentifier, visibleColumns, invisibleColumns);
}
java
public static boolean contentEquals(File file1, File file2) throws IORuntimeException {
    boolean file1Exists = file1.exists();
    if (file1Exists != file2.exists()) {
        return false;
    }

    if (false == file1Exists) {
        // Neither file exists, so they are considered equal
        return true;
    }

    if (file1.isDirectory() || file2.isDirectory()) {
        // Directories are not compared
        throw new IORuntimeException("Can't compare directories, only files");
    }

    if (file1.length() != file2.length()) {
        // File lengths differ
        return false;
    }

    if (equals(file1, file2)) {
        // Same file
        return true;
    }

    InputStream input1 = null;
    InputStream input2 = null;
    try {
        input1 = getInputStream(file1);
        input2 = getInputStream(file2);
        return IoUtil.contentEquals(input1, input2);
    } finally {
        IoUtil.close(input1);
        IoUtil.close(input2);
    }
}
java
public List<String> getVariables() {
    return this.templateChunks.stream()
            .filter(templateChunk -> Expression.class.isAssignableFrom(templateChunk.getClass()))
            .map(templateChunk -> ((Expression) templateChunk).getName())
            .filter(Objects::nonNull)
            .collect(Collectors.toList());
}
java
@Transformer
public static String capFirst(Object o) {
    if (null == o) return "";
    String string = o.toString();
    if (string.length() == 0) {
        return string;
    }
    return ("" + string.charAt(0)).toUpperCase() + string.substring(1);
}
java
public void marshall(GetSegmentVersionsRequest getSegmentVersionsRequest, ProtocolMarshaller protocolMarshaller) {
    if (getSegmentVersionsRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }

    try {
        protocolMarshaller.marshall(getSegmentVersionsRequest.getApplicationId(), APPLICATIONID_BINDING);
        protocolMarshaller.marshall(getSegmentVersionsRequest.getPageSize(), PAGESIZE_BINDING);
        protocolMarshaller.marshall(getSegmentVersionsRequest.getSegmentId(), SEGMENTID_BINDING);
        protocolMarshaller.marshall(getSegmentVersionsRequest.getToken(), TOKEN_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
java
@Override
public List<StringSource> createSources(String sourceFileName) {
    return Util.list(new StringSource(sourceFileName, source));
}
java
public void addObject (int x, int y, Object object) {
    Record record = new Record(x, y, object);

    // if this is the very first element, we have to insert it
    // straight away because our binary search algorithm doesn't work
    // on empty arrays
    if (_size == 0) {
        _records[_size++] = record;
        return;
    }

    // figure out where to insert it
    int ipoint = binarySearch(x);

    // expand the records array if necessary
    if (_size >= _records.length) {
        int nsize = _size * 2;
        Record[] records = new Record[nsize];
        System.arraycopy(_records, 0, records, 0, _size);
        _records = records;
    }

    // shift everything down
    if (ipoint < _size) {
        System.arraycopy(_records, ipoint, _records, ipoint + 1, _size - ipoint);
    }

    // insert the record
    _records[ipoint] = record;
    _size++;
}
python
def display_information_message_bar(
        title=None,
        message=None,
        more_details=None,
        button_text=tr('Show details ...'),
        duration=8,
        iface_object=iface):
    """
    Display an information message bar.

    :param iface_object: The QGIS IFace instance. Note that we cannot
        use qgis.utils.iface since it is not available in our test environment.
    :type iface_object: QgisInterface

    :param title: The title of the message bar.
    :type title: basestring

    :param message: The message inside the message bar.
    :type message: basestring

    :param more_details: The message inside the 'Show details' button.
    :type more_details: basestring

    :param button_text: The text of the button if 'more_details' is not empty.
    :type button_text: basestring

    :param duration: The duration for the display, default is 8 seconds.
    :type duration: int
    """
    iface_object.messageBar().clearWidgets()
    widget = iface_object.messageBar().createMessage(title, message)

    if more_details:
        button = QPushButton(widget)
        button.setText(button_text)
        button.pressed.connect(
            lambda: display_information_message_box(
                title=title, message=more_details))
        widget.layout().addWidget(button)

    iface_object.messageBar().pushWidget(widget, Qgis.Info, duration)
java
public static DateTime toDateAdvanced(Object o, TimeZone timezone) throws PageException {
    if (o instanceof Date) {
        if (o instanceof DateTime) return (DateTime) o;
        return new DateTimeImpl((Date) o);
    }
    else if (o instanceof Castable) return ((Castable) o).castToDateTime();
    else if (o instanceof String) {
        DateTime dt = toDateAdvanced((String) o, timezone, null);
        if (dt == null) throw new ExpressionException("can't cast [" + o + "] to date value");
        return dt;
    }
    else if (o instanceof Number) return util.toDateTime(((Number) o).doubleValue());
    else if (o instanceof ObjectWrap) return toDateAdvanced(((ObjectWrap) o).getEmbededObject(), timezone);
    else if (o instanceof Calendar) {
        return new DateTimeImpl((Calendar) o);
    }
    throw new ExpressionException("can't cast [" + Caster.toClassName(o) + "] to date value");
}
python
def readfile(filename):
    """
    returns the content of a file

    :param filename: the filename
    :return: the file content
    """
    with open(path_expand(filename), 'r') as f:
        content = f.read()
    return content
java
protected DataHash calculateMac() throws KSIException {
    try {
        HashAlgorithm algorithm = HashAlgorithm.getByName("DEFAULT");
        algorithm.checkExpiration();
        return new DataHash(algorithm, Util.calculateHMAC(getContent(), this.loginKey, algorithm.getName()));
    } catch (IOException e) {
        throw new KSIProtocolException("Problem with HMAC", e);
    } catch (InvalidKeyException e) {
        throw new KSIProtocolException("Problem with HMAC key.", e);
    } catch (NoSuchAlgorithmException e) {
        // If the default algorithm changes to be outside of the MD5 / SHA1 / SHA256 list.
        throw new KSIProtocolException("Unsupported HMAC algorithm.", e);
    } catch (HashException e) {
        throw new KSIProtocolException(e.getMessage(), e);
    }
}
python
def insert_level(df, label, level=0, copy=0, axis=0, level_name=None):
    """Add a new level to the index with the specified label. The newly
    created index will be a MultiIndex.

    :param df: DataFrame
    :param label: label to insert
    :param copy: If True, copy the DataFrame before assigning new index
    :param axis: If 0, then columns. If 1, then index
    :return:
    """
    df = df if not copy else df.copy()
    src = df.columns if axis == 0 else df.index
    current = [src.get_level_values(lvl) for lvl in range(src.nlevels)]
    current.insert(level, [label] * len(src))
    idx = pd.MultiIndex.from_arrays(current)
    level_name and idx.set_names(level_name, level, inplace=1)
    if axis == 0:
        df.columns = idx
    else:
        df.index = idx
    return df
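A short usage sketch (column names are hypothetical): inserting a level at position 0 turns a flat column index into a two-level MultiIndex.

import pandas as pd

df = pd.DataFrame({'price': [1.0, 2.0], 'qty': [3, 4]})  # flat columns
out = insert_level(df, 'raw', level=0, copy=1, axis=0)
print(out.columns.tolist())  # [('raw', 'price'), ('raw', 'qty')]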
python
def unset_access_cookies(response):
    """
    Takes a flask response object, and configures it to unset (delete) the
    access token from the response cookies. If `JWT_CSRF_IN_COOKIES`
    (see :ref:`Configuration Options`) is `True`, this will also remove the
    access CSRF double submit value from the response cookies as well.

    :param response: The Flask response object to delete the JWT cookies in.
    """
    if not config.jwt_in_cookies:
        raise RuntimeWarning("unset_access_cookies() called without "
                             "'JWT_TOKEN_LOCATION' configured to use cookies")

    response.set_cookie(config.access_cookie_name,
                        value='',
                        expires=0,
                        secure=config.cookie_secure,
                        httponly=True,
                        domain=config.cookie_domain,
                        path=config.access_cookie_path,
                        samesite=config.cookie_samesite)

    if config.csrf_protect and config.csrf_in_cookies:
        response.set_cookie(config.access_csrf_cookie_name,
                            value='',
                            expires=0,
                            secure=config.cookie_secure,
                            httponly=False,
                            domain=config.cookie_domain,
                            path=config.access_csrf_cookie_path,
                            samesite=config.cookie_samesite)
python
def rename_sectors(self, sectors):
    """ Sets new names for the sectors

    Parameters
    ----------
    sectors : list or dict
        In case of dict: {'old_name': 'new_name'} with an entry for
        each old_name which should be renamed.
        In case of list: List of new names in order and complete
        without repetition.
    """
    if type(sectors) is list:
        sectors = {old: new for old, new in
                   zip(self.get_sectors(), sectors)}

    for df in self.get_DataFrame(data=True):
        df.rename(index=sectors, columns=sectors, inplace=True)

    try:
        for ext in self.get_extensions(data=True):
            for df in ext.get_DataFrame(data=True):
                df.rename(index=sectors, columns=sectors, inplace=True)
    except:
        pass

    self.meta._add_modify("Changed sector names")
    return self
java
@Override
public void eSet(int featureID, Object newValue) {
    switch (featureID) {
        case AfplibPackage.FONT_DESCRIPTOR_SPECIFICATION__FT_WT_CLASS:
            setFtWtClass((Integer) newValue);
            return;
        case AfplibPackage.FONT_DESCRIPTOR_SPECIFICATION__FT_WD_CLASS:
            setFtWdClass((Integer) newValue);
            return;
        case AfplibPackage.FONT_DESCRIPTOR_SPECIFICATION__FT_HEIGHT:
            setFtHeight((Integer) newValue);
            return;
        case AfplibPackage.FONT_DESCRIPTOR_SPECIFICATION__FT_WIDTH:
            setFtWidth((Integer) newValue);
            return;
        case AfplibPackage.FONT_DESCRIPTOR_SPECIFICATION__FT_DS_FLAGS:
            setFtDsFlags((Integer) newValue);
            return;
        case AfplibPackage.FONT_DESCRIPTOR_SPECIFICATION__FT_US_FLAGS:
            setFtUsFlags((Integer) newValue);
            return;
    }
    super.eSet(featureID, newValue);
}
python
def inherit_docstring_from(cls):
    """
    This decorator modifies the decorated function's docstring by
    replacing occurrences of '%(super)s' with the docstring of the
    method of the same name from the class `cls`.

    If the decorated method has no docstring, it is simply given the
    docstring of the `cls` method.

    Extracted from scipy.misc.doccer.
    """
    def _doc(func):
        cls_docstring = getattr(cls, func.__name__).__doc__
        func_docstring = func.__doc__
        if func_docstring is None:
            func.__doc__ = cls_docstring
        else:
            new_docstring = func_docstring % dict(super=cls_docstring)
            func.__doc__ = new_docstring
        return func
    return _doc
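A small usage sketch (class and method names are hypothetical) showing the '%(super)s' substitution:

class Base:
    def run(self):
        """Run the task."""

class Child(Base):
    @inherit_docstring_from(Base)
    def run(self):
        """%(super)s Extended with extra logging."""

print(Child.run.__doc__)  # "Run the task. Extended with extra logging."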
python
def DeleteInstance(r, instance, dry_run=False):
    """
    Deletes an instance.

    @type instance: str
    @param instance: the instance to delete

    @rtype: int
    @return: job id
    """
    return r.request("delete", "/2/instances/%s" % instance,
                     query={"dry-run": dry_run})
python
def _memo(f):
    """Return a function like f but caching its results.
    Its arguments must be hashable."""
    memos = {}
    def memoized(*args):
        try:
            return memos[args]
        except KeyError:
            result = memos[args] = f(*args)
            return result
    return memoized
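A quick usage sketch (function name is hypothetical): memoizing a recursive function so each distinct argument is computed once.

@_memo
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))  # 832040, with linear rather than exponential call growth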
python
def example(fn):
    '''Wrap the examples so they generate readable output'''
    @functools.wraps(fn)
    def wrapped():
        try:
            sys.stdout.write('Running: %s\n' % fn.__name__)
            fn()
            sys.stdout.write('\n')
        except KeyboardInterrupt:
            sys.stdout.write('\nSkipping example.\n\n')
            # Sleep a bit to make killing the script easier
            time.sleep(0.2)
    examples.append(wrapped)
    return wrapped
python
def receive(self):
    '''
    Return the message received and the address.
    '''
    try:
        msg, addr = self.skt.recvfrom(self.buffer_size)
    except socket.error as error:
        log.error('Received listener socket error: %s', error, exc_info=True)
        raise ListenerException(error)
    log.debug('[%s] Received %s from %s', time.time(), msg, addr)
    return msg, addr[0]
java
public static void initEN16931(@Nonnull final ValidationExecutorSetRegistry aRegistry) {
    ValueEnforcer.notNull(aRegistry, "Registry");

    // For better error messages
    LocationBeautifierSPI.addMappings(UBL21NamespaceContext.getInstance());
    LocationBeautifierSPI.addMappings(CIID16BNamespaceContext.getInstance());

    final boolean bDeprecated = true;
    final boolean bNotDeprecated = false;

    aRegistry.registerValidationExecutorSet(ValidationExecutorSet.create(
        VID_CII_100, "EN 16931 CII " + VID_CII_100.getVersion(), bDeprecated,
        ValidationExecutorXSD.create(ECIID16BDocumentType.CROSS_INDUSTRY_INVOICE),
        new ValidationExecutorSchematron(
            new ValidationArtefact(EValidationType.SCHEMATRON_XSLT, _getCL(), INVOICE_CII_100_XSLT),
            null, CIID16BNamespaceContext.getInstance())));
    aRegistry.registerValidationExecutorSet(ValidationExecutorSet.create(
        VID_CII_110, "EN 16931 CII " + VID_CII_110.getVersion(), bDeprecated,
        ValidationExecutorXSD.create(ECIID16BDocumentType.CROSS_INDUSTRY_INVOICE),
        new ValidationExecutorSchematron(
            new ValidationArtefact(EValidationType.SCHEMATRON_XSLT, _getCL(), INVOICE_CII_110_XSLT),
            null, CIID16BNamespaceContext.getInstance())));
    aRegistry.registerValidationExecutorSet(ValidationExecutorSet.create(
        VID_CII_120, "EN 16931 CII " + VID_CII_120.getVersion(), bNotDeprecated,
        ValidationExecutorXSD.create(ECIID16BDocumentType.CROSS_INDUSTRY_INVOICE),
        new ValidationExecutorSchematron(
            new ValidationArtefact(EValidationType.SCHEMATRON_XSLT, _getCL(), INVOICE_CII_120_XSLT),
            null, CIID16BNamespaceContext.getInstance())));
    aRegistry.registerValidationExecutorSet(ValidationExecutorSet.create(
        VID_EDIFACT_100, "EN 16931 EDIFACT/ISO 20625 " + VID_EDIFACT_100.getVersion(), bNotDeprecated,
        ValidationExecutorXSD.create(new JAXBDocumentType(MINVOIC.class,
            new CommonsArrayList<>(new ClassPathResource("/schemas/INVOIC_D14B_ISO20625.xsd", _getCL())), null)),
        new ValidationExecutorSchematron(
            new ValidationArtefact(EValidationType.SCHEMATRON_XSLT, _getCL(), INVOICE_EDIFACT_100_XSLT),
            null, null)));

    // Pure is quicker than XSLT
    aRegistry.registerValidationExecutorSet(ValidationExecutorSet.create(
        VID_UBL_INVOICE_100, "EN 16931 UBL Invoice " + VID_UBL_INVOICE_100.getVersion(), bDeprecated,
        ValidationExecutorXSD.create(EUBL21DocumentType.INVOICE),
        new ValidationExecutorSchematron(
            new ValidationArtefact(EValidationType.SCHEMATRON_XSLT, _getCL(), INVOICE_UBL_100_XSLT),
            null, UBL21NamespaceContext.getInstance())));
    aRegistry.registerValidationExecutorSet(ValidationExecutorSet.create(
        VID_UBL_CREDIT_NOTE_100, "EN 16931 UBL CreditNote " + VID_UBL_CREDIT_NOTE_100.getVersion(), bDeprecated,
        ValidationExecutorXSD.create(EUBL21DocumentType.CREDIT_NOTE),
        new ValidationExecutorSchematron(
            new ValidationArtefact(EValidationType.SCHEMATRON_XSLT, _getCL(), INVOICE_UBL_110_XSLT),
            null, UBL21NamespaceContext.getInstance())));
    aRegistry.registerValidationExecutorSet(ValidationExecutorSet.create(
        VID_UBL_INVOICE_110, "EN 16931 UBL Invoice " + VID_UBL_INVOICE_110.getVersion(), bDeprecated,
        ValidationExecutorXSD.create(EUBL21DocumentType.INVOICE),
        new ValidationExecutorSchematron(
            new ValidationArtefact(EValidationType.SCHEMATRON_XSLT, _getCL(), INVOICE_UBL_110_XSLT),
            null, UBL21NamespaceContext.getInstance())));
    aRegistry.registerValidationExecutorSet(ValidationExecutorSet.create(
        VID_UBL_CREDIT_NOTE_110, "EN 16931 UBL CreditNote " + VID_UBL_CREDIT_NOTE_110.getVersion(), bDeprecated,
        ValidationExecutorXSD.create(EUBL21DocumentType.CREDIT_NOTE),
        new ValidationExecutorSchematron(
            new ValidationArtefact(EValidationType.SCHEMATRON_XSLT, _getCL(), INVOICE_UBL_110_XSLT),
            null, UBL21NamespaceContext.getInstance())));
    aRegistry.registerValidationExecutorSet(ValidationExecutorSet.create(
        VID_UBL_INVOICE_120, "EN 16931 UBL Invoice " + VID_UBL_INVOICE_120.getVersion(), bNotDeprecated,
        ValidationExecutorXSD.create(EUBL21DocumentType.INVOICE),
        new ValidationExecutorSchematron(
            new ValidationArtefact(EValidationType.SCHEMATRON_XSLT, _getCL(), INVOICE_UBL_120_XSLT),
            null, UBL21NamespaceContext.getInstance())));
    aRegistry.registerValidationExecutorSet(ValidationExecutorSet.create(
        VID_UBL_CREDIT_NOTE_120, "EN 16931 UBL CreditNote " + VID_UBL_CREDIT_NOTE_120.getVersion(), bNotDeprecated,
        ValidationExecutorXSD.create(EUBL21DocumentType.CREDIT_NOTE),
        new ValidationExecutorSchematron(
            new ValidationArtefact(EValidationType.SCHEMATRON_XSLT, _getCL(), INVOICE_UBL_120_XSLT),
            null, UBL21NamespaceContext.getInstance())));
}
python
def format_data(data):
    """
    Format bytes for printing

    :param data: Bytes
    :type data: None | bytearray | str
    :return: Printable version
    :rtype: unicode
    """
    if data is None:
        return None
    return u":".join([u"{:02x}".format(ord(c)) for c in data])
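A usage sketch (assuming Python 2-style byte strings, which is what the ord() call implies):

print(format_data('\x01\x02\xff'))  # u'01:02:ff'
print(format_data(None))            # None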
python
def retrieve_records(self, timeperiod, include_running, include_processed,
                     include_noop, include_failed, include_disabled):
    """ method looks for suitable UOW records and returns them as a dict """
    resp = dict()
    try:
        query = unit_of_work_dao.QUERY_GET_FREERUN_SINCE(
            timeperiod, include_running, include_processed, include_noop, include_failed)
        records_list = self.uow_dao.run_query(query)
        if len(records_list) == 0:
            self.logger.warning('MX: no Freerun UOW records found since {0}.'.format(timeperiod))

        for uow_record in records_list:
            # freerun uow.process_name is a composite in format <process_name::entry_name>
            handler_key = split_schedulable_name(uow_record.process_name)
            if handler_key not in self.freerun_handlers:
                continue

            thread_handler = self.freerun_handlers[handler_key]
            if not include_disabled and not thread_handler.process_entry.is_on:
                continue

            resp[uow_record.key] = uow_record.document
    except Exception as e:
        self.logger.error('MX Dashboard FreerunStatements error: {0}'.format(e))
    return resp
python
def cache_mappings(file_path):
    """
    Make a full mapping for 2 --> 3 columns.
    Output the mapping to json in the specified file_path.
    Note: This file is currently called maps.py, full path is
    PmagPy/pmagpy/mapping/maps.py.

    Parameters
    ----------
    file_path : string with full file path to dump mapping json.

    Returns
    ---------
    maps : nested dictionary with format
        {table_name: {magic2_col_name: magic3_col_name, ...}, ...}
    """
    def get_2_to_3(dm_type, dm):
        table_names3_2_table_names2 = {'measurements': ['magic_measurements'],
                                       'locations': ['er_locations'],
                                       'sites': ['er_sites', 'pmag_sites'],
                                       'samples': ['er_samples', 'pmag_samples'],
                                       'specimens': ['er_specimens', 'pmag_specimens'],
                                       'ages': ['er_ages'],
                                       'criteria': ['pmag_criteria'],
                                       'images': ['er_images'],
                                       'contribution': []}
        table_names3 = table_names3_2_table_names2[dm_type]
        dictionary = {}
        for label, row in dm.iterrows():
            # if there are one or more corresponding 2.5 columns:
            if isinstance(row['previous_columns'], list):
                for previous_values in row['previous_columns']:
                    previous_table = previous_values['table']
                    previous_value = previous_values['column']
                    if previous_table in table_names3:
                        add_to_dict(previous_value, label, dictionary)
                    elif previous_table in ["pmag_results", "rmag_results"]:
                        if label not in dictionary.values():
                            if previous_value not in dictionary.keys():
                                add_to_dict(previous_value, label, dictionary)
        return dictionary

    def add_to_dict(key, value, dictionary):
        if key in dictionary:
            if value != dictionary[key]:
                print('W- OVERWRITING')
                print('was:', key, dictionary[key])
                print('now:', key, value)
        dictionary[key] = value

    # begin
    data_model = DataModel()
    maps = {}
    for table_name in data_model.dm:
        dm = data_model.dm[table_name]
        new_mapping = get_2_to_3(table_name, dm)
        maps[table_name] = new_mapping

    # write maps out to file
    f = open(file_path, 'w')
    f.write("all_maps = ")
    json.dump(maps, f)
    f.close()

    return maps
java
public static String createIdentifier(EnhancedAnnotatedType<?> type, EjbDescriptor<?> descriptor) {
    StringBuilder builder = BeanIdentifiers.getPrefix(SessionBean.class);
    appendEjbNameAndClass(builder, descriptor);
    if (!type.isDiscovered()) {
        builder.append(BEAN_ID_SEPARATOR).append(type.slim().getIdentifier().asString());
    }
    return builder.toString();
}
java
public NotificationChain basicSetPropertyParameters(PropertyParameters newPropertyParameters, NotificationChain msgs) {
    PropertyParameters oldPropertyParameters = propertyParameters;
    propertyParameters = newPropertyParameters;
    if (eNotificationRequired()) {
        ENotificationImpl notification = new ENotificationImpl(this, Notification.SET,
                BpsimPackage.ELEMENT_PARAMETERS__PROPERTY_PARAMETERS,
                oldPropertyParameters, newPropertyParameters);
        if (msgs == null)
            msgs = notification;
        else
            msgs.add(notification);
    }
    return msgs;
}
python
def _walk_paths(self, base: pathlib.PurePath) -> Iterator[pathlib.PurePath]:
    """
    Internal helper for walking paths. This is required to exclude the
    name of the root entity from the walk.

    :param base: The base path to prepend to the entity name.
    :return: An iterator of paths.
    """
    return self.walk_paths(base)
java
public static String toSocketAddressString(String host, int port) {
    String portStr = String.valueOf(port);
    return newSocketAddressStringBuilder(
            host, portStr, !isValidIpV6Address(host)).append(':').append(portStr).toString();
}
java
public void putRequestBaggage(String key, String value) {
    if (BAGGAGE_ENABLE && key != null && value != null) {
        requestBaggage.put(key, value);
    }
}
java
public int chooseShardForInsert(DocumentID key) {
    int hashCode = key.hashCode();
    return hashCode >= 0 ? hashCode % numShards : (-hashCode) % numShards;
}
java
@Override
public void sortSpecification(String collateName, boolean isAscending) {
    // collationName is ignored for now
    PropertyPath<TypeDescriptor<TypeMetadata>> property = resolveAlias(propertyPath);
    checkAnalyzed(property, false);  //todo [anistor] cannot sort on analyzed field?

    if (sortFields == null) {
        sortFields = new ArrayList<>(ARRAY_INITIAL_LENGTH);
    }
    sortFields.add(new IckleParsingResult.SortFieldImpl<>(property, isAscending));
}
java
@SuppressWarnings("unchecked") @Override public String submitTopologyWithOpts(String topologyName, String uploadedJarLocation, String jsonConf, StormTopology topology, SubmitOptions options) throws TException { LOG.info("Received topology: " + topologyName + ", uploadedJarLocation:" + uploadedJarLocation); long start = System.nanoTime(); //check whether topology name is valid if (!Common.charValidate(topologyName)) { throw new InvalidTopologyException(topologyName + " is not a valid topology name"); } Map<Object, Object> serializedConf = (Map<Object, Object>) JStormUtils.from_json(jsonConf); if (serializedConf == null) { LOG.error("Failed to serialize configuration"); throw new InvalidTopologyException("Failed to serialize topology configuration"); } Common.confValidate(serializedConf, data.getConf()); boolean enableDeploy = ConfigExtension.getTopologyHotDeplogyEnable(serializedConf); boolean isUpgrade = ConfigExtension.isUpgradeTopology(serializedConf); try { checkTopologyActive(data, topologyName, enableDeploy || isUpgrade); } catch (AlreadyAliveException e) { LOG.info(topologyName + " already exists "); throw e; } catch (NotAliveException e) { LOG.info(topologyName + " is not alive "); throw e; } catch (Throwable e) { LOG.info("Failed to check whether topology {} is alive or not", topologyName, e); throw new TException(e); } try { if (isUpgrade || enableDeploy) { LOG.info("start to deploy the topology"); String topologyId = getTopologyId(topologyName); if (topologyId == null) { throw new NotAliveException(topologyName); } if (isUpgrade) { TopologyInfo topologyInfo = getTopologyInfo(topologyId); if (topologyInfo == null) { throw new TException("Failed to get topology info"); } int workerNum = ConfigExtension.getUpgradeWorkerNum(serializedConf); String component = ConfigExtension.getUpgradeComponent(serializedConf); Set<String> workers = ConfigExtension.getUpgradeWorkers(serializedConf); if (!ConfigExtension.isTmSingleWorker(serializedConf, topologyInfo.get_topology().get_numWorkers())) { throw new TException("Gray upgrade requires that topology master to be a single worker, " + "cannot perform the upgrade!"); } return grayUpgrade(topologyId, uploadedJarLocation, topology, serializedConf, component, workers, workerNum); } else { LOG.info("start to kill old topology {}", topologyId); Map oldConf = new HashMap(); oldConf.putAll(conf); Map killedStormConf = StormConfig.read_nimbus_topology_conf(topologyId, data.getBlobStore()); if (killedStormConf != null) { oldConf.putAll(killedStormConf); } NimbusUtils.transitionName(data, topologyName, true, StatusType.kill, 0); KillTopologyEvent.pushEvent(topologyId); notifyTopologyActionListener(topologyName, "killTopology"); //wait all workers' are killed final long timeoutSeconds = ConfigExtension.getTaskCleanupTimeoutSec(oldConf); ConcurrentHashMap<String, Semaphore> topologyIdtoSem = data.getTopologyIdtoSem(); if (!topologyIdtoSem.contains(topologyId)) { topologyIdtoSem.putIfAbsent(topologyId, new Semaphore(0)); } Semaphore semaphore = topologyIdtoSem.get(topologyId); if (semaphore != null) { semaphore.tryAcquire(timeoutSeconds, TimeUnit.SECONDS); topologyIdtoSem.remove(semaphore); } LOG.info("successfully killed old topology {}", topologyId); } } } catch (Exception e) { String errMsg = "Failed to submit topology " + topologyName; LOG.error(errMsg, e); throw new TException(errMsg); } String topologyId; synchronized (data) { // avoid same topologies from being submitted at the same time Set<String> pendingTopologies = 
data.getPendingSubmitTopologies().buildMap().keySet(); Pattern topologyPattern = Pattern.compile("^" + topologyName + "-\\d+-\\d+$"); for (String cachedTopologyId : pendingTopologies) { if (topologyPattern.matcher(cachedTopologyId).matches()) { throw new AlreadyAliveException(topologyName + " were submitted"); } } int counter = data.getSubmittedCount().incrementAndGet(); topologyId = Common.topologyNameToId(topologyName, counter); data.getPendingSubmitTopologies().put(topologyId, null); } try { serializedConf.put(Config.TOPOLOGY_ID, topologyId); serializedConf.put(Config.TOPOLOGY_NAME, topologyName); Map<Object, Object> stormConf; stormConf = NimbusUtils.normalizeConf(conf, serializedConf, topology); LOG.info("Normalized configuration:" + stormConf); Map<Object, Object> totalStormConf = new HashMap<>(conf); totalStormConf.putAll(stormConf); StormTopology normalizedTopology = NimbusUtils.normalizeTopology(stormConf, topology, true); // this validates the structure of the topology Common.validate_basic(normalizedTopology, totalStormConf, topologyId); // don't need generate real topology, so skip Common.system_topology // Common.system_topology(totalStormConf, topology); StormClusterState stormClusterState = data.getStormClusterState(); // create /local-dir/nimbus/topologyId/xxxx files setupStormCode(topologyId, uploadedJarLocation, stormConf, normalizedTopology, false); // wait for blob replication before activate topology waitForDesiredCodeReplication(conf, topologyId); // generate TaskInfo for every bolt or spout in ZK // /ZK/tasks/topoologyId/xxx setupZkTaskInfo(conf, topologyId, stormClusterState); //mkdir topology error directory String path = Cluster.taskerror_storm_root(topologyId); stormClusterState.mkdir(path); String grayUpgradeBasePath = Cluster.gray_upgrade_base_path(topologyId); stormClusterState.mkdir(grayUpgradeBasePath); stormClusterState.mkdir(Cluster.gray_upgrade_upgraded_workers_path(topologyId)); stormClusterState.mkdir(Cluster.gray_upgrade_upgrading_workers_path(topologyId)); // make assignments for a topology LOG.info("Submit topology {} with conf {}", topologyName, serializedConf); makeAssignment(topologyName, topologyId, options.get_initial_status()); // push start event after startup double metricsSampleRate = ConfigExtension.getMetricSampleRate(stormConf); StartTopologyEvent.pushEvent(topologyId, metricsSampleRate); notifyTopologyActionListener(topologyName, "submitTopology"); } catch (InvalidTopologyException e) { LOG.error("Topology is invalid. {}", e.get_msg()); throw e; } catch (Exception e) { String errorMsg = String.format( "Fail to submit topology, topologyId:%s, uploadedJarLocation:%s, root cause:%s\n\n", e.getMessage() == null ? "submit timeout" : e.getMessage(), topologyId, uploadedJarLocation); LOG.error(errorMsg, e); throw new TopologyAssignException(errorMsg); } finally { data.getPendingSubmitTopologies().remove(topologyId); double spend = (System.nanoTime() - start) / TimeUtils.NS_PER_US; SimpleJStormMetric.updateNimbusHistogram("submitTopologyWithOpts", spend); LOG.info("submitTopologyWithOpts {} costs {}ms", topologyName, spend); } return topologyId; }
python
def retrieve_object_query(self, view_kwargs, filter_field, filter_value):
    """Build query to retrieve object

    :param dict view_kwargs: kwargs from the resource view
    :param sqlalchemy_field filter_field: the field to filter on
    :param filter_value: the value to filter with
    :return sqlalchemy query: a query from sqlalchemy
    """
    return self.session.query(self.model).filter(filter_field == filter_value)
python
def validate_email(self, email_address):
    ''' a method to validate an email address

    :param email_address: string with email address to validate
    :return: dictionary with validation fields in response_details['json']
    '''
    title = '%s.validate_email' % __class__.__name__

    # validate inputs
    object_title = '%s(email_address="")' % title
    email_address = self.fields.validate(email_address, '.email_address', object_title)

    # construct request_kwargs
    request_kwargs = {
        'url': '%s/address/validate' % self.api_endpoint,
        'params': {'address': email_address}
    }

    # send request
    response_details = self._get_request(**request_kwargs)

    return response_details
java
private static TypedArray obtainStyledAttributes(@NonNull final Context context,
                                                 @StyleRes final int themeResourceId,
                                                 @AttrRes final int resourceId) {
    Condition.INSTANCE.ensureNotNull(context, "The context may not be null");
    Theme theme = context.getTheme();
    int[] attrs = new int[]{resourceId};
    if (themeResourceId != -1) {
        return theme.obtainStyledAttributes(themeResourceId, attrs);
    } else {
        return theme.obtainStyledAttributes(attrs);
    }
}
python
def _import_sub_module(module, name):
    """import_sub_module will mimic the function of importlib.import_module"""
    module = __import__(module.__name__ + "." + name)
    for level in name.split("."):
        module = getattr(module, level)
    return module
java
public static void setInputKeyCobolContext(Job job, Class<? extends CobolContext> cobolContext) {
    job.getConfiguration().setClass(CONF_INPUT_KEY_COBOL_CONTEXT, cobolContext, CobolContext.class);
}
python
def to_dict(self):
    # type: () -> OrderedDict
    """Create a dictionary representation of object attributes

    Returns:
        OrderedDict serialised version of self
    """
    d = OrderedDict()
    if self.typeid:
        d["typeid"] = self.typeid
    for k in self.call_types:
        # check_camel_case(k)
        d[k] = serialize_object(getattr(self, k))
    return d
python
def _generateForOAuthSecurity(self, client_id, secret_id, token_url=None):
    """ generates a token based on the OAuth security model """
    grant_type = "client_credentials"
    if token_url is None:
        token_url = "https://www.arcgis.com/sharing/rest/oauth2/token"
    params = {
        "client_id": client_id,
        "client_secret": secret_id,
        "grant_type": grant_type,
        "f": "json"
    }
    token = self._post(url=token_url,
                       param_dict=params,
                       securityHandler=None,
                       proxy_port=self._proxy_port,
                       proxy_url=self._proxy_url)
    if 'access_token' in token:
        self._token = token['access_token']
        self._expires_in = token['expires_in']
        self._token_created_on = datetime.datetime.now()
        self._token_expires_on = self._token_created_on + datetime.timedelta(
            seconds=int(token['expires_in']))
        self._valid = True
        self._message = "Token Generated"
    else:
        self._token = None
        self._expires_in = None
        self._token_created_on = None
        self._token_expires_on = None
        self._valid = False
        self._message = token
python
def send(self, data):
    """Send message to the server

    :param str data: message.
    """
    if not self._ws_connection:
        raise RuntimeError('Web socket connection is closed.')

    self._ws_connection.write_message(json.dumps(data))
java
public LambdaDslObject minArrayLike(String name, Integer size, Consumer<LambdaDslObject> nestedObject) {
    final PactDslJsonBody minArrayLike = object.minArrayLike(name, size);
    final LambdaDslObject dslObject = new LambdaDslObject(minArrayLike);
    nestedObject.accept(dslObject);
    minArrayLike.closeArray();
    return this;
}
python
def get_context(template, line, num_lines=5, marker=None):
    '''
    Returns debugging context around a line in a given string

    Returns:: string
    '''
    template_lines = template.splitlines()
    num_template_lines = len(template_lines)

    # In test mode, a single-line template would return a crazy line number
    # like 357. Do this sanity check and, if the given line is obviously
    # wrong, just return the entire template.
    if line > num_template_lines:
        return template

    context_start = max(0, line - num_lines - 1)  # subtract 1 for 0-based indexing
    context_end = min(num_template_lines, line + num_lines)
    error_line_in_context = line - context_start - 1  # subtract 1 for 0-based indexing

    buf = []
    if context_start > 0:
        buf.append('[...]')
        error_line_in_context += 1

    buf.extend(template_lines[context_start:context_end])

    if context_end < num_template_lines:
        buf.append('[...]')

    if marker:
        buf[error_line_in_context] += marker

    return '---\n{0}\n---'.format('\n'.join(buf))
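A usage sketch (the template text is hypothetical): show one line of context around line 3 and append a marker to the error line.

src = 'a\nb\nc\nd\ne'
print(get_context(src, 3, num_lines=1, marker='    <---'))
# ---
# [...]
# b
# c    <---
# d
# [...]
# ---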
python
def get_binary_path(executable, logging_level='INFO'):
    """Gets the software name and returns the path of the binary."""
    if sys.platform == 'win32':
        if executable == 'start':
            return executable
        executable = executable + '.exe'
        if executable in os.listdir('.'):
            binary = os.path.join(os.getcwd(), executable)
        else:
            binary = next((os.path.join(path, executable)
                           for path in os.environ['PATH'].split(os.pathsep)
                           if os.path.isfile(os.path.join(path, executable))),
                          None)
    else:
        venv_parent = get_venv_parent_path()
        venv_bin_path = os.path.join(venv_parent, '.venv', 'bin')
        if venv_bin_path not in os.environ.get('PATH'):
            if logging_level == 'DEBUG':
                print(f'Adding path {venv_bin_path} to environment PATH variable')
            os.environ['PATH'] = os.pathsep.join([os.environ['PATH'], venv_bin_path])
        binary = shutil.which(executable)
    return binary if binary else None
python
def bootstrap_fit(
    rv_cont, data, n_iter=10, quant=95, print_params=True, **kwargs
):
    """Bootstrap a distribution fit + get confidence intervals for the params.

    Parameters
    ==========
    rv_cont: scipy.stats.rv_continuous instance
        The distribution to fit.
    data: array-like, 1d
        The data on which to fit.
    n_iter: int [default=10]
        Number of bootstrap iterations.
    quant: int [default=95]
        percentile of the confidence limits (default is 95, i.e. 2.5%-97.5%)
    print_params: bool [default=True]
        Print a fit summary.
    """
    fit_params = bootstrap_params(rv_cont, data, n_iter)
    par, lo, up = param_describe(fit_params, quant=quant)
    # only print the summary when requested, as the docstring promises
    if print_params:
        names = param_names(rv_cont)
        maxlen = max(len(s) for s in names)
        print("--------------")
        print(rv_cont.name)
        print("--------------")
        for i, name in enumerate(names):
            print(
                "{nam:>{fill}}: {mean:+.3f} ∈ "
                "[{lo:+.3f}, {up:+.3f}] ({q}%)".format(
                    nam=name, fill=maxlen, mean=par[i],
                    lo=lo[i], up=up[i], q=quant
                )
            )
    out = {
        'mean': par,
        'lower limit': lo,
        'upper limit': up,
    }
    return out
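A usage sketch, assuming the module-level helpers bootstrap_params, param_describe and param_names referenced above are available:

import numpy as np
from scipy import stats

rng = np.random.default_rng(42)
data = rng.normal(loc=5.0, scale=2.0, size=1000)

# Fit a normal distribution and bootstrap 95% confidence limits
# for its parameters (loc and scale).
result = bootstrap_fit(stats.norm, data, n_iter=100, quant=95)
print(result['mean'])         # bootstrap mean of the fitted parameters
print(result['lower limit'])  # 2.5% quantile per parameter
print(result['upper limit'])  # 97.5% quantile per parameter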
python
def markdown(text, renderer=None, **options):
    """
    Parses the provided Markdown-formatted text into valid HTML, and
    returns it as a :class:`flask.Markup` instance.

    :param text: Markdown-formatted text to be rendered into HTML
    :param renderer: A custom misaka renderer to be used instead of the
                     default one
    :param options: Additional options for customizing the default renderer
    :return: A :class:`flask.Markup` instance representing the rendered text
    """
    ext, rndr = make_flags(**options)
    if renderer:
        md = misaka.Markdown(renderer, ext)
        result = md(text)
    else:
        result = misaka.html(text, extensions=ext, render_flags=rndr)
    if options.get("smartypants"):
        result = misaka.smartypants(result)
    return Markup(result)
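A minimal Flask usage sketch; the fenced_code option name is an assumption about what make_flags accepts, and Markup instances are returned as-is because they are not re-escaped by templates:

from flask import Flask

app = Flask(__name__)

@app.route('/')
def index():
    # render Markdown straight to the response body
    return markdown('# Hello\n\nSome *emphasised* text.',
                    fenced_code=True, smartypants=True)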
java
@SuppressWarnings("unchecked") public <R> ScoredValue<R> map(Function<? super V, ? extends R> mapper) { LettuceAssert.notNull(mapper, "Mapper function must not be null"); if (hasValue()) { return new ScoredValue<>(score, mapper.apply(getValue())); } return (ScoredValue<R>) this; }
java
public RemoteIterator<LocatedFileStatus> listFiles(Path f, boolean recursive) throws FileNotFoundException, IOException { try (Closeable context = new TimerContextWithLog(this.listFilesTimer.time(), "listFiles", f, recursive)) { return super.listFiles(f, recursive); } }
java
static Function<Optional<Descriptor>, String> fieldNumbersFunction( final String fmt, final Iterable<Integer> fieldNumbers) { return new Function<Optional<Descriptor>, String>() { @Override public String apply(Optional<Descriptor> optDescriptor) { return resolveFieldNumbers(optDescriptor, fmt, fieldNumbers); } }; }
python
def uninstall(pecls): ''' Uninstall one or several pecl extensions. pecls The pecl extensions to uninstall. CLI Example: .. code-block:: bash salt '*' pecl.uninstall fuse ''' if isinstance(pecls, six.string_types): pecls = [pecls] return _pecl('uninstall {0}'.format(_cmd_quote(' '.join(pecls))))
java
@Override void setScanResult(final ScanResult scanResult) { super.setScanResult(scanResult); if (typeArguments != null) { for (final TypeArgument typeArgument : typeArguments) { typeArgument.setScanResult(scanResult); } } if (suffixTypeArguments != null) { for (final List<TypeArgument> list : suffixTypeArguments) { for (final TypeArgument typeArgument : list) { typeArgument.setScanResult(scanResult); } } } }
python
def lstsq(cls, a, b):
    """Return the least-squares solution to a linear matrix equation.

    :param Matrix a: Design matrix with the values of the independent
        variables.
    :param Matrix b: Matrix with the "dependent variable" values. b may only
        have one column.
    :raise: Raises a :py:exc:`ValueError` if

        - the number of rows of a and b does not match.
        - b has more than one column.
    :note: The algorithm solves the equation beta = a^+ b, where a^+ is the
        pseudoinverse of a.
    """
    # Check that the sizes of the input matrices match
    if a.get_height() != b.get_height():
        raise ValueError("Size of input matrices does not match")
    if b.get_width() != 1:
        raise ValueError("Matrix with dependent variable has more than 1 column")

    aPseudo = a.pseudoinverse()
    # If c = a^T a is regular (invertible), the normal equations could be
    # solved directly instead:
    #     aTrans = a.transform()
    #     c = aTrans * a
    #     cInvers = c.invers()  # raises a ValueError if c is not invertible
    #     beta = cInvers * aTrans * b
    beta = aPseudo * b
    return beta
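For intuition, the same beta = a^+ b solution can be cross-checked with NumPy's pseudoinverse (a sketch independent of the Matrix class above):

import numpy as np

# Fit y = b0 + b1*x for three observations that lie exactly on y = 1 + 2x.
a = np.array([[1.0, 0.0],
              [1.0, 1.0],
              [1.0, 2.0]])   # design matrix with an intercept column
b = np.array([[1.0], [3.0], [5.0]])

beta = np.linalg.pinv(a) @ b  # beta = a^+ b
print(beta)                   # [[1.], [2.]] -> intercept 1, slope 2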
java
public static int getMemoryInUse() { Runtime runtime = Runtime.getRuntime(); long mb = 1024 * 1024; long total = runtime.totalMemory(); long free = runtime.freeMemory(); return (int) ((total - free) / mb); }
python
def get_dashboard_version(self, id, version, **kwargs):  # noqa: E501
    """Get a specific version of a specific dashboard  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_dashboard_version(id, version, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param int version: (required)
    :return: ResponseContainerDashboard
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.get_dashboard_version_with_http_info(id, version, **kwargs)  # noqa: E501
    else:
        (data) = self.get_dashboard_version_with_http_info(id, version, **kwargs)  # noqa: E501
        return data
python
def get_season(self, season_key, card_type="micro_card"):
    """
    Calling Season API.

    Args:
        season_key: key of the season
        card_type: optional, defaults to micro_card. Accepted values are
            micro_card & summary_card

    Returns:
        json data
    """
    season_url = self.api_path + "season/" + season_key + "/"
    params = {"card_type": card_type}
    response = self.get_response(season_url, params)
    return response
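A hypothetical usage sketch; client and the season key are made up for illustration:

# `client` is a hypothetical instance of the API wrapper above.
season = client.get_season("iplt20_2017", card_type="summary_card")
print(season)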
java
public BigDecimal getBigDecimal( int index ) throws OdaException
{
    BigDecimal value = (BigDecimal) getFieldValue(index);
    return value == null ? new BigDecimal(0) : value;
}
python
def _save_new_defaults(self, defaults, new_version, subfolder): """Save new defaults""" new_defaults = DefaultsConfig(name='defaults-'+new_version, subfolder=subfolder) if not osp.isfile(new_defaults.filename()): new_defaults.set_defaults(defaults) new_defaults._save()
java
public void notReady() { if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.entry(this, tc, "notReady"); updateLastNotReadyTime(); super.notReady(); if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(this, tc, "notReady"); }
java
@Override public ListJobExecutionsForThingResult listJobExecutionsForThing(ListJobExecutionsForThingRequest request) { request = beforeClientExecution(request); return executeListJobExecutionsForThing(request); }
python
def td_is_finished(tomodir):
    """Return the state of modeling and inversion for a given tomodir.

    The result does not take into account sensitivities or potentials, as
    optionally generated by CRMod.

    Parameters
    ----------
    tomodir: string
        Directory to check

    Returns
    -------
    crmod_is_finished: bool
        True if a successful CRMod result is contained in the tomodir
        directory.
    crtomo_is_finished: bool
        True if a successful CRTomo inversion result is contained in the
        tomodir directory.
    """
    if not is_tomodir(tomodir):
        raise Exception('Supplied directory is not a tomodir!')

    # crmod is finished if:
    # config.dat/rho.dat/elem.dat/elec.dat/crmod.cfg are present
    # volt.dat is present
    if(os.path.isfile(tomodir + os.sep + 'config/config.dat') and
       os.path.isfile(tomodir + os.sep + 'rho/rho.dat') and
       os.path.isfile(tomodir + os.sep + 'grid/elem.dat') and
       os.path.isfile(tomodir + os.sep + 'grid/elec.dat') and
       os.path.isfile(tomodir + os.sep + 'exe/crmod.cfg') and
       os.path.isfile(tomodir + os.sep + 'mod/volt.dat')):
        crmod_is_finished = True
    else:
        crmod_is_finished = False

    # crtomo is finished if:
    # crtomo.cfg/volt.dat/elem.dat/elec.dat/inv.ctr/run.ctr are present
    # one of the last lines of inv/run.ctr starts with the word "CPU"
    # initialize to False so the flag is defined even when files are missing
    crtomo_is_finished = False
    if(os.path.isfile(tomodir + os.sep + 'grid/elem.dat') and
       os.path.isfile(tomodir + os.sep + 'grid/elec.dat') and
       os.path.isfile(tomodir + os.sep + 'exe/crtomo.cfg') and
       os.path.isfile(tomodir + os.sep + 'inv/inv.ctr') and
       os.path.isfile(tomodir + os.sep + 'inv/run.ctr') and
       os.path.isfile(tomodir + os.sep + 'mod/volt.dat')):
        with open(tomodir + os.sep + 'inv/run.ctr', 'r') as fid:
            lines = fid.readlines()
        # check the last 5 lines; any match means the inversion completed,
        # so stop at the first hit instead of letting later lines reset it
        regex = re.compile('CPU')
        for line in lines[-5:]:
            if regex.match(line.strip()) is not None:
                crtomo_is_finished = True
                break
    return crmod_is_finished, crtomo_is_finished
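A hypothetical usage sketch; the directory name is made up:

crmod_done, crtomo_done = td_is_finished('tomodirs/run_01')
if crmod_done and not crtomo_done:
    print('forward modeling finished, inversion still pending')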
java
private Preference.OnPreferenceChangeListener createShowValueAsSummaryListener() { return new Preference.OnPreferenceChangeListener() { @Override public boolean onPreferenceChange(final Preference preference, final Object newValue) { boolean showValueAsSummary = (Boolean) newValue; editTextPreference.showValueAsSummary(showValueAsSummary); listPreference.showValueAsSummary(showValueAsSummary); multiChoiceListPreference.showValueAsSummary(showValueAsSummary); seekBarPreference.showValueAsSummary(showValueAsSummary); numberPickerPreference.showValueAsSummary(showValueAsSummary); digitPickerPreference.showValueAsSummary(showValueAsSummary); resolutionPreference.showValueAsSummary(showValueAsSummary); colorPalettePreference.showValueAsSummary(showValueAsSummary); adaptSwitchPreferenceSummary(showValueAsSummary); return true; } }; }
python
def _py_code_variables(lines, executable, lparams, tab):
    """Adds the variable code lines for all the parameters in the executable.

    :arg lparams: a list of the local variable declarations made so far that
      need to be passed to the executable when it is called.
    """
    allparams = executable.ordered_parameters
    if type(executable).__name__ == "Function":
        allparams = allparams + [executable]

    for p in allparams:
        _py_code_parameter(lines, p, "invar", lparams, tab)
        if p.direction == "(out)":
            # We need to reverse the order of the indices to match the Fortran
            # code generation of the wrapper.
            _py_code_parameter(lines, p, "outvar", lparams, tab)
            _py_code_parameter(lines, p, "indices", lparams, tab)
        else:
            _py_code_parameter(lines, p, "indices", lparams, tab)
            _py_code_parameter(lines, p, "outvar", lparams, tab)
java
public static Class<?> getComponentClass(final Object object) { if (object == null) { return null; } return object.getClass().getComponentType(); }
java
protected base_resource[] get_nitro_bulk_response(nitro_service service, String response) throws Exception { traceroute_responses result = (traceroute_responses) service.get_payload_formatter().string_to_resource(traceroute_responses.class, response); if(result.errorcode != 0) { if (result.errorcode == SESSION_NOT_EXISTS) service.clear_session(); throw new nitro_exception(result.message, result.errorcode, (base_response [])result.traceroute_response_array); } traceroute[] result_traceroute = new traceroute[result.traceroute_response_array.length]; for(int i = 0; i < result.traceroute_response_array.length; i++) { result_traceroute[i] = result.traceroute_response_array[i].traceroute[0]; } return result_traceroute; }
java
private void processConsumerSetChangeCallback (CommsByteBuffer buffer, Conversation conversation)
{
  if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.entry(this, tc, "processConsumerSetChangeCallback", new Object[]{buffer, conversation});

  final ClientConversationState convState = (ClientConversationState) conversation.getAttachment();
  //final SICoreConnection connection = convState.getSICoreConnection();
  final short connectionObjectId = buffer.getShort();
  final short consumerMonitorListenerid = buffer.getShort();
  final boolean isEmpty = buffer.getBoolean();

  if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) SibTr.debug(this, tc, "connectionObjectId="+connectionObjectId);
  if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) SibTr.debug(this, tc, "consumerMonitorListenerid="+consumerMonitorListenerid);
  if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) SibTr.debug(this, tc, "isEmpty="+isEmpty);

  // Look up the real ConsumerSetChangeCallback in the local ConsumerMonitorListenerCache
  final ConsumerMonitorListenerCache consumerMonitorListenerCache = convState.getConsumerMonitorListenerCache();
  final ConsumerSetChangeCallback consumerSetChangeCallback = consumerMonitorListenerCache.get(consumerMonitorListenerid);

  if (consumerSetChangeCallback != null) {
    // Call the listener on a separate thread.
    ClientAsynchEventThreadPool.getInstance().dispatchConsumerSetChangeCallbackEvent(consumerSetChangeCallback, isEmpty);
  } else {
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) SibTr.debug(this, tc, "consumerMonitorListenerid="+consumerMonitorListenerid+" not found in consumerMonitorListenerCache");
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) SibTr.debug(this, tc, consumerMonitorListenerCache.toString());
    SIErrorException e = new SIErrorException(nls.getFormattedMessage("UNABLE_TO_FIND_CONSUMER_MONITOR_LISTENER_SICO8024", new Object[] {consumerMonitorListenerid}, null));
    FFDCFilter.processException(e, CLASS_NAME + ".processConsumerSetChangeCallback", CommsConstants.PROXYRECEIVELISTENER_CONSUMERMON_CALLBACK_01, this);
    SibTr.error(tc, "An internal error occurred. The consumerMonitorListenerid "+consumerMonitorListenerid+" received by the client cannot be located.");
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(this, tc, "processConsumerSetChangeCallback");
    throw e;
  }

  if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(this, tc, "processConsumerSetChangeCallback");
}
java
@Override public void process(HttpServerRequest vertxRequest, ContainerRequest jerseyRequest, Handler<Void> done) { jerseyRequest.setProperty(FIRST_BYTE_TIMER_CONTEXT, firstByteTimer.time()); jerseyRequest.setProperty(LAST_BYTE_TIMER_CONTEXT, lastByteTimer.time()); done.handle(null); }
java
public Map getQueryParams() { Map params = _codec.getExistingParams(); Map newParams = new HashMap(); addSortParams(newParams); addFilterParams(newParams); addPagerParams(newParams); params = mergeMaps(params, newParams); params = transformMap(params); return params; }
java
@RequestMapping("/web") public void webPay(@RequestParam("orderNumber") String orderNumber, HttpServletResponse resp){ WebPayDetail detail = new WebPayDetail(orderNumber, "测试订单-" + orderNumber, "0.01"); String form = alipayService.webPay(detail); logger.info("web pay form: {}", form); try { resp.setContentType("text/html;charset=UTF-8"); resp.setCharacterEncoding("UTF-8"); resp.getWriter().write(form); resp.setStatus(HttpServletResponse.SC_OK); } catch (IOException e) { // ignore } }
python
def kill(self):
    """Sometimes terminate() is not enough, we must "help" external modules to die...

    :return: None
    """
    logger.info("Killing external module (pid=%d) for module %s...",
                self.process.pid, self.name)
    if os.name == 'nt':
        self.process.terminate()
    else:
        self.process.terminate()
        # Wait up to KILL_TIME seconds before killing the process abruptly
        self.process.join(timeout=KILL_TIME)
        # No other choice left: force the kill...
        if self.process.is_alive():
            logger.warning("%s is still living %d seconds after a normal kill, "
                           "I help it to die", self.name, KILL_TIME)
            os.kill(self.process.pid, signal.SIGKILL)
            self.process.join(1)
            if self.process.is_alive():
                logger.error("%s still living after brutal kill, I leave it.", self.name)
    logger.info("External module killed")
java
@Override public List<CommerceSubscriptionEntry> findAll() { return findAll(QueryUtil.ALL_POS, QueryUtil.ALL_POS, null); }
python
def upload_file_sections(self, user_id, section_id, assignment_id): """ Upload a file. Upload a file to a submission. This API endpoint is the first step in uploading a file to a submission as a student. See the {file:file_uploads.html File Upload Documentation} for details on the file upload workflow. The final step of the file upload workflow will return the attachment data, including the new file id. The caller can then POST to submit the +online_upload+ assignment with these file ids. """ path = {} data = {} params = {} # REQUIRED - PATH - section_id """ID""" path["section_id"] = section_id # REQUIRED - PATH - assignment_id """ID""" path["assignment_id"] = assignment_id # REQUIRED - PATH - user_id """ID""" path["user_id"] = user_id self.logger.debug("POST /api/v1/sections/{section_id}/assignments/{assignment_id}/submissions/{user_id}/files with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/sections/{section_id}/assignments/{assignment_id}/submissions/{user_id}/files".format(**path), data=data, params=params, no_data=True)
python
def load(self, urlpath, output=None, **kwargs):
    """
    Downloads data from a given url, generates a hashed filename,
    logs metadata, and caches it locally.

    Parameters
    ----------
    urlpath: str, location of data
        May be a local path, or remote path if including a protocol
        specifier such as ``'s3://'``. May include glob wildcards.
    output: bool
        Whether to show progress bars; turn off for testing

    Returns
    -------
    List of local cache_paths to be opened instead of the remote file(s).
    If caching is disabled, the urlpath is returned.
    """
    if conf.get('cache_disabled', False):
        return [urlpath]
    self.output = output if output is not None else conf.get(
        'cache_download_progress', True)

    cache_paths = self._from_metadata(urlpath)
    if cache_paths is None:
        files_in, files_out = self._make_files(urlpath)
        self._load(files_in, files_out, urlpath)
        cache_paths = self._from_metadata(urlpath)
    return cache_paths
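A hypothetical usage sketch, assuming cache is an instance of this cache class; the bucket URL is made up:

local_paths = cache.load('s3://my-bucket/data/*.csv', output=False)
for path in local_paths:
    print('reading from local cache:', path)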