python
def get_temp_url(self, obj, seconds, method="GET", key=None, cached=True):
    """
    Given a storage object in this container, returns a URL that can be
    used to access that object. The URL will expire after `seconds`
    seconds.

    The only methods supported are GET and PUT. Anything else will raise
    an `InvalidTemporaryURLMethod` exception.

    If you have your Temporary URL key, you can pass it in directly and
    potentially save an API call to retrieve it. If you don't pass in the
    key, and don't wish to use any cached value, pass `cached=False`.
    """
    return self.manager.get_temp_url(self, obj, seconds, method=method,
                                     key=key, cached=cached)
python
def fetch(db=None, sql=None):
    '''
    Retrieve data from an sqlite3 db (returns all rows, be careful!)

    CLI Example:

    .. code-block:: bash

        salt '*' sqlite3.fetch /root/test.db 'SELECT * FROM test;'
    '''
    cur = _connect(db)
    if not cur:
        return False
    cur.execute(sql)
    rows = cur.fetchall()
    return rows
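A minimal standalone analogue of the fetch() pattern above, using the stdlib sqlite3 module against an in-memory database; the table and rows are hypothetical, and a plain connection replaces the salt `_connect` helper.

import sqlite3

# Hypothetical in-memory database standing in for /root/test.db.
conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE test (id INTEGER, name TEXT)')
conn.executemany('INSERT INTO test VALUES (?, ?)', [(1, 'a'), (2, 'b')])

cur = conn.cursor()
cur.execute('SELECT * FROM test')
print(cur.fetchall())  # [(1, 'a'), (2, 'b')]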
java
public Observable<Page<ManagedInstanceEncryptionProtectorInner>> listByInstanceAsync(final String resourceGroupName, final String managedInstanceName) {
    return listByInstanceWithServiceResponseAsync(resourceGroupName, managedInstanceName)
        .map(new Func1<ServiceResponse<Page<ManagedInstanceEncryptionProtectorInner>>, Page<ManagedInstanceEncryptionProtectorInner>>() {
            @Override
            public Page<ManagedInstanceEncryptionProtectorInner> call(ServiceResponse<Page<ManagedInstanceEncryptionProtectorInner>> response) {
                return response.body();
            }
        });
}
python
def _make_intersection(edge_info, all_edge_nodes):
    """Convert a description of edges into a curved polygon.

    .. note::

       This is a helper used only by :meth:`.Surface.intersect`.

    Args:
        edge_info (Tuple[Tuple[int, float, float], ...]): Information
            describing each edge in the curved polygon by indicating which
            surface / edge on the surface and then start and end parameters
            along that edge. (See :func:`.ends_to_curve`.)
        all_edge_nodes (Tuple[numpy.ndarray, ...]): The nodes of three
            edges of the first surface being intersected followed by the
            nodes of the three edges of the second.

    Returns:
        .CurvedPolygon: The intersection corresponding to ``edge_info``.
    """
    edges = []
    for index, start, end in edge_info:
        nodes = all_edge_nodes[index]
        new_nodes = _curve_helpers.specialize_curve(nodes, start, end)
        degree = new_nodes.shape[1] - 1
        edge = _curve_mod.Curve(new_nodes, degree, _copy=False)
        edges.append(edge)
    return curved_polygon.CurvedPolygon(
        *edges, metadata=edge_info, _verify=False
    )
java
private int doContentCache() throws IOException {
    // file
    cacheItem = generateCacheResource(key, true);
    // use cache
    if (cacheItem.isValid(timespan)) {
        pageContext.write(cacheItem.getValue());
        doCaching = false;
        return SKIP_BODY;
    }
    doCaching = true;
    return EVAL_BODY_BUFFERED;
}
java
public static af_persistant_stat_info[] get(nitro_service client) throws Exception {
    af_persistant_stat_info resource = new af_persistant_stat_info();
    resource.validate("get");
    return (af_persistant_stat_info[]) resource.get_resources(client);
}
java
public java.util.List<com.google.appengine.v1.Version> getVersionsList() {
    return versions_;
}
java
synchronized void upsertMessages(ArrayList<CTMessageDAO> inboxMessages) {
    if (!this.belowMemThreshold()) {
        Logger.v("There is not enough space left on the device to store data, data discarded");
        return;
    }
    //noinspection TryFinallyCanBeTryWithResources
    try {
        final SQLiteDatabase db = dbHelper.getWritableDatabase();
        for (CTMessageDAO messageDAO : inboxMessages) {
            final ContentValues cv = new ContentValues();
            cv.put(_ID, messageDAO.getId());
            cv.put(KEY_DATA, messageDAO.getJsonData().toString());
            cv.put(WZRKPARAMS, messageDAO.getWzrkParams().toString());
            cv.put(CAMPAIGN, messageDAO.getCampaignId());
            cv.put(TAGS, messageDAO.getTags());
            cv.put(IS_READ, messageDAO.isRead());
            cv.put(EXPIRES, messageDAO.getExpires());
            cv.put(KEY_CREATED_AT, messageDAO.getDate());
            cv.put(USER_ID, messageDAO.getUserId());
            db.insertWithOnConflict(Table.INBOX_MESSAGES.getName(), null, cv, SQLiteDatabase.CONFLICT_REPLACE);
        }
    } catch (final SQLiteException e) {
        getConfigLogger().verbose("Error adding data to table " + Table.INBOX_MESSAGES.getName());
    } finally {
        dbHelper.close();
    }
}
python
def kamada_kawai(self, defaultEdgeWeight=None, EdgeAttribute=None,
                 m_anticollisionSpringStrength=None, m_averageIterationsPerNode=None,
                 m_disconnectedNodeDistanceSpringRestLength=None,
                 m_disconnectedNodeDistanceSpringStrength=None, m_layoutPass=None,
                 m_nodeDistanceRestLengthConstant=None, m_nodeDistanceStrengthConstant=None,
                 maxWeightCutoff=None, minWeightCutoff=None, network=None, NodeAttribute=None,
                 nodeList=None, randomize=None, singlePartition=None, Type=None, unweighted=None,
                 verbose=None):
    """
    Execute the Edge-weighted Spring Embedded Layout on a network.

    :param defaultEdgeWeight (string, optional): The default edge weight to
        consider, default is 0.5
    :param EdgeAttribute (string, optional): The name of the edge column
        containing numeric values that will be used as weights in the layout
        algorithm. Only columns containing numeric values are shown
    :param m_anticollisionSpringStrength (string, optional): Strength to apply
        to avoid collisions, in numeric value
    :param m_averageIterationsPerNode (string, optional): Average number of
        iterations for each node, in numeric value
    :param m_disconnectedNodeDistanceSpringRestLength (string, optional): Rest
        length of a 'disconnected' spring, in numeric value
    :param m_disconnectedNodeDistanceSpringStrength (string, optional):
        Strength of a 'disconnected' spring, in numeric value
    :param m_layoutPass (string, optional): Number of layout passes, in
        numeric value
    :param m_nodeDistanceRestLengthConstant (string, optional): Spring rest
        length, in numeric value
    :param m_nodeDistanceStrengthConstant (string, optional): Spring strength,
        in numeric value
    :param maxWeightCutoff (string, optional): The maximum edge weight to
        consider, default to the Double.MAX value
    :param minWeightCutoff (string, optional): The minimum edge weight to
        consider, numeric values, default is 0
    :param network (string, optional): Specifies a network by name, or by SUID
        if the prefix SUID: is used. The keyword CURRENT, or a blank value can
        also be used to specify the current network.
    :param NodeAttribute (string, optional): The name of the node column
        containing numeric values that will be used as weights in the layout
        algorithm. Only columns containing numeric values are shown
    :param nodeList (string, optional): Specifies a list of nodes. The
        keywords all, selected, or unselected can be used to specify nodes by
        their selection state. The pattern COLUMN:VALUE sets this parameter to
        any rows that contain the specified column value; if the COLUMN prefix
        is not used, the NAME column is matched by default. A list of
        COLUMN:VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can
        be used to match multiple values.
    :param randomize (string, optional): Randomize graph before layout;
        boolean values only, true or false; defaults to true
    :param singlePartition (string, optional): Don't partition graph before
        layout; boolean values only, true or false; defaults to false
    :param Type (string, optional): How to interpret weight values; must be
        one of Heuristic, -Log(value), 1 - normalized value and normalized
        value. Defaults to Heuristic = ['Heuristic', '-Log(value)',
        '1 - normalized value', 'normalized value']
    :param unweighted (string, optional): Use unweighted edges; boolean values
        only, true or false; defaults to false
    """
    network = check_network(self, network, verbose=verbose)
    PARAMS = set_param(['defaultEdgeWeight', 'EdgeAttribute',
                        'm_anticollisionSpringStrength', 'm_averageIterationsPerNode',
                        'm_disconnectedNodeDistanceSpringRestLength',
                        'm_disconnectedNodeDistanceSpringStrength', 'm_layoutPass',
                        'm_nodeDistanceRestLengthConstant', 'm_nodeDistanceStrengthConstant',
                        'maxWeightCutoff', 'minWeightCutoff', 'network', 'NodeAttribute',
                        'nodeList', 'randomize', 'singlePartition', 'Type', 'unweighted'],
                       [defaultEdgeWeight,
                        EdgeAttribute, m_anticollisionSpringStrength, m_averageIterationsPerNode,
                        m_disconnectedNodeDistanceSpringRestLength,
                        m_disconnectedNodeDistanceSpringStrength, m_layoutPass,
                        m_nodeDistanceRestLengthConstant, m_nodeDistanceStrengthConstant,
                        maxWeightCutoff, minWeightCutoff, network, NodeAttribute, nodeList,
                        randomize, singlePartition, Type, unweighted])
    response = api(url=self.__url + "/kamada-kawai", PARAMS=PARAMS, method="POST", verbose=verbose)
    return response
java
public Observable<List<ApplicationInfoResponse>> listAsync(ListAppsOptionalParameter listOptionalParameter) {
    return listWithServiceResponseAsync(listOptionalParameter).map(new Func1<ServiceResponse<List<ApplicationInfoResponse>>, List<ApplicationInfoResponse>>() {
        @Override
        public List<ApplicationInfoResponse> call(ServiceResponse<List<ApplicationInfoResponse>> response) {
            return response.body();
        }
    });
}
java
@SafeVarargs
public static <T extends Model, R> Select select(TypeFunction<T, R>... functions) {
    return select(
        Arrays.stream(functions)
            .map(AnimaUtils::getLambdaColumnName)
            .collect(joining(", ")));
}
python
def get(self, request, *args, **kwargs):
    """
    Handles GET requests and instantiates blank versions of the form
    and its inline formsets.
    """
    # Prepare base
    if 'pk' in kwargs:
        self.object = self.get_object()
    else:
        self.object = None
    form_class = self.get_form_class()

    # Get prefix
    if 'field_prefix' in form_class.Meta.__dict__:
        # Get name from the form
        field_prefix = form_class.Meta.field_prefix
    else:
        # Get name from the class
        field_prefix = str(form_class).split("'")[1].split(".")[-1]
    self.field_prefix = field_prefix

    # Build form
    form = self.get_form(form_class)

    # Find groups
    if 'groups' in dir(self):
        # Save groups
        groups = self.groups
        # Redefine groups inside the form
        form.__groups__ = lambda: groups
        # Initialize list of fields
        fields = []
    else:
        groups = None

    # Add special prefix support to properly support form independency
    form.add_prefix = lambda fields_name, field_prefix=field_prefix: "%s_%s" % (field_prefix, fields_name)
    if 'autofill' not in dir(form.Meta):
        form.Meta.autofill = {}

    # For every extra form
    forms = []
    position_form_default = 0
    for (formelement, linkerfield, modelfilter) in self.forms:
        if formelement is None:
            formobj = form
            position_form_default = len(forms)
        else:
            # Locate linked element
            if self.object:
                related_name = formelement._meta.model._meta.get_field(linkerfield).related_query_name()
                queryset = getattr(self.object, related_name)
                if modelfilter:
                    queryset = queryset.filter(eval("Q(%s)" % (modelfilter)))
                get_method = getattr(queryset, 'get', None)
                if get_method:
                    instance = queryset.get()
                else:
                    instance = queryset
            else:
                instance = None

            if 'autofill' in dir(formelement.Meta):
                formname = str(formelement).split('.')[-1].split("'")[0]
                for key in formelement.Meta.autofill:
                    form.Meta.autofill['{}_{}'.format(formname, key)] = formelement.Meta.autofill[key]

            # Get prefix
            if 'field_prefix' in formelement.Meta.__dict__:
                # Get name from the form
                field_prefix = formelement.Meta.field_prefix
            else:
                # Get name from the class
                field_prefix = str(formelement).split("'")[1].split(".")[-1]
            self.field_prefix = field_prefix

            # Prepare form
            formobj = formelement(instance=instance)
            formobj.form_name = form.form_name

            # Excluded fields
            if 'exclude' not in formobj.Meta.__dict__:
                formobj.Meta.exclude = [linkerfield]
            elif linkerfield not in formobj.Meta.exclude:
                formobj.Meta.exclude.append(linkerfield)
            if linkerfield in formobj.fields:
                del formobj.fields[linkerfield]

            # Add special prefix support to properly support form independency
            formobj.add_prefix = lambda fields_name, field_prefix=field_prefix: "%s_%s" % (field_prefix, fields_name)
            formobj.scope_prefix = field_prefix

        # Save fields to the list
        if groups:
            for field in formobj:
                fields.append(field)
        else:
            # Add the form to the list of forms
            forms.append(formobj)

    if position_form_default == 0:
        open_tabs = 1
    else:
        open_tabs = 0

    # Remember list of fields
    if groups:
        form.list_fields = fields

    # Add context and return new context
    return self.render_to_response(self.get_context_data(form=form,
                                                         forms=forms,
                                                         open_tabs=open_tabs,
                                                         position_form_default=position_form_default))
python
def submit_cookbook(self, cookbook, params={}, _extra_params={}):
    """
    Submit a cookbook.
    """
    self._check_user_parameters(params)
    files = {'cookbook': cookbook}
    return self._submit(params, files, _extra_params=_extra_params)
java
final public void print(int i) {
    Writer out = this.out;
    if (out == null)
        return;

    if (i == 0x80000000) {
        print("-2147483648");
        return;
    }

    try {
        if (i < 0) {
            out.write('-');
            i = -i;
        } else if (i < 9) {
            out.write('0' + i);
            return;
        }

        int length = 0;
        int exp = 10;

        if (i >= 1000000000)
            length = 9;
        else {
            for (; i >= exp; length++)
                exp = 10 * exp;
        }

        int j = 31;

        while (i > 0) {
            _tempCharBuffer[--j] = (char) ((i % 10) + '0');
            i /= 10;
        }

        out.write(_tempCharBuffer, j, 31 - j);
    } catch (IOException e) {
        log.log(Level.FINE, e.toString(), e);
    }
}
java
public Options addyAxis(final Axis yAxis) {
    if (this.getyAxis() == null) {
        this.setyAxis(new ArrayList<Axis>());
    }
    this.getyAxis().add(yAxis);
    return this;
}
java
public void cacheConcept(Concept concept) {
    conceptCache.put(concept.id(), concept);
    if (concept.isSchemaConcept()) {
        SchemaConcept schemaConcept = concept.asSchemaConcept();
        schemaConceptCache.put(schemaConcept.label(), schemaConcept);
        labelCache.put(schemaConcept.label(), schemaConcept.labelId());
    }
}
java
public static long getMultipartSizeProperty(Configuration conf, String property, long defVal) {
    long partSize = conf.getLongBytes(property, defVal);
    if (partSize < MULTIPART_MIN_SIZE) {
        LOG.warn("{} must be at least 5 MB; configured value is {}", property, partSize);
        partSize = MULTIPART_MIN_SIZE;
    }
    return partSize;
}
java
public final void mSFIXED32() throws RecognitionException {
    try {
        int _type = SFIXED32;
        int _channel = DEFAULT_TOKEN_CHANNEL;
        // com/dyuproject/protostuff/parser/ProtoLexer.g:187:5: ( 'sfixed32' )
        // com/dyuproject/protostuff/parser/ProtoLexer.g:187:9: 'sfixed32'
        {
            match("sfixed32");
        }
        state.type = _type;
        state.channel = _channel;
    } finally {
    }
}
java
private LockedObject generateTempLockedObjects(ITransaction transaction, String path) {
    if (!_tempLocks.containsKey(path)) {
        LockedObject returnObject = new LockedObject(this, path, _temporary);
        String parentPath = getParentPath(path);
        if (parentPath != null) {
            LockedObject parentLockedObject = generateTempLockedObjects(
                    transaction, parentPath);
            parentLockedObject.addChild(returnObject);
            returnObject._parent = parentLockedObject;
        }
        return returnObject;
    } else {
        // there is already a LockedObject on the specified path
        return (LockedObject) this._tempLocks.get(path);
    }
}
java
public boolean isBaseLevelForKey(Slice userKey) {
    // Maybe use binary search to find right entry instead of linear search?
    UserComparator userComparator = inputVersion.getInternalKeyComparator().getUserComparator();
    for (int level = this.level + 2; level < NUM_LEVELS; level++) {
        List<FileMetaData> files = inputVersion.getFiles(level);
        while (levelPointers[level] < files.size()) {
            FileMetaData f = files.get(levelPointers[level]);
            if (userComparator.compare(userKey, f.getLargest().getUserKey()) <= 0) {
                // We've advanced far enough
                if (userComparator.compare(userKey, f.getSmallest().getUserKey()) >= 0) {
                    // Key falls in this file's range, so definitely not base level
                    return false;
                }
                break;
            }
            levelPointers[level]++;
        }
    }
    return true;
}
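The range check above generalizes beyond this codebase. A minimal Python sketch under simplifying assumptions: per-call pointers instead of the persistent levelPointers state, and plain (smallest, largest) string tuples standing in for FileMetaData.

def is_base_level_for_key(user_key, levels, start_level):
    # levels: per-level file lists; each file is a (smallest, largest) key range
    pointers = [0] * len(levels)
    for level in range(start_level + 2, len(levels)):
        files = levels[level]
        while pointers[level] < len(files):
            smallest, largest = files[pointers[level]]
            if user_key <= largest:
                if user_key >= smallest:
                    return False  # key falls in this file's range
                break  # advanced far enough on this level
            pointers[level] += 1
    return True

# Hypothetical levels: nothing at 0-1, one file at level 2, two files at level 3.
levels = [[], [], [('a', 'c')], [('d', 'f'), ('m', 'p')]]
print(is_base_level_for_key('e', levels, 0))  # False: 'e' is inside ('d', 'f')
print(is_base_level_for_key('x', levels, 0))  # True: no higher-level file covers 'x'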
python
def filter(self, criteria: Q, offset: int = 0, limit: int = 10,
           order_by: list = ()) -> ResultSet:
    """
    Filter objects from the repository. Method must return a `ResultSet`
    object.
    """
python
def get_default_config(self):
    """
    Return the default config for the handler
    """
    config = super(HostedGraphiteHandler, self).get_default_config()
    config.update({
        'apikey': '',
        'host': 'carbon.hostedgraphite.com',
        'port': 2003,
        'proto': 'tcp',
        'timeout': 15,
        'batch': 1,
        'max_backlog_multiplier': 5,
        'trim_backlog_multiplier': 4,
    })
    return config
python
def post_process(self, xout, yout, params):
    """ Transforms internal values to output, used internally. """
    for post_processor in self.post_processors:
        xout, yout, params = post_processor(xout, yout, params)
    return xout, yout, params
java
public static Atomic atomic(Runnable runnable) {
    try {
        AnimaQuery.beginTransaction();
        runnable.run();
        AnimaQuery.commit();
        return Atomic.ok();
    } catch (Exception e) {
        boolean isRollback = ifReturn(
                of().rollbackException.isInstance(e),
                () -> {
                    AnimaQuery.rollback();
                    return true;
                },
                () -> false);
        return Atomic.error(e).rollback(isRollback);
    } finally {
        AnimaQuery.endTransaction();
    }
}
java
public static CPOptionValue fetchByC_K(long CPOptionId, String key, boolean retrieveFromCache) {
    return getPersistence().fetchByC_K(CPOptionId, key, retrieveFromCache);
}
python
def arguments_from_signature(signature, *args, **kwargs):
    """Validate signature against `args` and `kwargs` and return the
    kwargs asked for in the signature

    Parameters
    ----------
    args : Tuple[object...]
    kwargs : Dict[str, object]

    Returns
    -------
    Tuple[Tuple, Dict[str, Any]]

    Examples
    --------
    >>> from inspect import signature
    >>> def foo(a, b=1):
    ...     return a + b
    >>> foo_sig = signature(foo)
    >>> args, kwargs = arguments_from_signature(foo_sig, 1, b=2)
    >>> args
    (1,)
    >>> kwargs
    {'b': 2}
    >>> def bar(a):
    ...     return a + 1
    >>> bar_sig = signature(bar)
    >>> args, kwargs = arguments_from_signature(bar_sig, 1, b=2)
    >>> args
    (1,)
    >>> kwargs
    {}
    """
    bound = signature.bind_partial(*args)
    meta_kwargs = toolz.merge({'kwargs': kwargs}, kwargs)
    remaining_parameters = signature.parameters.keys() - bound.arguments.keys()
    new_kwargs = {
        k: meta_kwargs[k]
        for k in remaining_parameters
        if k in signature.parameters
        if signature.parameters[k].kind in {
            Parameter.KEYWORD_ONLY,
            Parameter.POSITIONAL_OR_KEYWORD,
            Parameter.VAR_KEYWORD,
        }
    }
    return args, new_kwargs
java
static public Set<String> expandFileNames(String[] filePathList) throws SnowflakeSQLException {
    Set<String> result = new HashSet<String>();

    // a location to file pattern map so that we only need to list the
    // same directory once when they appear in multiple times.
    Map<String, List<String>> locationToFilePatterns;
    locationToFilePatterns = new HashMap<String, List<String>>();

    String cwd = System.getProperty("user.dir");

    for (String path : filePathList) {
        // replace ~ with user home
        path = path.replace("~", System.getProperty("user.home"));

        // user may also specify files relative to current directory
        // add the current path if that is the case
        if (!(new File(path)).isAbsolute()) {
            logger.debug("Adding current working dir to relative file path.");
            path = cwd + localFSFileSep + path;
        }

        // check if the path contains any wildcards
        if (!path.contains("*") && !path.contains("?")
                && !(path.contains("[") && path.contains("]"))) {
            /* this file path doesn't have any wildcard, so we don't need to
             * expand it
             */
            result.add(path);
        } else {
            // get the directory path
            int lastFileSepIndex = path.lastIndexOf(localFSFileSep);

            // SNOW-15203: if we don't find a default file sep, try "/" if it is not
            // the default file sep.
            if (lastFileSepIndex < 0 && !"/".equals(localFSFileSep)) {
                lastFileSepIndex = path.lastIndexOf("/");
            }

            String loc = path.substring(0, lastFileSepIndex + 1);
            String filePattern = path.substring(lastFileSepIndex + 1);

            List<String> filePatterns = locationToFilePatterns.get(loc);
            if (filePatterns == null) {
                filePatterns = new ArrayList<String>();
                locationToFilePatterns.put(loc, filePatterns);
            }
            filePatterns.add(filePattern);
        }
    }

    // For each location, list files and match against the patterns
    for (Map.Entry<String, List<String>> entry : locationToFilePatterns.entrySet()) {
        try {
            java.io.File dir = new java.io.File(entry.getKey());

            logger.debug("Listing files under: {} with patterns: {}",
                         entry.getKey(), entry.getValue().toString());

            // The following currently ignore sub directories
            for (Object file : FileUtils.listFiles(dir,
                    new WildcardFileFilter(entry.getValue()), null)) {
                result.add(((java.io.File) file).getCanonicalPath());
            }
        } catch (Exception ex) {
            throw new SnowflakeSQLException(ex, SqlState.DATA_EXCEPTION,
                    ErrorCode.FAIL_LIST_FILES.getMessageCode(),
                    "Exception: " + ex.getMessage() + ", Dir=" + entry.getKey()
                            + ", Patterns=" + entry.getValue().toString());
        }
    }

    logger.debug("Expanded file paths: ");
    for (String filePath : result) {
        logger.debug("file: {}", filePath);
    }

    return result;
}
java
public static String exceptionToString(Exception e) {
    StringWriter sw = new StringWriter();
    PrintWriter pw = new PrintWriter(sw);
    e.printStackTrace(pw);
    return sw.toString();
}
python
def parse(self, value):
    """ Parse date """
    value = super(DateOpt, self).parse(value)
    if value is None:
        return None
    if isinstance(value, str):
        value = self.parse_date(value)
    if isinstance(value, datetime) and self.date_only:
        value = value.date()
    return value
python
def moving_average(data, periods, type='simple'):
    """
    compute a <periods> period moving average.

    type is 'simple' | 'exponential'
    """
    data = np.asarray(data)
    if type == 'simple':
        weights = np.ones(periods)
    else:
        weights = np.exp(np.linspace(-1., 0., periods))

    weights /= weights.sum()

    mavg = np.convolve(data, weights, mode='full')[:len(data)]
    mavg[:periods] = mavg[periods]
    return mavg
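A usage sketch with hypothetical price data, assuming the function above and `import numpy as np` are in scope. Note that np.convolve with mode='full' leaves the first `periods` entries only partially weighted, which the function backfills with mavg[periods].

import numpy as np

prices = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
print(moving_average(prices, periods=3))                      # flat 1/3 weights
print(moving_average(prices, periods=3, type='exponential'))  # recent points weighted more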
java
public PagedList<HybridConnectionInner> listHybridConnections(final String resourceGroupName, final String name) {
    ServiceResponse<Page<HybridConnectionInner>> response = listHybridConnectionsSinglePageAsync(resourceGroupName, name).toBlocking().single();
    return new PagedList<HybridConnectionInner>(response.body()) {
        @Override
        public Page<HybridConnectionInner> nextPage(String nextPageLink) {
            return listHybridConnectionsNextSinglePageAsync(nextPageLink).toBlocking().single().body();
        }
    };
}
java
@Override
public void set(final String name, final UserCredentials hostUser) throws IOException {
    assert this.proxyUGI == null;
    assert hostUser instanceof YarnProxyUser;

    LOG.log(Level.FINE, "UGI: user {0} copy from: {1}", new Object[] {name, hostUser});

    final UserGroupInformation hostUGI = ((YarnProxyUser) hostUser).get();
    final Collection<Token<? extends TokenIdentifier>> tokens = hostUGI.getCredentials().getAllTokens();

    this.set(name, hostUGI, tokens.toArray(new Token[tokens.size()]));
}
java
protected String getTypeName(JvmType type) {
    if (type != null) {
        if (type instanceof JvmDeclaredType) {
            final ITypeReferenceOwner owner = new StandardTypeReferenceOwner(this.services, type);
            return owner.toLightweightTypeReference(type).getHumanReadableName();
        }
        return type.getSimpleName();
    }
    return Messages.SARLHoverSignatureProvider_1;
}
java
@Override
public EEnum getPluginType() {
    if (pluginTypeEEnum == null) {
        pluginTypeEEnum = (EEnum) EPackage.Registry.INSTANCE.getEPackage(StorePackage.eNS_URI).getEClassifiers().get(105);
    }
    return pluginTypeEEnum;
}
python
def _clean_datetime(self, obj):
    """Python objects want to be text."""
    if isinstance(obj, datetime):
        # if it's not naive, put it on zulu time first:
        if obj.tzinfo is not None:
            obj = obj.astimezone(pytz.utc)
        return obj.isoformat()[:self.MAX_LENGTH]
    if isinstance(obj, date):
        return obj.isoformat()
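The zulu-time normalization above can be reproduced with the stdlib alone; this sketch uses datetime.timezone in place of pytz (an assumption to keep the example dependency-free, not the handler's actual import).

from datetime import datetime, timezone, timedelta

# Hypothetical aware datetime in UTC+2; normalize to UTC before serializing.
aware = datetime(2020, 1, 1, 12, 0, tzinfo=timezone(timedelta(hours=2)))
print(aware.astimezone(timezone.utc).isoformat())  # 2020-01-01T10:00:00+00:00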
python
def add_milestone(self, milestone, codelistoid="MILESTONES"):
    """
    Add a milestone

    :param codelistoid: specify the CodeListOID (defaults to MILESTONES)
    :param str milestone: Milestone to add
    """
    if milestone not in self.milestones.get(codelistoid, []):
        self._milestones.setdefault(codelistoid, []).append(milestone)
java
public static RubyEnumerator<String> foreach(InputStream inputStream) {
    return Ruby.Enumerator.of(new EachLineIterable(inputStream));
}
java
public void skipUntil(final TokenType... tokenTypes) {
    Set<TokenType> tokenTypeSet = Sets.newHashSet(tokenTypes);
    tokenTypeSet.add(Assist.END);
    while (!tokenTypeSet.contains(lexer.getCurrentToken().getType())) {
        lexer.nextToken();
    }
}
java
String getSpaceIdOrThrow(CMAResource resource, String param) {
    final String spaceId = resource.getSpaceId();
    if (spaceId == null) {
        throw new IllegalArgumentException(String.format(
                "%s must have a space associated.", param));
    }
    return spaceId;
}
python
def logs(self, follow=False):
    """
    Get logs from this container. Every item of the iterator contains one
    log line terminated with a newline. The logs are encoded (they are
    bytes, not str).

    Let's look at an example::

        image = conu.DockerImage("fedora", tag="27")
        command = ["bash", "-c", "for x in `seq 1 5`; do echo $x; sleep 1; done"]
        container = image.run_via_binary(command=command)
        for line in container.logs(follow=True):
            print(line)

    This will output

    .. code-block:: none

        b'1\\n'
        b'2\\n'
        b'3\\n'
        b'4\\n'
        b'5\\n'

    :param follow: bool, provide new logs as they come
    :return: iterator (of bytes)
    """
    return self.d.logs(self.get_id(), stream=True, follow=follow)
python
def end(self):
    """End of the standalone CLI."""
    if not self.quiet:
        self.screen.end()

    # Exit from export modules
    self.stats.end()

    # Check Glances version versus PyPI one
    if self.outdated.is_outdated():
        print("You are using Glances version {}, however version {} is available.".format(
            self.outdated.installed_version(), self.outdated.latest_version()))
        print("You should consider upgrading using: pip install --upgrade glances")
java
public void writeExternal(ObjectOutput out) throws IOException {
    out.writeInt(orderNumber);
    byte[] data = new JCRDateFormat().serialize(value);
    out.writeInt(data.length);
    out.write(data);
}
java
public Payload add(String key, String value) {
    getData().put(key, value);
    return this;
}
python
def load_module(ldr, fqname):
    '''Load `fqname` from under `ldr.fspath`.

    The `fqname` argument is the fully qualified module name, eg.
    "spam.eggs.ham". As explained above, when ::

        finder.find_module("spam.eggs.ham")

    is called, "spam.eggs" has already been imported and added to
    `sys.modules`. However, the `find_module()` method isn't necessarily
    always called during an actual import: meta tools that analyze import
    dependencies (such as freeze, Installer or py2exe) don't actually load
    modules, so a finder shouldn't depend on the parent package being
    available in `sys.modules`.

    The `load_module()` method has a few responsibilities that it must
    fulfill before it runs any code:

    * If there is an existing module object named 'fullname' in
      `sys.modules`, the loader must use that existing module. (Otherwise,
      the `reload()` builtin will not work correctly.) If a module named
      'fullname' does not exist in `sys.modules`, the loader must create a
      new module object and add it to `sys.modules`.

      Note that the module object must be in `sys.modules` before the
      loader executes the module code. This is crucial because the module
      code may (directly or indirectly) import itself; adding it to
      `sys.modules` beforehand prevents unbounded recursion in the worst
      case and multiple loading in the best. If the load fails, the loader
      needs to remove any module it may have inserted into `sys.modules`.
      If the module was already in `sys.modules` then the loader should
      leave it alone.

    * The `__file__` attribute must be set. This must be a string, but it
      may be a dummy value, for example "<frozen>". The privilege of not
      having a `__file__` attribute at all is reserved for built-in
      modules.

    * The `__name__` attribute must be set. If one uses
      `imp.new_module()` then the attribute is set automatically.

    * If it's a package, the `__path__` variable must be set. This must
      be a list, but may be empty if `__path__` has no further
      significance to the importer (more on this later).

    * The `__loader__` attribute must be set to the loader object. This
      is mostly for introspection and reloading, but can be used for
      importer-specific extras, for example getting data associated with
      an importer.

    * The `__package__` attribute [8] must be set.

    If the module is a Python module (as opposed to a built-in module or
    a dynamically loaded extension), it should execute the module's code
    in the module's global name space (`module.__dict__`).

    [8] PEP 366: Main module explicit relative imports
        http://www.python.org/dev/peps/pep-0366/
    '''
    scope = ldr.scope.split('.')
    modpath = fqname.split('.')
    if scope != modpath[0:len(scope)]:
        raise AssertionError(
            "%s responsible for %s got request for %s" % (
                ldr.__class__.__name__,
                ldr.scope,
                fqname,
            )
        )
    if fqname in sys.modules:
        mod = sys.modules[fqname]
    else:
        mod = sys.modules.setdefault(fqname, types.ModuleType(fqname))
    mod.__loader__ = ldr
    fspath = ldr.path_to(fqname)
    mod.__file__ = str(fspath)
    if fs.is_package(fspath):
        mod.__path__ = [ldr.fspath]
        mod.__package__ = str(fqname)
    else:
        mod.__package__ = str(fqname.rpartition('.')[0])
    exec(fs.get_code(fspath), mod.__dict__)
    return mod
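A toy loader that walks through the responsibilities listed in the docstring, loading source from an in-memory dict instead of a filesystem; DictLoader and SOURCES are hypothetical names invented for this sketch.

import sys
import types

SOURCES = {'demo_pkg': '', 'demo_pkg.mod': 'VALUE = 42'}  # stand-in "filesystem"

class DictLoader:
    def load_module(self, fqname):
        fresh = fqname not in sys.modules
        # reuse an existing module, else register a new one *before* exec
        mod = sys.modules.setdefault(fqname, types.ModuleType(fqname))
        mod.__file__ = '<dict:%s>' % fqname   # a dummy __file__ is allowed
        mod.__loader__ = self
        if any(k.startswith(fqname + '.') for k in SOURCES):
            mod.__path__ = []                 # it's a package
            mod.__package__ = fqname
        else:
            mod.__package__ = fqname.rpartition('.')[0]
        try:
            exec(SOURCES[fqname], mod.__dict__)
        except BaseException:
            if fresh:                         # only evict what we inserted
                sys.modules.pop(fqname, None)
            raise
        return mod

loader = DictLoader()
loader.load_module('demo_pkg')
print(loader.load_module('demo_pkg.mod').VALUE)  # 42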
java
@Override
public void visit(ASTNode[] nodes, SourceUnit sourceUnit) {
    if (!(nodes[0] instanceof AnnotationNode) || !(nodes[1] instanceof AnnotatedNode)) {
        throw new IllegalArgumentException("Internal error: wrong types: "
                + nodes[0].getClass().getName() + " / " + nodes[1].getClass().getName());
    }

    AnnotationNode node = (AnnotationNode) nodes[0];
    AnnotatedNode parent = (AnnotatedNode) nodes[1];
    ClassNode declaringClass = parent.getDeclaringClass();
    if (parent instanceof FieldNode) {
        int modifiers = ((FieldNode) parent).getModifiers();
        if ((modifiers & Modifier.FINAL) != 0) {
            String msg = "@griffon.transform.FXBindable cannot annotate a final property.";
            generateSyntaxErrorMessage(sourceUnit, node, msg);
        }
        addJavaFXProperty(sourceUnit, node, declaringClass, (FieldNode) parent);
    } else {
        addJavaFXPropertyToClass(sourceUnit, node, (ClassNode) parent);
    }
}
python
def factorial(N):
    """Compute the factorial of N.

    If N <= 10, use a fast lookup table;
    otherwise use scipy.special.factorial
    """
    if N < len(FACTORIALS):
        return FACTORIALS[N]
    else:
        from scipy import special
        return int(special.factorial(N))
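A self-contained sketch of the same table-then-fallback idea; the table values are the actual small factorials, and math.factorial stands in for scipy.special (an assumption to keep the example dependency-free).

from math import factorial as slow_factorial

FACTORIALS = [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800]

def fast_factorial(N):
    if N < len(FACTORIALS):
        return FACTORIALS[N]   # O(1) table hit for small N
    return slow_factorial(N)   # fallback for large N

assert fast_factorial(5) == 120
assert fast_factorial(20) == 2432902008176640000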
python
def id_unique(dict_id, name, lineno):
    """Returns True if dict_id not already used. Otherwise, invokes error"""
    if dict_id in name_dict:
        global error_occurred
        error_occurred = True
        print(
            "ERROR - {0:s} definition {1:s} at line {2:d} conflicts with {3:s}"
            .format(name, dict_id, lineno, name_dict[dict_id]))
        return False
    else:
        return True
python
def _get_content_hash(self):
    # type: () -> str
    """
    Returns the sha256 hash of the sorted content of the pyproject file.
    """
    content = self._local_config

    relevant_content = {}
    for key in self._relevant_keys:
        relevant_content[key] = content.get(key)

    content_hash = sha256(
        json.dumps(relevant_content, sort_keys=True).encode()
    ).hexdigest()

    return content_hash
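The hashing scheme is easy to reproduce standalone: only the relevant keys are serialized, and sort_keys=True makes the serialization order stable, so the hash is insensitive to unrelated edits. The content and key list below are hypothetical.

import json
from hashlib import sha256

content = {'name': 'demo', 'dependencies': {'requests': '^2.0'}, 'readme': 'x'}
relevant_keys = ['name', 'dependencies']  # edits to 'readme' won't change the hash

relevant = {k: content.get(k) for k in relevant_keys}
print(sha256(json.dumps(relevant, sort_keys=True).encode()).hexdigest())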
python
def create_sitemap(app, exception):
    """Generates the sitemap.xml from the collected HTML page links"""
    site_url = app.builder.config.site_url or app.builder.config.html_baseurl
    if not site_url:
        print("sphinx-sitemap error: neither html_baseurl nor site_url "
              "are set in conf.py. Sitemap not built.")
        return
    if not app.sitemap_links:
        print("sphinx-sitemap warning: No pages generated for sitemap.xml")
        return

    ET.register_namespace('xhtml', "http://www.w3.org/1999/xhtml")

    root = ET.Element("urlset")
    root.set("xmlns", "http://www.sitemaps.org/schemas/sitemap/0.9")

    get_locales(app, exception)

    if app.builder.config.version:
        version = app.builder.config.version + '/'
    else:
        version = app.builder.config.version

    for link in app.sitemap_links:
        url = ET.SubElement(root, "url")

        if app.builder.config.language is not None:
            ET.SubElement(url, "loc").text = site_url + \
                app.builder.config.language + '/' + version + link
            if len(app.locales) > 0:
                for lang in app.locales:
                    linktag = ET.SubElement(
                        url,
                        "{http://www.w3.org/1999/xhtml}link"
                    )
                    linktag.set("rel", "alternate")
                    linktag.set("hreflang", lang)
                    linktag.set("href", site_url + lang + '/' + version + link)
        elif app.builder.config.version:
            ET.SubElement(url, "loc").text = site_url + version + link
        else:
            ET.SubElement(url, "loc").text = site_url + link

    filename = app.outdir + "/sitemap.xml"
    ET.ElementTree(root).write(filename,
                               xml_declaration=True,
                               encoding='utf-8',
                               method="xml")
    print("sitemap.xml was generated for URL %s in %s" % (site_url, filename))
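Stripped of the Sphinx configuration branches, the urlset construction reduces to a few ElementTree calls; a minimal sketch with hypothetical links and site URL.

import xml.etree.ElementTree as ET

site_url = 'https://example.com/'
root = ET.Element('urlset')
root.set('xmlns', 'http://www.sitemaps.org/schemas/sitemap/0.9')
for link in ('index.html', 'guide/install.html'):
    url = ET.SubElement(root, 'url')
    ET.SubElement(url, 'loc').text = site_url + link
ET.ElementTree(root).write('sitemap.xml', xml_declaration=True,
                           encoding='utf-8', method='xml')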
python
def gather_sorting_comparison(working_folder, ground_truths, use_multi_index=True):
    """
    Loop over output folders in a tree to collect sortings from several
    sorters on several datasets and return a synthetic DataFrame with
    several metrics (performance, run_time, ...)

    Uses SortingComparison internally.

    Parameters
    ----------
    working_folder: str
        The folder where sorter.run_sorters have done the job.
    ground_truths: dict
        A dict where each key is the recording label and each value
        the SortingExtractor containing the ground truth.
    use_multi_index: bool (True by default)
        Use (or not) multi index for output dataframe.
        Multiindex is composed from (rec_name, sorter_name).

    Returns
    ----------
    comparisons: a dict of SortingComparison

    out_dataframes: a dict of DataFrame
        Return several useful DataFrames to compare all results:
          * run_times
          * performances
    """
    working_folder = Path(working_folder)

    comparisons = {}
    out_dataframes = {}

    # get run times:
    run_times = pd.read_csv(working_folder / 'run_time.csv', sep='\t', header=None)
    run_times.columns = ['rec_name', 'sorter_name', 'run_time']
    run_times = run_times.set_index(['rec_name', 'sorter_name'])
    out_dataframes['run_times'] = run_times

    performances = pd.DataFrame(index=run_times.index, columns=_perf_keys)
    out_dataframes['performances'] = performances

    results = collect_results(working_folder)
    for rec_name, result_one_dataset in results.items():
        for sorter_name, sorting in result_one_dataset.items():
            gt_sorting = ground_truths[rec_name]
            comp = SortingComparison(gt_sorting, sorting, count=True)
            comparisons[(rec_name, sorter_name)] = comp
            perf = compute_performance(comp, verbose=False, output='pandas')
            performances.loc[(rec_name, sorter_name), :] = perf

    if not use_multi_index:
        for k, df in out_dataframes.items():
            out_dataframes[k] = df.reset_index()

    return comparisons, out_dataframes
python
def _parse_request_method(request: web.Request):
    """Parse Access-Control-Request-Method header of the preflight request
    """
    method = request.headers.get(hdrs.ACCESS_CONTROL_REQUEST_METHOD)
    if method is None:
        raise web.HTTPForbidden(
            text="CORS preflight request failed: "
                 "'Access-Control-Request-Method' header is not specified")

    # FIXME: validate method string (ABNF: method = token), if parsing
    # fails, raise HTTPForbidden.

    return method
python
def endpoint(value: Any) -> Any:
    """
    Convert an endpoint string to the corresponding Endpoint instance type

    :param value: Endpoint string or subclass
    :return:
    """
    if issubclass(type(value), Endpoint):
        return value
    elif isinstance(value, str):
        for api, cls in MANAGED_API.items():
            if value.startswith(api + " "):
                return cls.from_inline(value)
        return UnknownEndpoint.from_inline(value)
    else:
        raise TypeError("Cannot convert {0} to endpoint".format(value))
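The prefix dispatch is the interesting part here; a runnable sketch with a toy one-entry registry, where the class and API name are hypothetical stand-ins for MANAGED_API entries.

class DummyEndpoint:
    @classmethod
    def from_inline(cls, inline):
        return '%s(%r)' % (cls.__name__, inline)

MANAGED_API = {'BASIC_MERKLED_API': DummyEndpoint}

def resolve(value):
    for api, cls in MANAGED_API.items():
        if value.startswith(api + ' '):
            return cls.from_inline(value)
    return None  # the real code falls back to UnknownEndpoint.from_inline(value)

print(resolve('BASIC_MERKLED_API node.example.org 443'))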
java
void doInfo() {
    this.infos.add("scanned directories: " + this.scDir);
    this.infos.add("unreadable directories: " + this.scDirUnreadable);
    this.infos.add("found files: " + this.scFiles);
    this.infos.add("unreadable files: " + this.scFilesUnreadable);
}
java
@Override
public FileOutputStream createOutputStream(String name, boolean append) throws IOException {
    return new TaggedFileOutputStream(new File(name), append);
}
java
public static ProgressBar getProgressBar(float value) {
    ProgressBar res = new ProgressBar(value);
    String style = "";
    if (value > 0.75) {
        style = "o-nearlyfull";
    }
    if (value > 0.9) {
        style = "o-full";
    }
    res.addStyleName(style);
    return res;
}
python
def decode_values(fct):
    '''
    Decode base64 encoded responses from Consul storage
    '''
    def inner(*args, **kwargs):
        ''' decorator '''
        data = fct(*args, **kwargs)
        if 'error' not in data:
            for result in data:
                result['Value'] = base64.b64decode(result['Value'])
        return data
    return inner
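A quick check of the decorator: wrap a fake fetch whose payload mimics a Consul KV response (hypothetical data), and the Value field comes back decoded. Assumes the decorator above and `import base64` are in scope.

import base64

@decode_values
def fake_kv_get():
    # stands in for a Consul KV HTTP response
    return [{'Key': 'greeting', 'Value': base64.b64encode(b'hello')}]

print(fake_kv_get())  # [{'Key': 'greeting', 'Value': b'hello'}]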
java
private void createProbeSenders(ArgoClientContext context) throws TransportConfigException {
    senders = new ArrayList<ProbeSender>();

    if (config.usesNetworkInterface()) {
        try {
            for (String niName : context.getAvailableNetworkInterfaces(config.requiresMulticast())) {
                try {
                    Transport transport = instantiateTransportClass(config.getClassname());
                    transport.initialize(transportProps, niName);
                    ProbeSender sender = new ProbeSender(transport);
                    senders.add(sender);
                } catch (TransportConfigException e) {
                    LOGGER.warn(e.getLocalizedMessage());
                }
            }
        } catch (SocketException e) {
            throw new TransportConfigException("Error getting available network interfaces", e);
        }
    } else {
        Transport transport = instantiateTransportClass(config.getClassname());
        transport.initialize(transportProps, "");
        ProbeSender sender = new ProbeSender(transport);
        senders.add(sender);
    }
}
java
private static boolean slowEquals(byte[] a, byte[] b) {
    int diff = a.length ^ b.length;
    for (int i = 0; i < a.length && i < b.length; i++) {
        diff |= a[i] ^ b[i];
    }
    return diff == 0;
}
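The XOR-accumulator trick ports directly to Python: diff collects every mismatched bit, so the loop never exits early and the timing stays independent of where the first difference occurs. The stdlib's hmac.compare_digest gives the same guarantee.

import hmac

def slow_equals(a: bytes, b: bytes) -> bool:
    diff = len(a) ^ len(b)
    for x, y in zip(a, b):
        diff |= x ^ y  # accumulate mismatched bits, never break early
    return diff == 0

assert slow_equals(b'secret', b'secret')
assert not slow_equals(b'secret', b'secreT')
assert slow_equals(b'a', b'ab') == hmac.compare_digest(b'a', b'ab')  # both False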
python
def lookup(self, dotted_path, lineno=None):
    """Given a dotted path in the format ``class_name`` or
    ``class_name:method_name`` this performs an alias lookup.

    For methods the line number must be supplied or the result is
    unreliable.
    """
    rv = None
    try:
        rv = rustcall(
            _lib.lsm_proguard_mapping_convert_dotted_path,
            self._get_ptr(),
            dotted_path.encode('utf-8'), lineno or 0)
        return _ffi.string(rv).decode('utf-8', 'replace')
    finally:
        if rv is not None:
            _lib.lsm_buffer_free(rv)
java
@Override
public final boolean satisfiedForFlowOrdering(final FilterOutcomes outcomes) {
    if (isAlwaysSatisfiedRequirement()) {
        return true;
    }
    final ComponentRequirement componentRequirement = _hasComponentRequirement.getComponentRequirement();
    if (componentRequirement == null) {
        return true;
    }
    final Collection<FilterOutcome> dependencies = componentRequirement.getProcessingDependencies();
    for (final FilterOutcome filterOutcome : dependencies) {
        final boolean contains = outcomes.contains(filterOutcome);
        if (!contains) {
            return false;
        }
    }
    return componentRequirement.isSatisfied(null, outcomes);
}
java
@Override
public void setCenter(double x, double y, double z) {
    this.cxProperty.set(x);
    this.cyProperty.set(y);
    this.czProperty.set(z);
}
java
protected Statement methodInvoker(FrameworkMethod method, Object test) {
    return new ParameterizedInvokeMethod(method, test, methodArgs);
}
python
def _execute_commands_from_dir(self, directory):
    """Re-attempt to split and execute the failed commands"""
    # Get file paths and contents
    commands = get_commands_from_dir(directory)

    # Execute failed commands again
    print('\tAttempting to execute {0} failed commands'.format(len(commands)))
    return self.execute(commands, ignored_commands=None, execute_fails=True)
python
def ruleName(self):
    ''' overrides from parent class '''
    return _('%s at %s' % (self.room.name, self.room.location.name))
python
def _bse_cli_get_basis(args):
    '''Handles the get-basis subcommand'''
    return api.get_basis(
        name=args.basis,
        elements=args.elements,
        version=args.version,
        fmt=args.fmt,
        uncontract_general=args.unc_gen,
        uncontract_spdf=args.unc_spdf,
        uncontract_segmented=args.unc_seg,
        make_general=args.make_gen,
        optimize_general=args.opt_gen,
        data_dir=args.data_dir,
        header=not args.noheader)
java
private File getProjectDir(String projectId, boolean create) throws IOException {
    File projectDir = new File(getKeenCacheDirectory(), projectId);
    if (create && !projectDir.exists()) {
        KeenLogging.log("Cache directory for project '" + projectId + "' doesn't exist. "
                + "Creating it.");
        if (!projectDir.mkdirs()) {
            throw new IOException("Could not create project cache directory '"
                    + projectDir.getAbsolutePath() + "'");
        }
    }
    return projectDir;
}
java
public List<UserDto> selectByOrderedLogins(DbSession session, Collection<String> logins) {
    List<UserDto> unordered = selectByLogins(session, logins);
    return logins.stream()
        .map(new LoginToUser(unordered))
        .filter(Objects::nonNull)
        .collect(Collectors.toList());
}
python
def get_ssid(_, data):
    """http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/util.c?id=v3.17#n313.

    Positional arguments:
    data -- bytearray data to read.

    Returns:
    String.
    """
    converted = list()
    for i in range(len(data)):
        try:
            c = unichr(data[i])
        except NameError:
            c = chr(data[i])
        if unicodedata.category(c) != 'Cc' and c not in (' ', '\\'):
            converted.append(c)
        elif c == '\0':
            converted.append(c)
        elif c == ' ' and i not in (0, len(data)):
            converted.append(' ')
        else:
            converted.append('\\{0:02x}'.format(data[i]))
    return ''.join(converted)
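A hedged usage sketch with hypothetical SSID bytes, assuming the function and `import unicodedata` are in scope: printable characters pass through, while control bytes come back as \xx escapes.

raw = bytearray(b'home net\x01')
print(get_ssid(None, raw))  # prints: home net\01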
java
public static Specification<JpaTarget> hasTags(final String[] tagNames, final Boolean selectTargetWithNoTag) {
    return (targetRoot, query, cb) -> {
        final Predicate predicate = getPredicate(targetRoot, cb, selectTargetWithNoTag, tagNames);
        query.distinct(true);
        return predicate;
    };
}
java
private void initComponents() {
    fileLabel = new JLabel();
    fileTextField = new ZapTextField();
    browseButton = new JButton();
    nameLabel = new JLabel();
    nameTextField = new ZapTextField();
    slotLabel = new JLabel();
    slotTextField = new ZapTextField();
    slotListIndexLabel = new JLabel();
    slotListIndexTextField = new ZapTextField();
    addButton = new JButton();
    deleteButton = new JButton();
    closeButton = new JButton();
    driverScrollPane = new JScrollPane();
    driverTable = new JTable();

    setTitle(Constant.messages.getString("certificates.pkcs11.drivers.title"));

    fileLabel.setText(Constant.messages.getString("certificates.pkcs11.drivers.label.path"));

    browseButton.setText(Constant.messages.getString("certificates.pkcs11.drivers.button.browse"));
    browseButton.addActionListener(new ActionListener() {

        @Override
        public void actionPerformed(ActionEvent evt) {
            browseButtonActionPerformed(evt);
        }
    });

    nameLabel.setText(Constant.messages.getString("certificates.pkcs11.drivers.label.name"));

    slotLabel.setText(Constant.messages.getString("certificates.pkcs11.drivers.label.slot"));

    slotListIndexLabel.setText(Constant.messages.getString("certificates.pkcs11.drivers.label.slotIndex"));

    addButton.setText(Constant.messages.getString("certificates.pkcs11.drivers.button.add"));
    addButton.addActionListener(new ActionListener() {

        @Override
        public void actionPerformed(ActionEvent evt) {
            addButtonActionPerformed(evt);
        }
    });

    deleteButton.setText(Constant.messages.getString("certificates.pkcs11.drivers.button.delete"));
    deleteButton.addActionListener(new ActionListener() {

        @Override
        public void actionPerformed(ActionEvent evt) {
            deleteButtonActionPerformed(evt);
        }
    });

    closeButton.setText(Constant.messages.getString("certificates.pkcs11.drivers.button.close"));
    closeButton.addActionListener(new ActionListener() {

        @Override
        public void actionPerformed(ActionEvent evt) {
            closeButtonActionPerformed(evt);
        }
    });

    driverTable.setModel(driverTableModel);
    driverScrollPane.setViewportView(driverTable);

    // When experimental SlotListIndex support is used, the slotTextField is disabled
    // (and vice versa), as only one of these parameters is actually used.
    if (!Model.getSingleton().getOptionsParam().getExperimentalFeaturesParam().isExerimentalSliSupportEnabled()) {
        slotTextField.setEnabled(false);
    }

    final GroupLayout layout = new GroupLayout(getContentPane());
    getContentPane().setLayout(layout);

    layout.setHorizontalGroup(layout.createParallelGroup(GroupLayout.Alignment.LEADING)
        .addGroup(layout.createSequentialGroup()
            .addContainerGap()
            .addGroup(layout.createParallelGroup(GroupLayout.Alignment.LEADING)
                .addComponent(fileLabel)
                .addComponent(nameLabel)
                .addComponent(slotLabel)
                .addComponent(slotListIndexLabel)
                .addGroup(layout.createSequentialGroup()
                    .addGroup(layout.createParallelGroup(GroupLayout.Alignment.TRAILING, false)
                        .addComponent(nameTextField, GroupLayout.Alignment.LEADING)
                        .addComponent(slotTextField, GroupLayout.Alignment.LEADING)
                        .addComponent(slotListIndexTextField, GroupLayout.Alignment.LEADING)
                        .addComponent(fileTextField, GroupLayout.Alignment.LEADING,
                                GroupLayout.DEFAULT_SIZE, 322, Short.MAX_VALUE))
                    .addPreferredGap(LayoutStyle.ComponentPlacement.RELATED)
                    .addGroup(layout.createParallelGroup(GroupLayout.Alignment.LEADING)
                        .addComponent(addButton, GroupLayout.DEFAULT_SIZE, 80, Short.MAX_VALUE)
                        .addComponent(browseButton))))
            .addContainerGap(165, Short.MAX_VALUE))
        .addGroup(GroupLayout.Alignment.TRAILING, layout.createSequentialGroup()
            .addGap(499, 499, 499)
            .addComponent(closeButton, GroupLayout.DEFAULT_SIZE, 74, Short.MAX_VALUE)
            .addContainerGap())
        .addGroup(layout.createSequentialGroup()
            .addContainerGap()
            .addComponent(driverScrollPane, GroupLayout.DEFAULT_SIZE, 561, Short.MAX_VALUE)
            .addContainerGap())
        .addGroup(GroupLayout.Alignment.TRAILING, layout.createSequentialGroup()
            .addContainerGap(499, Short.MAX_VALUE)
            .addComponent(deleteButton)
            .addContainerGap()));

    layout.setVerticalGroup(layout.createParallelGroup(GroupLayout.Alignment.LEADING)
        .addGroup(GroupLayout.Alignment.TRAILING, layout.createSequentialGroup()
            .addContainerGap()
            .addComponent(fileLabel)
            .addPreferredGap(LayoutStyle.ComponentPlacement.RELATED)
            .addGroup(layout.createParallelGroup(GroupLayout.Alignment.LEADING, false)
                .addComponent(browseButton, 0, 0, Short.MAX_VALUE)
                .addComponent(fileTextField))
            .addPreferredGap(LayoutStyle.ComponentPlacement.RELATED)
            .addComponent(nameLabel)
            .addPreferredGap(LayoutStyle.ComponentPlacement.RELATED)
            .addGroup(layout.createParallelGroup(GroupLayout.Alignment.BASELINE)
                .addComponent(nameTextField, GroupLayout.PREFERRED_SIZE,
                        GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE))
            .addPreferredGap(LayoutStyle.ComponentPlacement.RELATED)
            .addComponent(slotLabel)
            .addPreferredGap(LayoutStyle.ComponentPlacement.RELATED)
            .addGroup(layout.createParallelGroup(GroupLayout.Alignment.BASELINE)
                .addComponent(slotTextField, GroupLayout.PREFERRED_SIZE,
                        GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE))
            .addGap(28, 28, 28)
            .addComponent(slotListIndexLabel)
            .addPreferredGap(LayoutStyle.ComponentPlacement.RELATED)
            .addGroup(layout.createParallelGroup(GroupLayout.Alignment.BASELINE)
                .addComponent(slotListIndexTextField, GroupLayout.PREFERRED_SIZE,
                        GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE)
                .addComponent(addButton, GroupLayout.PREFERRED_SIZE, 19,
                        GroupLayout.PREFERRED_SIZE))
            .addGap(28, 28, 28)
            .addComponent(driverScrollPane, GroupLayout.PREFERRED_SIZE, 195,
                    GroupLayout.PREFERRED_SIZE)
            .addPreferredGap(LayoutStyle.ComponentPlacement.RELATED)
            .addComponent(deleteButton)
            .addPreferredGap(LayoutStyle.ComponentPlacement.RELATED, 9, Short.MAX_VALUE)
            .addComponent(closeButton, GroupLayout.PREFERRED_SIZE, 10,
                    GroupLayout.PREFERRED_SIZE)
            .addContainerGap()));

    layout.linkSize(SwingConstants.VERTICAL, new Component[] {
            addButton, browseButton, closeButton, deleteButton, fileTextField, nameTextField });

    for (int i = 0; i < driverTableModel.getColumnCount(); i++) {
        driverTable.getColumnModel().getColumn(i).setPreferredWidth(driverTableModel.getPreferredWith(i));
    }

    pack();
}
python
def autosave(self, index):
    """
    Autosave a file.

    Do nothing if the `changed_since_autosave` flag is not set or the file
    is newly created (and thus not named by the user). Otherwise, save a
    copy of the file with the name given by `self.get_autosave_filename()`
    and clear the `changed_since_autosave` flag. Errors raised when saving
    are silently ignored.

    Args:
        index (int): index into self.stack.data
    """
    finfo = self.stack.data[index]
    document = finfo.editor.document()
    if not document.changed_since_autosave or finfo.newly_created:
        return
    autosave_filename = self.get_autosave_filename(finfo.filename)
    logger.debug('Autosaving %s to %s', finfo.filename, autosave_filename)
    try:
        self.stack._write_to_file(finfo, autosave_filename)
        document.changed_since_autosave = False
    except EnvironmentError as error:
        action = (_('Error while autosaving {} to {}')
                  .format(finfo.filename, autosave_filename))
        msgbox = AutosaveErrorDialog(action, error)
        msgbox.exec_if_enabled()
python
def get_cmdclass():
    '''
    A ``cmdclass`` that works around a setuptools deficiency.

    There is no need to build wheels when installing a package, however
    some versions of setuptools seem to mandate this. This is a hacky
    workaround that modifies the ``cmdclass`` returned by versioneer so
    that not having wheel installed is not a fatal error.
    '''
    cmdclass = versioneer.get_cmdclass()

    try:
        from wheel.bdist_wheel import bdist_wheel
    except ImportError:
        # pip does not require bdist_wheel when wheel is not installed
        bdist_wheel = None

    if bdist_wheel is not None:
        cmdclass["bdist_wheel"] = bdist_wheel

    return cmdclass
python
def get_current_live_chat(self):
    """ Check if there is a live chat on the go, so that we should
        take over the AskMAMA page with the live chat.
    """
    now = datetime.now()
    chat = self.upcoming_live_chat()
    if chat and chat.is_in_progress():
        return chat
    return None
java
@Override
public String getNarInfoFile(final String groupId, final String artifactId, final String type) {
    return "META-INF/nar/" + groupId + "/" + artifactId + "/" + NarInfo.NAR_PROPERTIES;
}
python
def prepare(self, request):
    """
    Performs any preparations necessary for the Format.

    :param request: The webob Request object describing the request.

    :returns: A list of dictionary values needed by the convert() method.
    """
    data = []
    for conv in self.conversions:
        data.append(conv.prepare(request))
    return data
java
public ServiceFuture<OpenShiftManagedClusterInner> beginUpdateTagsAsync(String resourceGroupName, String resourceName, final ServiceCallback<OpenShiftManagedClusterInner> serviceCallback) {
    return ServiceFuture.fromResponse(beginUpdateTagsWithServiceResponseAsync(resourceGroupName, resourceName), serviceCallback);
}
python
def login(self, request, session, creds, segments):
    """
    Called to check the credentials of a user.

    Here we extend guard's implementation to preauthenticate users if
    they have a valid persistent session.

    @type request: L{nevow.inevow.IRequest}
    @param request: The HTTP request being handled.

    @type session: L{nevow.guard.GuardSession}
    @param session: The user's current session.

    @type creds: L{twisted.cred.credentials.ICredentials}
    @param creds: The credentials the user presented.

    @type segments: L{tuple}
    @param segments: The remaining segments of the URL.

    @return: A deferred firing with the user's avatar.
    """
    self._maybeCleanSessions()
    if isinstance(creds, credentials.Anonymous):
        preauth = self.authenticatedUserForKey(session.uid)
        if preauth is not None:
            self.savorSessionCookie(request)
            creds = userbase.Preauthenticated(preauth)

    def cbLoginSuccess(input):
        """
        User authenticated successfully. Create the persistent session,
        and associate it with the username.

        (XXX it doesn't work like this now)
        """
        user = request.args.get('username')
        if user is not None:
            # create a database session and associate it with this user
            cookieValue = session.uid

            if request.args.get('rememberMe'):
                self.createSessionForKey(cookieValue, creds.username)
                self.savorSessionCookie(request)
        return input

    return (
        guard.SessionWrapper.login(
            self, request, session, creds, segments)
        .addCallback(cbLoginSuccess))
python
def eps(self, nodeids=None):
    """
    Return the EPs with the given *nodeid*, or all EPs.

    Args:
        nodeids: an iterable of nodeids of EPs to return; if
            `None`, return all EPs
    """
    if nodeids is None:
        nodeids = self._nodeids
    _eps = self._eps
    return [_eps[nodeid] for nodeid in nodeids]
python
def _get_colors(self, name):
    """
    A magical function which maintains the sanity of vertex and face
    colors.

    * If colors have been explicitly stored or changed, they are
      considered user data, stored in self._data (DataStore), and are
      returned immediately when requested.
    * If colors have never been set, a (count, 4) tiled copy of the
      default diffuse color will be stored in the cache.

      ** the CRC on creation for these cached default colors will also
         be stored
      ** if the cached color array is altered (different CRC than when
         it was created) we consider that now to be user data and the
         array is moved from the cache to the DataStore.

    Parameters
    -----------
    name: str, 'face', or 'vertex'

    Returns
    -----------
    colors: (count, 4) uint8, RGBA colors
    """
    try:
        counts = {'face': len(self.mesh.faces),
                  'vertex': len(self.mesh.vertices)}
        count = counts[name]
    except AttributeError:
        count = None

    # the face or vertex colors
    key_colors = str(name) + '_colors'
    # the initial crc of the colors
    key_crc = key_colors + '_crc'

    if key_colors in self._data:
        # if a user has explicitly stored or changed the color it
        # will be in data
        return self._data[key_colors]
    elif key_colors in self._cache:
        # if the colors have been autogenerated already they
        # will be in the cache
        colors = self._cache[key_colors]
        # if the cached colors have been changed since creation we move
        # them to data
        if colors.crc() != self._cache[key_crc]:
            # call the setter on the property using exec
            # this avoids having to pass a setter to this function
            if name == 'face':
                self.face_colors = colors
            elif name == 'vertex':
                self.vertex_colors = colors
            else:
                raise ValueError('unsupported name!!!')
            self._cache.verify()
    else:
        # colors have never been accessed
        if self.kind is None:
            # no colors are defined, so create a (count, 4) tiled
            # copy of the default color
            colors = np.tile(self.defaults['material_diffuse'],
                             (count, 1))
        elif (self.kind == 'vertex' and name == 'face'):
            colors = vertex_to_face_color(
                vertex_colors=self.vertex_colors,
                faces=self.mesh.faces)
        elif (self.kind == 'face' and name == 'vertex'):
            colors = face_to_vertex_color(
                mesh=self.mesh,
                face_colors=self.face_colors)
        else:
            raise ValueError('self.kind not accepted values!!')

        if (count is not None and colors.shape != (count, 4)):
            raise ValueError('face colors incorrect shape!')

        # subclass the array to track for changes using a CRC
        colors = caching.tracked_array(colors)

        # put the generated colors and their initial checksum into cache
        self._cache[key_colors] = colors
        self._cache[key_crc] = colors.crc()

    return colors
java
public CouchDbConnector createConnector() {
    val connector = new StdCouchDbConnector(couchDbProperties.getDbName(),
        getCouchDbInstance(), objectMapperFactory);
    LOGGER.debug("Connector created: [{}]", connector);
    return connector;
}
java
protected Document getWebXmlDocument(String baseDir)
        throws ParserConfigurationException, FactoryConfigurationError,
        SAXException, IOException {
    File webXml = new File(baseDir, WEB_XML_FILE_PATH);
    DocumentBuilder docBuilder = DocumentBuilderFactory.newInstance()
            .newDocumentBuilder();
    docBuilder.setEntityResolver(new EntityResolver() {

        public InputSource resolveEntity(String publicId, String systemId)
                throws SAXException, IOException {
            return null;
        }
    });
    Document doc = docBuilder.parse(webXml);
    return doc;
}
python
def _asym_utility_transform(systematic_utilities,
                            alt_IDs,
                            rows_to_alts,
                            eta,
                            intercept_params,
                            shape_ref_position=None,
                            intercept_ref_pos=None,
                            *args, **kwargs):
    """
    Parameters
    ----------
    systematic_utilities : 1D ndarray.
        Contains the systematic utilities for each available alternative for
        each observation. All elements should be ints, floats, or longs.
    alt_IDs : 1D ndarray.
        All elements should be ints. There should be one row per observation
        per available alternative for the given observation. Elements denote
        the alternative corresponding to the given row of the design matrix.
    rows_to_alts : 2D ndarray.
        There should be one row per observation per available alternative and
        one column per possible alternative. This matrix maps the rows of the
        design matrix to the possible alternatives for this dataset.
    eta : 1D ndarray.
        Each element should be an int, float, or long. There should be one
        value per transformed shape parameter. Note that if there are J
        possible alternatives in the dataset, then there should be J - 1
        elements in `eta`.
    intercept_params : 1D ndarray or None.
        If an array, each element should be an int, float, or long. For
        identifiability, there should be J - 1 elements, where J is the total
        number of observed alternatives for this dataset.
    shape_ref_position : int.
        Specifies the position in the array of natural shape parameters that
        should be equal to 1 minus the sum of the other elements, i.e. the
        alternative in the ordered array of unique alternatives that is not
        having its shape parameter estimated (to ensure identifiability).
    intercept_ref_pos : int, or None, optional.
        Specifies the index of the alternative, in the ordered array of unique
        alternatives, that is not having its intercept parameter estimated (in
        order to ensure identifiability). Should only be None if
        intercept_params is None. Default == None.

    Returns
    -------
    transformed_utilities : 2D ndarray.
        Should have shape `(systematic_utilities.shape[0], 1)`. The returned
        array contains the values of the transformed index for this model.
    """
    ##########
    # Convert the reduced shape parameters to the natural shape parameters
    ##########
    natural_shape_params = _convert_eta_to_c(eta, shape_ref_position)

    ##########
    # Calculate the transformed utilities from the natural shape parameters
    ##########
    # Create a vector which contains the appropriate shape for each row in
    # the design matrix
    long_shapes = rows_to_alts.dot(natural_shape_params)

    # Determine the total number of alternatives
    num_alts = rows_to_alts.shape[1]

    # Get the natural log of the long_shapes
    log_long_shapes = np.log(long_shapes)
    # Guard against underflow, aka long_shapes too close to zero
    log_long_shapes[np.isneginf(log_long_shapes)] = -1 * max_comp_value

    # Get the natural log of (1 - long_shapes) / (J - 1)
    log_1_sub_long_shapes = np.log((1 - long_shapes) / float(num_alts - 1))
    # Guard against underflow, aka 1 - long_shapes too close to zero
    small_idx = np.isneginf(log_1_sub_long_shapes)
    log_1_sub_long_shapes[small_idx] = -1 * max_comp_value

    # Compute the transformed utilities
    multiplier = ((systematic_utilities >= 0) * log_long_shapes +
                  (systematic_utilities < 0) * log_1_sub_long_shapes)
    transformed_utilities = log_long_shapes - systematic_utilities * multiplier

    # Perform a guard for shape --> 1 and V --> infinity.
    # It is DEFINITELY not clear if this is the correct thing to do. The
    # limit might not even exist, and there is no clear multivariate
    # L'Hopital's rule, so this is an arbitrary decision.
    weird_case = np.isposinf(systematic_utilities) * (long_shapes == 1)
    transformed_utilities[weird_case] = 0

    # Account for the outside intercept parameters if there are any
    if intercept_params is not None and intercept_ref_pos is not None:
        # Get a list of all the indices (or row indices) corresponding to the
        # alternatives whose intercept parameters are being estimated.
        # Note: wrap range() in list() so that .remove() works on Python 3.
        needed_idxs = list(range(rows_to_alts.shape[1]))
        needed_idxs.remove(intercept_ref_pos)

        if len(intercept_params.shape) > 1 and intercept_params.shape[1] > 1:
            # Get an array of zeros with shape
            # (num_possible_alternatives, num_parameter_samples)
            all_intercepts = np.zeros((rows_to_alts.shape[1],
                                       intercept_params.shape[1]))
            # For alternatives having their intercept estimated, replace the
            # zeros with the current value of the estimated intercepts
            all_intercepts[needed_idxs, :] = intercept_params
        else:
            # Get an array of zeros with shape (num_possible_alternatives,)
            all_intercepts = np.zeros(rows_to_alts.shape[1])
            # For alternatives having their intercept estimated, replace the
            # zeros with the current value of the estimated intercepts
            all_intercepts[needed_idxs] = intercept_params

        # Add the intercept values to f(x, beta, c)
        transformed_utilities += rows_to_alts.dot(all_intercepts)

    # Perform final guards against over/underflow in the transformations
    transformed_utilities[np.isposinf(transformed_utilities)] = max_comp_value
    transformed_utilities[np.isneginf(transformed_utilities)] = -max_comp_value

    # Be sure to return a 2D array since other functions will be expecting it
    if len(transformed_utilities.shape) == 1:
        transformed_utilities = transformed_utilities[:, np.newaxis]

    return transformed_utilities
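A minimal toy run of the transform above, assuming it sits in a module together with the two names it depends on: `max_comp_value` (an overflow guard constant) and `_convert_eta_to_c`. The stub below is only a guess at the eta-to-shape mapping — a softmax that assigns the residual probability mass to the reference position — chosen to satisfy the docstring's contract (J - 1 inputs, J positive outputs summing to one), not the library's actual parameterization.

import numpy as np

max_comp_value = 1e300  # stand-in for the module-level guard constant

def _convert_eta_to_c(eta, ref_position):
    # Hypothetical mapping: softmax over [eta, 0] with the implicit zero at
    # ref_position, so all shapes are positive and sum to one.
    exp_eta = np.exp(eta)
    denom = 1.0 + exp_eta.sum()
    return np.insert(exp_eta / denom, ref_position, 1.0 / denom)

# Two observations, three alternatives each; rows map to alts 0, 1, 2.
rows_to_alts = np.tile(np.eye(3), (2, 1))
systematic_utilities = np.array([0.5, -1.0, 2.0, 0.0, 1.5, -0.5])
alt_IDs = np.array([1, 2, 3, 1, 2, 3])
eta = np.array([0.2, -0.3])  # J - 1 = 2 reduced shape parameters

print(_asym_utility_transform(systematic_utilities, alt_IDs, rows_to_alts,
                              eta, None, shape_ref_position=2))
# -> a (6, 1) array of transformed utilities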
java
public void createRTreeIndex(String tableName, String geometryColumnName) {
    String sqlName = GeoPackageProperties.getProperty(SQL_PROPERTY, CREATE_PROPERTY);
    executeSQL(sqlName, tableName, geometryColumnName);
}
java
public Observable<ServiceResponse<PacketCaptureQueryStatusResultInner>> beginGetStatusWithServiceResponseAsync(String resourceGroupName, String networkWatcherName, String packetCaptureName) { if (resourceGroupName == null) { throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."); } if (networkWatcherName == null) { throw new IllegalArgumentException("Parameter networkWatcherName is required and cannot be null."); } if (packetCaptureName == null) { throw new IllegalArgumentException("Parameter packetCaptureName is required and cannot be null."); } if (this.client.subscriptionId() == null) { throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null."); } final String apiVersion = "2017-10-01"; return service.beginGetStatus(resourceGroupName, networkWatcherName, packetCaptureName, this.client.subscriptionId(), apiVersion, this.client.acceptLanguage(), this.client.userAgent()) .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<PacketCaptureQueryStatusResultInner>>>() { @Override public Observable<ServiceResponse<PacketCaptureQueryStatusResultInner>> call(Response<ResponseBody> response) { try { ServiceResponse<PacketCaptureQueryStatusResultInner> clientResponse = beginGetStatusDelegate(response); return Observable.just(clientResponse); } catch (Throwable t) { return Observable.error(t); } } }); }
java
public void typeMissing() {
    this.parameterMissing(
            ProtocolConstants.Parameters.Delete.TYPE,
            "Please provide the type of the delete request. Use \""
                    + DeleteRequestType.USER.getIdentifier()
                    + "\" to delete a user, \""
                    + DeleteRequestType.FOLLOW.getIdentifier()
                    + "\" to delete a follow edge or \""
                    + DeleteRequestType.STATUS_UPDATE.getIdentifier()
                    + "\" to delete a status update.");
}
python
def load_sgf(self, sgf_data):
    """Load and persist a sensor_graph file.

    The data passed in `sgf_data` can either be a path or the already
    loaded sgf lines as a string.  It is treated as sgf contents if it
    contains a newline character; otherwise it is interpreted as a path.

    Note that this method loads the sensor_graph directly into the
    persisted sensor_graph inside the device.  You will still need to
    reset the device for the sensor_graph to be enabled and run.

    Args:
        sgf_data (str): Either the path to an sgf file or its contents
            as a string.
    """
    if '\n' not in sgf_data:
        with open(sgf_data, "r") as infile:
            sgf_data = infile.read()

    model = DeviceModel()

    parser = SensorGraphFileParser()
    parser.parse_file(data=sgf_data)
    parser.compile(model)

    opt = SensorGraphOptimizer()
    opt.optimize(parser.sensor_graph, model=model)

    sensor_graph = parser.sensor_graph
    self._logger.info("Loading sensor_graph with %d nodes, %d streamers and %d configs",
                      len(sensor_graph.nodes), len(sensor_graph.streamers), len(sensor_graph.config_database))

    # Directly load the sensor_graph into our persisted storage
    self.sensor_graph.persisted_nodes = sensor_graph.dump_nodes()
    self.sensor_graph.persisted_streamers = sensor_graph.dump_streamers()

    self.sensor_graph.persisted_constants = []
    for stream, value in sorted(sensor_graph.constant_database.items(), key=lambda x: x[0].encode()):
        reading = IOTileReading(stream.encode(), 0, value)
        self.sensor_graph.persisted_constants.append((stream, reading))

    self.sensor_graph.persisted_exists = True

    # Clear all config variables and load in those from this sgf file
    self.config_database.clear()

    for slot in sorted(sensor_graph.config_database, key=lambda x: x.encode()):
        for conf_var, (conf_type, conf_val) in sorted(sensor_graph.config_database[slot].items()):
            self.config_database.add_direct(slot, conf_var, conf_type, conf_val)

    # If we have an app tag and version set, program them in
    app_tag = sensor_graph.metadata_database.get('app_tag')
    app_version = sensor_graph.metadata_database.get('app_version')

    if app_tag is not None:
        if app_version is None:
            app_version = "0.0"

        self.app_info = (app_tag, app_version)
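The path-versus-contents dispatch in `load_sgf` hinges entirely on the newline test; a self-contained sketch of just that rule (the sgf snippet below is illustrative only):

def load_text_or_path(sgf_data):
    # Mirrors load_sgf's dispatch: data containing a newline is treated as
    # file contents, anything else as a filesystem path to open and read.
    if '\n' not in sgf_data:
        with open(sgf_data, "r") as infile:
            sgf_data = infile.read()
    return sgf_data

contents = "on input 1\n{\n    copy => output 1;\n}\n"
assert load_text_or_path(contents) == contents  # contents pass straight through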
python
def _cram_to_fastq_regions(regions, cram_file, dirs, data): """Convert CRAM files to fastq, potentially within sub regions. Returns multiple fastq files that can be merged back together. """ base_name = utils.splitext_plus(os.path.basename(cram_file))[0] work_dir = utils.safe_makedir(os.path.join(dirs["work"], "align_prep", "%s-parts" % base_name)) fnames = run_multicore(_cram_to_fastq_region, [(cram_file, work_dir, base_name, region, data) for region in regions], data["config"]) # check if we have paired or single end data if any(not _is_gzip_empty(p1) for p1, p2, s in fnames): out = [[p1, p2] for p1, p2, s in fnames] else: out = [[s] for p1, p2, s in fnames] return out, work_dir
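The helper `_is_gzip_empty` is referenced above but not shown; a minimal sketch consistent with how it is used here (an assumption, not necessarily the project's implementation):

import gzip

def _is_gzip_empty(fname):
    # True when the gzipped fastq decompresses to zero bytes, i.e. no reads.
    with gzip.open(fname, "rb") as handle:
        return len(handle.read(1)) == 0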
python
def setJoiner(self, joiner): """ Sets the join operator type for this entry widget to the given value. :param joiner | <QueryCompound.Op> """ text = QueryCompound.Op[joiner].upper() if self._first: if self._last: self.uiJoinSBTN.setCurrentAction(None) else: act = self.uiJoinSBTN.findAction(text) self.uiJoinSBTN.setCurrentAction(act) else: self.uiJoinSBTN.actions()[0].setText(text)
python
def bench(client, n): """ Benchmark n requests """ items = list(range(n)) # Time client publish operations # ------------------------------ started = time.time() for i in items: client.publish('test', i) duration = time.time() - started print('Publisher client stats:') util.print_stats(n, duration)
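To make the benchmark runnable in isolation, a stand-in client plus a simplified stats printer (a guess at what `util.print_stats` reports), assuming `bench` above is defined in the same module:

import time

class util:  # stand-in namespace for the harness's util module
    @staticmethod
    def print_stats(n, duration):
        # Throughput in requests per second; guard against a zero duration.
        print('%d requests in %.3fs (%.1f req/s)'
              % (n, duration, n / max(duration, 1e-9)))

class DummyClient:
    # Stand-in for the real messaging client; publish is a no-op, so the
    # loop overhead itself is what gets timed.
    def publish(self, channel, message):
        pass

bench(DummyClient(), 10000)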
python
def normalize_residuals(self, points):
    """Normalize residuals by the level of the variable."""
    residuals = self.evaluate_residual(points)
    solutions = self.evaluate_solution(points)
    return [resid / soln for resid, soln in zip(residuals, solutions)]
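The normalization is plain elementwise division, so a zero-valued solution at any point would raise ZeroDivisionError; on toy values:

residuals = [0.02, -0.05, 0.10]
solutions = [2.0, 5.0, 4.0]
print([r / s for r, s in zip(residuals, solutions)])  # [0.01, -0.01, 0.025]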
java
public static void setPreferredAttributeNameForRoadNumber(String name) { final Preferences prefs = Preferences.userNodeForPackage(RoadNetworkConstants.class); if (prefs != null) { if (name == null || "".equals(name) || DEFAULT_ATTR_ROAD_NUMBER.equalsIgnoreCase(name)) { //$NON-NLS-1$ prefs.remove("ROAD_NUMBER_ATTR_NAME"); //$NON-NLS-1$ } else { prefs.put("ROAD_NUMBER_ATTR_NAME", name); //$NON-NLS-1$ } } }
java
public VaultExtendedInfoResourceInner createOrUpdate(String resourceGroupName, String vaultName, VaultExtendedInfoResourceInner resourceResourceExtendedInfoDetails) {
    return createOrUpdateWithServiceResponseAsync(resourceGroupName, vaultName, resourceResourceExtendedInfoDetails).toBlocking().single().body();
}
java
public Observable<ServiceResponse<IssuerBundle>> setCertificateIssuerWithServiceResponseAsync(String vaultBaseUrl, String issuerName, String provider) { if (vaultBaseUrl == null) { throw new IllegalArgumentException("Parameter vaultBaseUrl is required and cannot be null."); } if (issuerName == null) { throw new IllegalArgumentException("Parameter issuerName is required and cannot be null."); } if (this.apiVersion() == null) { throw new IllegalArgumentException("Parameter this.apiVersion() is required and cannot be null."); } if (provider == null) { throw new IllegalArgumentException("Parameter provider is required and cannot be null."); } final IssuerCredentials credentials = null; final OrganizationDetails organizationDetails = null; final IssuerAttributes attributes = null; CertificateIssuerSetParameters parameter = new CertificateIssuerSetParameters(); parameter.withProvider(provider); parameter.withCredentials(null); parameter.withOrganizationDetails(null); parameter.withAttributes(null); String parameterizedHost = Joiner.on(", ").join("{vaultBaseUrl}", vaultBaseUrl); return service.setCertificateIssuer(issuerName, this.apiVersion(), this.acceptLanguage(), parameter, parameterizedHost, this.userAgent()) .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<IssuerBundle>>>() { @Override public Observable<ServiceResponse<IssuerBundle>> call(Response<ResponseBody> response) { try { ServiceResponse<IssuerBundle> clientResponse = setCertificateIssuerDelegate(response); return Observable.just(clientResponse); } catch (Throwable t) { return Observable.error(t); } } }); }
python
def tags(self):
    '''List tags (py_version, abi, platform) supported by this wheel.'''
    return itertools.product(
        self.py_version.split('.'),
        self.abi.split('.'),
        self.platform.split('.'),
    )
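Wheel filenames use PEP 425 compressed tag sets, where each component may hold several dot-separated values; the product expands them into concrete tags. For a universal wheel tagged `py2.py3-none-any`:

import itertools

py_version, abi, platform = "py2.py3", "none", "any"
tags = list(itertools.product(py_version.split('.'),
                              abi.split('.'),
                              platform.split('.')))
print(tags)  # [('py2', 'none', 'any'), ('py3', 'none', 'any')]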
java
private void readObject(java.io.ObjectInputStream s) throws java.io.IOException, ClassNotFoundException { try { s.defaultReadObject(); this.queue = new Object[q.size()]; comparator = q.comparator(); addAll(q); } finally { q = null; } }
python
def get_serializer(self, *args, **kwargs):
    """If a list is passed as data, construct the serializer with many=True."""
    if isinstance(kwargs.get('data', {}), list):
        kwargs['many'] = True
    return super().get_serializer(*args, **kwargs)
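A self-contained demonstration of the dispatch, with a minimal stand-in for DRF's `GenericAPIView.get_serializer` (the real base class builds an actual serializer instance):

class BaseView:
    # Minimal stand-in for DRF's GenericAPIView.get_serializer.
    def get_serializer(self, *args, **kwargs):
        return 'many' if kwargs.get('many') else 'single'

class BulkView(BaseView):
    def get_serializer(self, *args, **kwargs):
        # Same dispatch as above: list payloads get many=True serializers.
        if isinstance(kwargs.get('data', {}), list):
            kwargs['many'] = True
        return super().get_serializer(*args, **kwargs)

view = BulkView()
assert view.get_serializer(data=[{'a': 1}, {'a': 2}]) == 'many'
assert view.get_serializer(data={'a': 1}) == 'single'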
java
public static appfwlearningsettings[] get_filtered(nitro_service service, filtervalue[] filter) throws Exception {
    appfwlearningsettings obj = new appfwlearningsettings();
    options option = new options();
    option.set_filter(filter);
    appfwlearningsettings[] response = (appfwlearningsettings[]) obj.getfiltered(service, option);
    return response;
}
java
public void marshall(DescribeProvisioningArtifactRequest describeProvisioningArtifactRequest, ProtocolMarshaller protocolMarshaller) { if (describeProvisioningArtifactRequest == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(describeProvisioningArtifactRequest.getAcceptLanguage(), ACCEPTLANGUAGE_BINDING); protocolMarshaller.marshall(describeProvisioningArtifactRequest.getProvisioningArtifactId(), PROVISIONINGARTIFACTID_BINDING); protocolMarshaller.marshall(describeProvisioningArtifactRequest.getProductId(), PRODUCTID_BINDING); protocolMarshaller.marshall(describeProvisioningArtifactRequest.getVerbose(), VERBOSE_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } }
java
private void handleDataAvailable() {
    InputStream inputStream = serialPort.getInputStream();
    try {
        while (inputStream.available() > 0) {
            // Read as an int first: read() signals end-of-stream with -1,
            // which a premature byte cast would conflate with a legitimate
            // 0xFF byte.
            int inputValue = inputStream.read();
            if (inputValue == -1) {
                log.error("Reached end of stream trying to read serial port.");
                stop();
                return;
            }
            byte inputByte = (byte) inputValue;
            Message message = configuration.getCommandParser().handleInputStream(inputByte, inputStream);
            if (message != null) {
                log.debug("Routing message {}", message.getClass().getSimpleName());
                routeMessage(message);
            }
        }
    } catch (IOException e) {
        // Route the stack trace through the logger instead of printStackTrace().
        log.error("IO Error reading from serial port. Closing connection.", e);
        stop();
    }
}
python
def set_static_ip_address(self, context, msg):
    """Process a request for setting rules in iptables.

    When a static IP address is assigned to a VM, the iptables rule for
    that address needs to be updated.
    """
    args = jsonutils.loads(msg)
    macaddr = args.get('mac')
    ipaddr = args.get('ip')
    LOG.debug('set_static_ip_address received: %(mac)s %(ip)s',
              {'mac': macaddr, 'ip': ipaddr})

    # Add the request to the queue for processing.
    event_type = 'cli.static_ip.set'
    payload = {'mac': macaddr, 'ip': ipaddr}
    timestamp = time.ctime()
    data = (event_type, payload)
    pri = self.obj.PRI_LOW_START
    self.obj.pqueue.put((pri, timestamp, data))
    LOG.debug('Added static ip request to the queue.')
    return 0
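On the consumer side, entries dequeue in ascending priority order; a minimal sketch assuming the `(priority, timestamp, (event_type, payload))` tuple layout used above (note that `time.ctime()` strings do not sort chronologically, so ties between equal priorities fall back to lexicographic order):

import queue
import time

pqueue = queue.PriorityQueue()

# Producer side, mirroring set_static_ip_address:
pri = 10  # stand-in for self.obj.PRI_LOW_START
event = ('cli.static_ip.set', {'mac': '00:11:22:33:44:55', 'ip': '10.0.0.5'})
pqueue.put((pri, time.ctime(), event))

# Consumer side: the lowest priority value is served first.
priority, timestamp, (event_type, payload) = pqueue.get()
print(event_type, payload['ip'])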