Dataset schema: language is a stringclasses column (2 values: python, java); func_code_string is a stringlengths column (63 to 466k characters).
python
def check_is_a_string(var, allow_none=False):
    """ Calls is_a_string and raises a type error if the check fails. """
    if not is_a_string(var, allow_none=allow_none):
        raise TypeError("var must be a string, however type(var) is {}"
                        .format(type(var)))
java
public long foldLeft(long seed, LongBinaryOperator accumulator) {
    long[] box = new long[] { seed };
    forEachOrdered(t -> box[0] = accumulator.applyAsLong(box[0], t));
    return box[0];
}
python
def transChicane(bend_length=None, bend_field=None, drift_length=None, gamma=None):
    """ Transport matrix of chicane composed of four rbends and three drifts between them

    :param bend_length: rbend width in [m]
    :param bend_field: rbend magnetic field in [T]
    :param drift_length: drift length, list or tuple of three elements, in [m]
                         single float number stands for same length for three drifts
    :param gamma: electron energy, gamma value
    :return: 6x6 numpy array
    """
    if None in (bend_length, bend_field, drift_length, gamma):
        print("warning: 'bend_length', 'bend_field', 'drift_length', 'gamma' should be positive float numbers.")
        m = np.eye(6, 6, dtype=np.float64)
        return m
    else:
        if isinstance(drift_length, (tuple, list)):
            if len(drift_length) == 1:
                dflist = drift_length * 3
            elif len(drift_length) == 2:
                dflist = []
                dflist.extend(drift_length)
                dflist.append(drift_length[0])
            elif len(drift_length) >= 3:
                dflist = drift_length[0:3]
                if dflist[0] != dflist[-1]:
                    print("warning: chicane is not symmetric.")
            else:
                print("drift_length is not a valid list or tuple.")
        else:
            dflist = []
            dflist.extend([drift_length, drift_length, drift_length])
        m0 = 9.10938215e-31
        e0 = 1.602176487e-19
        c0 = 299792458.0
        rho = np.sqrt(gamma ** 2 - 1) * m0 * c0 / bend_field / e0
        theta = np.arcsin(bend_length / rho)
        m_rb_1 = transRbend(theta, rho, gamma, -1)
        m_rb_2 = transRbend(-theta, -rho, gamma, 1)
        m_rb_3 = transRbend(-theta, -rho, gamma, -1)
        m_rb_4 = transRbend(theta, rho, gamma, 1)
        m_df_12 = transDrift(dflist[0], gamma)
        m_df_23 = transDrift(dflist[1], gamma)
        m_df_34 = transDrift(dflist[2], gamma)
        m = reduce(np.dot, [m_rb_1, m_df_12, m_rb_2, m_df_23, m_rb_3, m_df_34, m_rb_4])
        return m
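The radius and angle above follow rho = p/(eB) with p = sqrt(gamma^2 - 1) * m0 * c, and theta = arcsin(L/rho). A standalone numeric sanity check with hypothetical parameters (0.2 m dipoles, 0.5 T field, gamma = 1000), not part of the original function:

import numpy as np

m0, e0, c0 = 9.10938215e-31, 1.602176487e-19, 299792458.0
gamma, bend_field, bend_length = 1000.0, 0.5, 0.2

# rho = p / (e * B), with p = sqrt(gamma^2 - 1) * m0 * c
rho = np.sqrt(gamma ** 2 - 1) * m0 * c0 / bend_field / e0
theta = np.arcsin(bend_length / rho)
print(rho, np.degrees(theta))  # roughly 3.41 m and about 3.4 degrees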
python
def _index_document(self, document, force=False):
    """ Adds document to the index. """
    query = text("""
        INSERT INTO dataset_index(vid, title, keywords, doc)
        VALUES(:vid, :title, :keywords, :doc);
    """)
    self.backend.library.database.connection.execute(query, **document)
java
public Observable<ServiceResponse<LanguageBatchResult>> detectLanguageWithServiceResponseAsync(List<Input> documents) {
    if (this.client.azureRegion() == null) {
        throw new IllegalArgumentException("Parameter this.client.azureRegion() is required and cannot be null.");
    }
    Validator.validate(documents);
    BatchInput input = new BatchInput();
    input.withDocuments(documents);
    String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.client.azureRegion());
    return service.detectLanguage(this.client.acceptLanguage(), input, parameterizedHost, this.client.userAgent())
        .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<LanguageBatchResult>>>() {
            @Override
            public Observable<ServiceResponse<LanguageBatchResult>> call(Response<ResponseBody> response) {
                try {
                    ServiceResponse<LanguageBatchResult> clientResponse = detectLanguageDelegate(response);
                    return Observable.just(clientResponse);
                } catch (Throwable t) {
                    return Observable.error(t);
                }
            }
        });
}
java
public void setTranslationArrayStandaloneLongMonthNames(String[] newTranslationArray) {
    if (newTranslationArray == null) {
        String[] defaultLongMonthNames = ExtraDateStrings.getDefaultStandaloneLongMonthNamesForLocale(locale);
        this.translationArrayStandaloneLongMonthNames = defaultLongMonthNames;
    } else {
        this.translationArrayStandaloneLongMonthNames = newTranslationArray;
    }
    zDrawIndependentCalendarPanelIfNeeded();
}
java
protected boolean generatePythonConstructors(String container, List<? extends XtendMember> members,
        PyAppendable it, IExtraLanguageGeneratorContext context) {
    // Prepare field initialization
    boolean hasConstructor = false;
    for (final XtendMember member : members) {
        if (context.getCancelIndicator().isCanceled()) {
            return false;
        }
        if (member instanceof SarlConstructor) {
            hasConstructor = true;
            generate(member, it, context);
            it.newLine();
        }
    }
    if (context.getCancelIndicator().isCanceled()) {
        return false;
    }
    if (!hasConstructor) {
        it.append("def __init__(self):"); //$NON-NLS-1$
        it.increaseIndentation().newLine();
        final List<SarlField> fields = context.getMultimapValues(INSTANCE_VARIABLES_MEMENTO, container);
        if (fields.isEmpty()) {
            it.append("pass"); //$NON-NLS-1$
        } else {
            for (final SarlField field : fields) {
                generatePythonField(field, it, context);
            }
        }
        it.decreaseIndentation().newLine();
    }
    return true;
}
java
void setInstallOrOpenCallback(Branch.BranchReferralInitListener callback) {
    synchronized (reqQueueLockObject) {
        for (ServerRequest req : queue) {
            if (req != null) {
                if (req instanceof ServerRequestRegisterInstall) {
                    ((ServerRequestRegisterInstall) req).setInitFinishedCallback(callback);
                } else if (req instanceof ServerRequestRegisterOpen) {
                    ((ServerRequestRegisterOpen) req).setInitFinishedCallback(callback);
                }
            }
        }
    }
}
python
def sfs_folded(ac, n=None):
    """Compute the folded site frequency spectrum given reference and
    alternate allele counts at a set of biallelic variants.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, 2)
        Allele counts array.
    n : int, optional
        The total number of chromosomes called.

    Returns
    -------
    sfs_folded : ndarray, int, shape (n_chromosomes//2,)
        Array where the kth element is the number of variant sites with a
        minor allele count of k.

    """
    # check input
    ac, n = _check_ac_n(ac, n)

    # compute minor allele counts
    mac = np.amin(ac, axis=1)

    # need platform integer for bincount
    mac = mac.astype(int, copy=False)

    # compute folded site frequency spectrum
    x = n//2 + 1
    s = np.bincount(mac, minlength=x)
    return s
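A small usage sketch; `_check_ac_n` is internal to the library, so this reproduces just the counting step on synthetic allele counts:

import numpy as np

# Hypothetical allele counts for 5 biallelic variants, n = 4 chromosomes
ac = np.array([[3, 1], [2, 2], [4, 0], [1, 3], [2, 2]])
mac = np.amin(ac, axis=1)                 # minor allele counts: [1, 2, 0, 1, 2]
sfs = np.bincount(mac, minlength=4 // 2 + 1)
print(sfs)  # [1 2 2]: one monomorphic site, two singletons, two doubletons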
python
def is_same(type1, type2):
    """returns True, if type1 and type2 are same types"""
    nake_type1 = remove_declarated(type1)
    nake_type2 = remove_declarated(type2)
    return nake_type1 == nake_type2
python
def _get_bin_width(stdev, count):
    """Return the histogram's optimal bin width based on Scott's rule
    http://www.jstor.org/pss/2965501
    """
    w = int(round((3.5 * stdev) / (count ** (1.0 / 3))))
    if w:
        return w
    else:
        return 1
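For intuition, a worked instance of the 3.5 * sigma / n^(1/3) rule above, with hypothetical inputs:

# stdev = 10, count = 1000 -> 3.5 * 10 / 1000**(1/3) = 35 / 10 = 3.5 -> width 4
stdev, count = 10, 1000
w = int(round((3.5 * stdev) / (count ** (1.0 / 3))))
print(w if w else 1)  # 4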
java
public static snmpmib get(nitro_service service, options option) throws Exception {
    snmpmib obj = new snmpmib();
    snmpmib[] response = (snmpmib[]) obj.get_resources(service, option);
    return response[0];
}
java
private Object[] getPersonGroupMemberKeys(IGroupMember gm) {
    Object[] keys = null;
    EntityIdentifier ei = gm.getUnderlyingEntityIdentifier();
    IPersonAttributes attr = personAttributeDao.getPerson(ei.getKey());
    if (attr != null && attr.getAttributes() != null && !attr.getAttributes().isEmpty()) {
        IPerson p = PersonFactory.createPerson();
        p.setAttributes(attr.getAttributes());
        keys = p.getAttributeValues(memberOfAttributeName);
        log.debug("Groups for person {} is: {}", p.getUserName(), Arrays.toString(keys));
    }
    return keys != null ? keys : new Object[] {};
}
python
def strip_sdist_extras(filelist):
    """Strip generated files that are only present in source distributions.

    We also strip files that are ignored for other reasons, like
    command line arguments, setup.cfg rules or MANIFEST.in rules.
    """
    return [name for name in filelist
            if not file_matches(name, IGNORE)
            and not file_matches_regexps(name, IGNORE_REGEXPS)]
java
AvroYarnJobSubmissionParameters fromInputStream(final InputStream inputStream) throws IOException {
    final JsonDecoder decoder = DecoderFactory.get().jsonDecoder(
        AvroYarnJobSubmissionParameters.getClassSchema(), inputStream);
    final SpecificDatumReader<AvroYarnJobSubmissionParameters> reader = new SpecificDatumReader<>(
        AvroYarnJobSubmissionParameters.class);
    return reader.read(null, decoder);
}
java
public static Object toArray( JSONArray jsonArray, Object root, JsonConfig jsonConfig ) {
    Class objectClass = root.getClass();
    if( jsonArray.size() == 0 ){
        return Array.newInstance( objectClass, 0 );
    }

    int[] dimensions = JSONArray.getDimensions( jsonArray );
    Object array = Array.newInstance( objectClass == null ? Object.class : objectClass, dimensions );
    int size = jsonArray.size();
    for( int i = 0; i < size; i++ ){
        Object value = jsonArray.get( i );
        if( JSONUtils.isNull( value ) ){
            Array.set( array, i, null );
        }else{
            Class type = value.getClass();
            if( JSONArray.class.isAssignableFrom( type ) ){
                Array.set( array, i, toArray( (JSONArray) value, root, jsonConfig ) );
            }else if( String.class.isAssignableFrom( type ) || Boolean.class.isAssignableFrom( type )
                    || JSONUtils.isNumber( type ) || Character.class.isAssignableFrom( type )
                    || JSONFunction.class.isAssignableFrom( type ) ){
                if( objectClass != null && !objectClass.isAssignableFrom( type ) ){
                    value = JSONUtils.getMorpherRegistry().morph( objectClass, value );
                }
                Array.set( array, i, value );
            }else{
                try{
                    Object newRoot = jsonConfig.getNewBeanInstanceStrategy().newInstance( root.getClass(), null );
                    Array.set( array, i, JSONObject.toBean( (JSONObject) value, newRoot, jsonConfig ) );
                }catch( JSONException jsone ){
                    throw jsone;
                }catch( Exception e ){
                    throw new JSONException( e );
                }
            }
        }
    }
    return array;
}
python
def mutator(arg=None):
    """Structures mutator functions by allowing handlers to be registered
    for different types of event. When the decorated function is called
    with an initial value and an event, it will call the handler that
    has been registered for that type of event.

    It works like singledispatch, which it uses. The difference is that
    when the decorated function is called, this decorator dispatches
    according to the type of last call arg, which fits better with
    reduce().

    The builtin Python function reduce() is used by the library to replay
    a sequence of events against an initial state. If a mutator function
    is given to reduce(), along with a list of events and an initializer,
    reduce() will call the mutator function once for each event in the
    list, but the initializer will be the first value, and the event will
    be the last argument, and we want to dispatch according to the type
    of the event. It happens that singledispatch is coded to switch on
    the type of the first argument, which makes it unsuitable for
    structuring a mutator function without the modifications introduced
    here.

    The other aspect introduced by this decorator function is the option
    to set the type of the handled entity in the decorator. When an
    entity is replayed from scratch, in other words when all its events
    are replayed, the initial state is None. The handler which handles
    the first event in the sequence will probably construct an object
    instance. It is possible to write the type into the handler, but
    that makes the entity more difficult to subclass because you will
    also need to write a handler for it. If the decorator is invoked
    with the type, when the initial value passed as a call arg to the
    mutator function is None, the handler will instead receive the type
    of the entity, which it can use to construct the entity object.

    .. code::

        class Entity(object):
            class Created(object):
                pass

        @mutator(Entity)
        def mutate(initial, event):
            raise NotImplementedError(type(event))

        @mutate.register(Entity.Created)
        def _(initial, event):
            return initial(**event.__dict__)

        entity = mutate(None, Entity.Created())
    """
    domain_class = None

    def _mutator(func):
        wrapped = singledispatch(func)

        @wraps(wrapped)
        def wrapper(initial, event):
            initial = initial or domain_class
            return wrapped.dispatch(type(event))(initial, event)

        wrapper.register = wrapped.register

        return wrapper

    if isfunction(arg):
        return _mutator(arg)
    else:
        domain_class = arg
        return _mutator
java
@Override
public HandlerRegistration addGeometryIndexHighlightBeginHandler(GeometryIndexHighlightBeginHandler handler) {
    return editingService.getEventBus().addHandler(GeometryIndexHighlightBeginHandler.TYPE, handler);
}
java
public Object remove(Object key) {
    if (key == null) return null;
    purge();
    int hash = hashCode(key);
    int index = indexFor(hash);
    Entry previous = null;
    Entry entry = table[index];
    while (entry != null) {
        if ((hash == entry.hash) && equals(key, entry.getKey())) {
            if (previous == null) table[index] = entry.next;
            else previous.next = entry.next;
            this.size--;
            modCount++;
            return entry.getValue();
        }
        previous = entry;
        entry = entry.next;
    }
    return null;
}
java
public DirectConnectGatewayAssociationProposal withExistingAllowedPrefixesToDirectConnectGateway(
        RouteFilterPrefix... existingAllowedPrefixesToDirectConnectGateway) {
    if (this.existingAllowedPrefixesToDirectConnectGateway == null) {
        setExistingAllowedPrefixesToDirectConnectGateway(new com.amazonaws.internal.SdkInternalList<RouteFilterPrefix>(
            existingAllowedPrefixesToDirectConnectGateway.length));
    }
    for (RouteFilterPrefix ele : existingAllowedPrefixesToDirectConnectGateway) {
        this.existingAllowedPrefixesToDirectConnectGateway.add(ele);
    }
    return this;
}
python
def set_mrl(self, mrl, *options):
    """Set the MRL to play.

    Warning: most audio and video options, such as text renderer,
    have no effects on an individual media. These options must be
    set at the vlc.Instance or vlc.MediaPlayer instantiation.

    @param mrl: The MRL
    @param options: optional media option=value strings
    @return: the Media object
    """
    m = self.get_instance().media_new(mrl, *options)
    self.set_media(m)
    return m
java
final public void decrement(long val) {
    if (!enabled)
        return;
    lastSampleTime = System.currentTimeMillis();
    if (!sync) {
        count -= val;
    } else {
        synchronized (this) {
            count -= val;
        }
    }
}
python
def load(self, laser_plugin: LaserPlugin) -> None:
    """ Loads the plugin

    :param laser_plugin: plugin that will be loaded in the symbolic virtual machine
    """
    log.info("Loading plugin: {}".format(str(laser_plugin)))
    laser_plugin.initialize(self.symbolic_vm)
    self.laser_plugins.append(laser_plugin)
java
public boolean hasEquivalentTransitions(TimeZone tz, long start, long end) { return hasEquivalentTransitions(tz, start, end, false); }
java
public String convertIfcDamperTypeEnumToString(EDataType eDataType, Object instanceValue) { return instanceValue == null ? null : instanceValue.toString(); }
java
private MessageBuffer getNextBuffer() throws IOException {
    MessageBuffer next = in.next();
    if (next == null) {
        throw new MessageInsufficientBufferException();
    }
    assert (buffer != null);
    totalReadBytes += buffer.size();
    return next;
}
python
def gridsearch(self, X, y, weights=None, return_scores=False,
               keep_best=True, objective='auto', progress=True,
               **param_grids):
    """
    Performs a grid search over a space of parameters for a given objective

    Warnings
    --------
    ``gridsearch`` is lazy and will not remove useless combinations
    from the search space, eg.

    >>> n_splines=np.arange(5,10), fit_splines=[True, False]

    will result in 10 loops, of which 5 are equivalent because
    ``fit_splines = False``

    Also, it is not recommended to search over a grid that alternates
    between known scales and unknown scales, as the scores of the
    candidate models will not be comparable.

    Parameters
    ----------
    X : array-like
        input data of shape (n_samples, m_features)

    y : array-like
        label data of shape (n_samples,)

    weights : array-like shape (n_samples,), optional
        sample weights

    return_scores : boolean, optional
        whether to return the hyperparameters and score for each element
        in the grid

    keep_best : boolean, optional
        whether to keep the best GAM as self.

    objective : {'auto', 'AIC', 'AICc', 'GCV', 'UBRE'}, optional
        Metric to optimize. If `auto`, then grid search will optimize
        `GCV` for models with unknown scale and `UBRE` for models with
        known scale.

    progress : bool, optional
        whether to display a progress bar

    **kwargs
        pairs of parameters and iterables of floats, or parameters and
        iterables of iterables of floats.

        If no parameters are specified, ``lam=np.logspace(-3, 3, 11)``
        is used. This results in 11 points, placed diagonally across
        lam space.

        If grid is iterable of iterables of floats, the outer iterable
        must have length ``m_features``. the cartesian product of the
        subgrids in the grid will be tested.

        If grid is a 2d numpy array, each row of the array will be
        tested.

    The method will make a grid of all the combinations of the
    parameters and fit a GAM to each combination.

    Returns
    -------
    if ``return_scores=True``:
        model_scores: dict containing each fitted model as keys and
        corresponding objective scores as values
    else:
        self: ie possibly the newly fitted model

    Examples
    --------
    For a model with 4 terms, and where we expect 4 lam values, our
    search space for lam must have 4 dimensions. We can search the
    space in 3 ways:

    1. via cartesian product by specifying the grid as a list.
       our grid search will consider ``11 ** 4`` points:

    >>> lam = np.logspace(-3, 3, 11)
    >>> lams = [lam] * 4
    >>> gam.gridsearch(X, y, lam=lams)

    2. directly by specifying the grid as a np.ndarray. This is useful
       for when the dimensionality of the search space is very large,
       and we would prefer to execute a randomized search:

    >>> lams = np.exp(np.random.random(50, 4) * 6 - 3)
    >>> gam.gridsearch(X, y, lam=lams)

    3. copying grids for parameters with multiple dimensions. If we
       specify a 1D np.ndarray for lam, we are implicitly testing the
       space where all points have the same value:

    >>> gam.gridsearch(lam=np.logspace(-3, 3, 11))

    is equivalent to:

    >>> lam = np.logspace(-3, 3, 11)
    >>> lams = np.array([lam] * 4)
    >>> gam.gridsearch(X, y, lam=lams)
    """
    # check if model fitted
    if not self._is_fitted:
        self._validate_params()
        self._validate_data_dep_params(X)

    y = check_y(y, self.link, self.distribution, verbose=self.verbose)
    X = check_X(X, verbose=self.verbose)
    check_X_y(X, y)

    if weights is not None:
        weights = np.array(weights).astype('f').ravel()
        weights = check_array(weights, name='sample weights', ndim=1,
                              verbose=self.verbose)
        check_lengths(y, weights)
    else:
        weights = np.ones_like(y).astype('float64')

    # validate objective
    if objective not in ['auto', 'GCV', 'UBRE', 'AIC', 'AICc']:
        raise ValueError("objective must be in "
                         "['auto', 'GCV', 'UBRE', 'AIC', 'AICc'], "
                         "but found objective = {}".format(objective))

    # check objective
    if self.distribution._known_scale:
        if objective == 'GCV':
            raise ValueError('GCV should be used for models with '
                             'unknown scale')
        if objective == 'auto':
            objective = 'UBRE'
    else:
        if objective == 'UBRE':
            raise ValueError('UBRE should be used for models with '
                             'known scale')
        if objective == 'auto':
            objective = 'GCV'

    # if no params, then set up default gridsearch
    if not bool(param_grids):
        param_grids['lam'] = np.logspace(-3, 3, 11)

    # validate params
    admissible_params = list(self.get_params()) + self._plural
    params = []
    grids = []
    for param, grid in list(param_grids.items()):

        # check param exists
        if param not in (admissible_params):
            raise ValueError('unknown parameter: {}'.format(param))

        # check grid is iterable at all
        if not (isiterable(grid) and (len(grid) > 1)):
            raise ValueError('{} grid must either be iterable of '
                             'iterables, or an iterable of length > 1, '
                             'but found {}'.format(param, grid))

        # prepare grid
        if any(isiterable(g) for g in grid):
            # get required parameter shape
            target_len = len(flatten(getattr(self, param)))

            # check if cartesian product needed
            cartesian = (not isinstance(grid, np.ndarray) or grid.ndim != 2)

            # build grid
            grid = [np.atleast_1d(g) for g in grid]

            # check shape
            msg = '{} grid should have {} columns, ' \
                  'but found grid with {} columns'.format(param, target_len,
                                                          len(grid))
            if cartesian:
                if len(grid) != target_len:
                    raise ValueError(msg)
                grid = combine(*grid)

            if not all([len(subgrid) == target_len for subgrid in grid]):
                raise ValueError(msg)

        # save param name and grid
        params.append(param)
        grids.append(grid)

    # build a list of dicts of candidate model params
    param_grid_list = []
    for candidate in combine(*grids):
        param_grid_list.append(dict(zip(params, candidate)))

    # set up data collection
    best_model = None  # keep the best model
    best_score = np.inf
    scores = []
    models = []

    # check if our model has been fitted already and store it
    if self._is_fitted:
        models.append(self)
        scores.append(self.statistics_[objective])

        # our model is currently the best
        best_model = models[-1]
        best_score = scores[-1]

    # make progressbar optional
    if progress:
        pbar = ProgressBar()
    else:
        pbar = lambda x: x

    # loop through candidate model params
    for param_grid in pbar(param_grid_list):
        try:
            # try fitting
            # define new model
            gam = deepcopy(self)
            gam.set_params(self.get_params())
            gam.set_params(**param_grid)

            # warm start with parameters from previous build
            if models:
                coef = models[-1].coef_
                gam.set_params(coef_=coef, force=True, verbose=False)
            gam.fit(X, y, weights)

        except ValueError as error:
            msg = str(error) + '\non model with params:\n' + str(param_grid)
            msg += '\nskipping...\n'
            if self.verbose:
                warnings.warn(msg)
            continue

        # record results
        models.append(gam)
        scores.append(gam.statistics_[objective])

        # track best
        if scores[-1] < best_score:
            best_model = models[-1]
            best_score = scores[-1]

    # problems
    if len(models) == 0:
        msg = 'No models were fitted.'
        if self.verbose:
            warnings.warn(msg)
        return self

    # copy over the best
    if keep_best:
        self.set_params(deep=True, force=True,
                        **best_model.get_params(deep=True))

    if return_scores:
        return OrderedDict(zip(models, scores))
    else:
        return self
java
public String doSecurePost(String host, String path, String postData, int port,
        Map<String, String> headers, int timeout)
        throws UnknownHostException, ConnectException, IOException {
    return doHttpCall(host, path, postData, port, headers, timeout, true);
}
java
static void register(Transcoder transcoder) {
    Entry entry = makeEntry(transcoder.source, transcoder.destination);
    if (entry.transcoder != null)
        throw new TranscoderException(ErrorMessages.ERR_TRANSCODER_ALREADY_REGISTERED,
                new String(transcoder.source + " to " + new String(transcoder.destination)));
    entry.transcoder = transcoder;
}
python
def convert_l_to_rgba(self, row, result):
    """ Convert a grayscale image to RGBA. This method assumes
    the alpha channel in result is already correctly initialized.
    """
    # one grayscale value per pixel; copy it into the R, G and B slots
    # (the original loop bound of len(row) // 3 would only convert a
    # third of the pixels of a single-plane grayscale row)
    for i in range(len(row)):
        for j in range(3):
            result[(4 * i) + j] = row[i]
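To see the fan-out concretely, here is the same loop run standalone on a synthetic row (bytearrays stand in for the real row/result buffers):

row = bytearray([10, 20, 30])              # three grayscale pixels
result = bytearray([0, 0, 0, 255] * 3)     # RGBA output, alpha pre-initialized

for i in range(len(row)):                  # one iteration per pixel
    for j in range(3):                     # copy the gray value into R, G, B
        result[(4 * i) + j] = row[i]

print(list(result))  # [10, 10, 10, 255, 20, 20, 20, 255, 30, 30, 30, 255]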
java
public static Map<String, String> unserializeBuildInfo(final String buildInfo) throws IOException {
    final ObjectMapper mapper = new ObjectMapper();
    mapper.disable(MapperFeature.USE_GETTERS_AS_SETTERS);
    // TypeReference aligned with the declared return type (the original used Map<String, Object>)
    return mapper.readValue(buildInfo, new TypeReference<Map<String, String>>() {});
}
java
public ListFleetsResult withFleetSummaryList(FleetSummary... fleetSummaryList) {
    if (this.fleetSummaryList == null) {
        setFleetSummaryList(new java.util.ArrayList<FleetSummary>(fleetSummaryList.length));
    }
    for (FleetSummary ele : fleetSummaryList) {
        this.fleetSummaryList.add(ele);
    }
    return this;
}
java
private static boolean pointEqualsPoint_(Point2D pt_a, Point2D pt_b, double tolerance,
        ProgressTracker progress_tracker) {
    return Point2D.sqrDistance(pt_a, pt_b) <= tolerance * tolerance;
}
java
protected final String getForEFSave(final Class<?> pClass) {
    if (SeGoodsSpecifics.class == pClass) {
        return PrcSeGdSpecEmbFlSave.class.getSimpleName();
    } else if (SeServiceSpecifics.class == pClass) {
        return PrcSeSrvSpecEmbFlSave.class.getSimpleName();
    }
    return null;
}
python
def setBendField(self, x):
    """ set bend magnetic field

    :param x: new bend field to be assigned, [T]
    :return: None
    """
    if x != self.bend_field:
        self.bend_field = x
        self.refresh = True
python
def acc_difference(points):
    """ Computes the acceleration difference between each adjacent point

    Args:
        points (:obj:`Point`)
    Returns:
        :obj:`list` of float: Acceleration difference between each pair of
        adjacent points, with a leading 0 for the first point
    """
    data = [0]
    for before, after in pairwise(points):
        data.append(before.acc - after.acc)
    return data
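The `pairwise` helper is not defined in this snippet; a common sketch equivalent to the itertools recipe, plus a tiny synthetic run, would be:

from itertools import tee

def pairwise(iterable):
    """s -> (s0, s1), (s1, s2), (s2, s3), ..."""
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)

# e.g. with hypothetical acceleration values
class P:
    def __init__(self, acc):
        self.acc = acc

points = [P(0.0), P(1.5), P(1.0)]
print([0] + [a.acc - b.acc for a, b in pairwise(points)])  # [0, -1.5, 0.5]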
java
public void setUserImage(CmsObject cms, CmsUser user, String rootPath) throws CmsException {
    CmsFile tempFile = cms.readFile(cms.getRequestContext().removeSiteRoot(rootPath));
    CmsImageScaler scaler = new CmsImageScaler(tempFile.getContents(), tempFile.getRootPath());
    if (scaler.isValid()) {
        scaler.setType(2);
        scaler.setHeight(192);
        scaler.setWidth(192);
        byte[] content = scaler.scaleImage(tempFile);
        String previousImage = (String)user.getAdditionalInfo(CmsUserIconHelper.USER_IMAGE_INFO);
        String newFileName = USER_IMAGE_FOLDER
            + user.getId().toString()
            + "_"
            + System.currentTimeMillis()
            + getSuffix(tempFile.getName());
        CmsObject adminCms = OpenCms.initCmsObject(m_adminCms);
        CmsProject tempProject = adminCms.createTempfileProject();
        adminCms.getRequestContext().setCurrentProject(tempProject);
        if (adminCms.existsResource(newFileName)) {
            // a user image of the given name already exists, just write the new content
            CmsFile imageFile = adminCms.readFile(newFileName);
            adminCms.lockResource(imageFile);
            imageFile.setContents(content);
            adminCms.writeFile(imageFile);
            adminCms.writePropertyObject(
                newFileName,
                new CmsProperty(CmsPropertyDefinition.PROPERTY_IMAGE_SIZE, null, "w:192,h:192"));
        } else {
            // create a new user image file
            adminCms.createResource(
                newFileName,
                OpenCms.getResourceManager().getResourceType(CmsResourceTypeImage.getStaticTypeName()),
                content,
                Collections.singletonList(
                    new CmsProperty(CmsPropertyDefinition.PROPERTY_IMAGE_SIZE, null, "w:192,h:192")));
        }
        if (newFileName.equals(previousImage)) {
            previousImage = null;
        }
        if (CmsStringUtil.isNotEmptyOrWhitespaceOnly(previousImage)) {
            previousImage = (String)user.getAdditionalInfo(CmsUserIconHelper.USER_IMAGE_INFO);
            if (CmsStringUtil.isNotEmptyOrWhitespaceOnly(previousImage)
                && cms.existsResource(newFileName, CmsResourceFilter.ONLY_VISIBLE_NO_DELETED)) {
                try {
                    adminCms.lockResource(previousImage);
                    adminCms.deleteResource(previousImage, CmsResource.DELETE_REMOVE_SIBLINGS);
                } catch (CmsException e) {
                    LOG.error("Error deleting previous user image.", e);
                }
            }
        }
        user.setAdditionalInfo(CmsUserIconHelper.USER_IMAGE_INFO, newFileName);
        adminCms.writeUser(user);
        try {
            OpenCms.getPublishManager().publishProject(adminCms);
        } catch (Exception e) {
            LOG.error("Error publishing user image resources.", e);
        }
    }
}
python
def get_hexagram(method='THREE COIN'):
    """
    Return one or two hexagrams using any of a variety of divination methods.

    The ``NAIVE`` method simply returns a uniformly random ``int`` between
    ``1`` and ``64``. All other methods return a 2-tuple where the first
    value represents the starting hexagram and the second represents the
    'moving to' hexagram.

    To find the name and unicode glyph for a found hexagram, look it up in
    the module-level `hexagrams` dict.

    Args:
        method (str): ``'THREE COIN'``, ``'YARROW'``, or ``'NAIVE'``,
            the divination method model to use. Note that the three coin
            and yarrow methods are not actually literally simulated, but
            rather statistical models reflecting the methods are passed
            to `blur.rand` functions to accurately approximate them.

    Returns:
        int: If ``method == 'NAIVE'``, the ``int`` key of the found
            hexagram. Otherwise a `tuple` will be returned.
        tuple: A 2-tuple of form ``(int, int)`` where the first value is
            the key of the starting hexagram and the second is that of
            the 'moving-to' hexagram.

    Raises:
        ValueError if ``method`` is invalid

    Examples:
        The function being used alone: ::

            >>> get_hexagram(method='THREE COIN')  # doctest: +SKIP
            # Might be...
            (55, 2)
            >>> get_hexagram(method='YARROW')  # doctest: +SKIP
            # Might be...
            (41, 27)
            >>> get_hexagram(method='NAIVE')  # doctest: +SKIP
            # Might be...
            26

        Usage in combination with hexagram lookup: ::

            >>> grams = get_hexagram()
            >>> grams  # doctest: +SKIP
            (47, 42)
            # unpack hexagrams for convenient reference
            >>> initial, moving_to = grams
            >>> hexagrams[initial]  # doctest: +SKIP
            ('䷮', '困', 'Confining')
            >>> hexagrams[moving_to]  # doctest: +SKIP
            ('䷩', '益', 'Augmenting')
            >>> print('{} moving to {}'.format(
            ...     hexagrams[initial][2],
            ...     hexagrams[moving_to][2])
            ... )  # doctest: +SKIP
            Confining moving to Augmenting
    """
    if method == 'THREE COIN':
        weights = [('MOVING YANG', 2),
                   ('MOVING YIN', 2),
                   ('STATIC YANG', 6),
                   ('STATIC YIN', 6)]
    elif method == 'YARROW':
        weights = [('MOVING YANG', 8),
                   ('MOVING YIN', 2),
                   ('STATIC YANG', 11),
                   ('STATIC YIN', 17)]
    elif method == 'NAIVE':
        return random.randint(1, 64)
    else:
        raise ValueError('`method` value of "{}" is invalid'.format(method))
    hexagram_1 = []
    hexagram_2 = []
    for i in range(6):
        roll = weighted_choice(weights)
        if roll == 'MOVING YANG':
            hexagram_1.append(1)
            hexagram_2.append(0)
        elif roll == 'MOVING YIN':
            hexagram_1.append(0)
            hexagram_2.append(1)
        elif roll == 'STATIC YANG':
            hexagram_1.append(1)
            hexagram_2.append(1)
        else:  # if roll == 'STATIC YIN'
            hexagram_1.append(0)
            hexagram_2.append(0)
    # Convert hexagram lists into tuples
    hexagram_1 = tuple(hexagram_1)
    hexagram_2 = tuple(hexagram_2)
    return (_hexagram_dict[hexagram_1], _hexagram_dict[hexagram_2])
java
private boolean isDerived(JavaClass fqCls, FQMethod key) {
    try {
        for (JavaClass infCls : fqCls.getInterfaces()) {
            for (Method infMethod : infCls.getMethods()) {
                if (key.getMethodName().equals(infMethod.getName())) {
                    if (infMethod.getGenericSignature() != null) {
                        if (SignatureUtils.compareGenericSignature(infMethod.getGenericSignature(), key.getSignature())) {
                            return true;
                        }
                    } else if (infMethod.getSignature().equals(key.getSignature())) {
                        return true;
                    }
                }
            }
        }

        JavaClass superClass = fqCls.getSuperClass();
        if ((superClass == null) || Values.DOTTED_JAVA_LANG_OBJECT.equals(superClass.getClassName())) {
            return false;
        }

        for (Method superMethod : superClass.getMethods()) {
            if (key.getMethodName().equals(superMethod.getName())) {
                if (superMethod.getGenericSignature() != null) {
                    if (SignatureUtils.compareGenericSignature(superMethod.getGenericSignature(), key.getSignature())) {
                        return true;
                    }
                } else if (superMethod.getSignature().equals(key.getSignature())) {
                    return true;
                }
            }
        }

        return isDerived(superClass, key);
    } catch (ClassNotFoundException cnfe) {
        bugReporter.reportMissingClass(cnfe);
        return true;
    }
}
python
def venues(self):
    """Get a list of all venue objects.

    >>> venues = din.venues()
    """
    response = self._request(V2_ENDPOINTS['VENUES'])
    # Normalize `dateHours` to array
    for venue in response["result_data"]["document"]["venue"]:
        if venue.get("id") in VENUE_NAMES:
            venue["name"] = VENUE_NAMES[venue.get("id")]
        if isinstance(venue.get("dateHours"), dict):
            venue["dateHours"] = [venue["dateHours"]]
        if "dateHours" in venue:
            for dh in venue["dateHours"]:
                if isinstance(dh.get("meal"), dict):
                    dh["meal"] = [dh["meal"]]
    return response
python
def applyQuery( self ):
    """
    Sets the query for this widget from the quick query text builder.
    """
    query = Q.fromString(nativestring(self.uiQueryTXT.text()))
    self.setQuery(query)
    self.uiQueryTXT.setText('')
python
def p_iteration_statement_5(self, p):
    """ iteration_statement : \
        FOR LPAREN VAR identifier IN expr RPAREN statement
    """
    p[0] = ast.ForIn(item=ast.VarDecl(p[4]), iterable=p[6], statement=p[8])
java
public WellRestedRequestBuilder globalHeaders(Map<String, String> globalHeaders) {
    this.globalHeaders = WellRestedUtil.buildHeaders(globalHeaders);
    return this;
}
java
public static void exports(Xml root, Animation animation) {
    Check.notNull(root);
    Check.notNull(animation);

    final Xml node = root.createChild(ANIMATION);
    node.writeString(ANIMATION_NAME, animation.getName());
    node.writeInteger(ANIMATION_START, animation.getFirst());
    node.writeInteger(ANIMATION_END, animation.getLast());
    node.writeDouble(ANIMATION_SPEED, animation.getSpeed());
    node.writeBoolean(ANIMATION_REVERSED, animation.hasReverse());
    node.writeBoolean(ANIMATION_REPEAT, animation.hasRepeat());
}
java
public static String serializeSet(Set<?> set) {
    if (set == null) {
        set = new HashSet<String>();
    }
    XStream xstream = new XStream(new DomDriver());
    return xstream.toXML(set);
}
python
def encoded(self):
    """
    This function will encode all the attributes to 256 bit integers

    :return:
    """
    encoded = {}
    for i in range(len(self.credType.names)):
        attr_types = self.credType.attrTypes[i]
        for at in attr_types:
            attrName = at.name
            if attrName in self._vals:
                if at.encode:
                    encoded[attrName] = encodeAttr(self._vals[attrName])
                else:
                    encoded[attrName] = self._vals[at.name]
    return encoded
java
@Override
protected void perturbation(List<DoubleSolution> swarm) {
    nonUniformMutation.setCurrentIteration(currentIteration);

    for (int i = 0; i < swarm.size(); i++) {
        if (i % 3 == 0) {
            nonUniformMutation.execute(swarm.get(i));
        } else if (i % 3 == 1) {
            uniformMutation.execute(swarm.get(i));
        }
    }
}
python
def license_fallback(vendor_dir, sdist_name):
    """Hardcoded license URLs. Check when updating if those are still needed"""
    libname = libname_from_dir(sdist_name)
    if libname not in HARDCODED_LICENSE_URLS:
        raise ValueError('No hardcoded URL for {} license'.format(libname))

    url = HARDCODED_LICENSE_URLS[libname]
    _, _, name = url.rpartition('/')
    dest = license_destination(vendor_dir, libname, name)
    r = requests.get(url, allow_redirects=True)
    log('Downloading {}'.format(url))
    r.raise_for_status()
    dest.write_bytes(r.content)
python
def cp(args):
    """
    find folder -type l | %prog cp

    Copy all the softlinks to the current folder, using absolute paths
    """
    p = OptionParser(cp.__doc__)
    fp = sys.stdin

    for link_name in fp:
        link_name = link_name.strip()
        if not op.exists(link_name):
            continue

        source = get_abs_path(link_name)
        link_name = op.basename(link_name)
        if not op.exists(link_name):
            os.symlink(source, link_name)
            logging.debug(" => ".join((source, link_name)))
java
public static void join(Writer writer, List<? extends EncodedPair> pairs, char pairSep, char nameValSep) throws IOException { join(writer, pairs, pairSep, nameValSep, false, false); }
java
public ServiceFuture<ContainerServiceInner> getByResourceGroupAsync(String resourceGroupName,
        String containerServiceName, final ServiceCallback<ContainerServiceInner> serviceCallback) {
    return ServiceFuture.fromResponse(
        getByResourceGroupWithServiceResponseAsync(resourceGroupName, containerServiceName),
        serviceCallback);
}
java
public static CharSequence getJSDate(Date date) {
    Calendar calendar = Calendar.getInstance();
    calendar.setTime(date);
    return getJSDate(calendar);
}
java
public static Bitmap scale(Bitmap src, int dw, int dh) {
    Bitmap res = Bitmap.createBitmap(dw, dh, Bitmap.Config.ARGB_8888);
    scale(src, res);
    return res;
}
java
public Vector3f mul(float x, float y, float z) { return mul(x, y, z, thisOrNew()); }
java
public Map<String, I_CmsFormatterBean> getFormatterSelection(String containerTypes, int containerWidth) {
    Map<String, I_CmsFormatterBean> result = new LinkedHashMap<String, I_CmsFormatterBean>();
    for (I_CmsFormatterBean formatter : Collections2.filter(
        m_allFormatters,
        new MatchesTypeOrWidth(containerTypes, containerWidth))) {
        if (formatter.isFromFormatterConfigFile()) {
            result.put(formatter.getId(), formatter);
        } else {
            result.put(
                CmsFormatterConfig.SCHEMA_FORMATTER_ID + formatter.getJspStructureId().toString(),
                formatter);
        }
    }
    return result;
}
python
def split_type(cls, type_name):
    """Split type of a type name with CardinalityField suffix into its parts.

    :param type_name: Type name (as string).
    :return: Tuple (type_basename, cardinality)
    """
    if cls.matches_type(type_name):
        basename = type_name[:-1]
        cardinality = cls.from_char_map[type_name[-1]]
    else:
        # -- ASSUME: Cardinality.one
        cardinality = Cardinality.one
        basename = type_name
    return (basename, cardinality)
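For intuition, a standalone sketch of the suffix split; the suffix characters and cardinality names below are assumptions patterned on common CardinalityField conventions, not taken from the library itself:

# Hypothetical suffix map: '?', '*', '+' mark optional/repeated types
from_char_map = {'?': 'zero_or_one', '*': 'zero_or_more', '+': 'one_or_more'}

def split_type(type_name):
    if type_name and type_name[-1] in from_char_map:
        return type_name[:-1], from_char_map[type_name[-1]]
    return type_name, 'one'

print(split_type('Number+'))  # ('Number', 'one_or_more')
print(split_type('Number'))   # ('Number', 'one')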
java
public SectionHeader getSectionHeader(String sectionName) {
    for (SectionHeader entry : headers) {
        if (entry.getName().equals(sectionName)) {
            return entry;
        }
    }
    throw new IllegalArgumentException(
        "invalid section name, no section header found");
}
python
def within_bbox(self, bbox):
    """
    :param bbox: a quartet (min_lon, min_lat, max_lon, max_lat)
    :returns: site IDs within the bounding box
    """
    min_lon, min_lat, max_lon, max_lat = bbox
    lons, lats = self.array['lon'], self.array['lat']
    if cross_idl(lons.min(), lons.max()) or cross_idl(min_lon, max_lon):
        lons = lons % 360
        min_lon, max_lon = min_lon % 360, max_lon % 360
    mask = (min_lon < lons) * (lons < max_lon) * \
           (min_lat < lats) * (lats < max_lat)
    return mask.nonzero()[0]
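The modulo-360 shift is what makes boxes crossing the international date line work: it turns a wrapped box into a plain interval. A minimal numpy sketch with synthetic longitudes (`cross_idl` itself is not shown in this snippet):

import numpy as np

lons = np.array([179.5, -179.8, 10.0])   # two sites straddling the date line
min_lon, max_lon = 179.0, -179.0          # bbox crossing the IDL

# shift everything into [0, 360) so the box becomes a plain interval
lons360 = lons % 360                      # [179.5, 180.2, 10.0]
lo, hi = min_lon % 360, max_lon % 360     # 179.0, 181.0
print(((lo < lons360) & (lons360 < hi)).nonzero()[0])  # [0 1]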
java
public Timestamp withLocalOffset(Integer offset) {
    Precision precision = getPrecision();
    if (precision.alwaysUnknownOffset() || safeEquals(offset, getLocalOffset())) {
        return this;
    }
    Timestamp ts = createFromUtcFields(precision,
                                       getZYear(), getZMonth(), getZDay(),
                                       getZHour(), getZMinute(), getZSecond(),
                                       getZFractionalSecond(), offset);
    return ts;
}
java
@Requires("label != null") protected static boolean labelIsResolved(Label label) { try { label.getOffset(); } catch (IllegalStateException e) { return false; } return true; }
python
def _calc_overlap_coef(
    markers1: dict,
    markers2: dict,
):
    """Calculate overlap coefficient between the values of two dictionaries

    Note: dict values must be sets
    """
    overlap_coef = np.zeros((len(markers1), len(markers2)))
    j = 0
    for marker_group in markers1:
        tmp = [len(markers2[i].intersection(markers1[marker_group])) /
               max(min(len(markers2[i]), len(markers1[marker_group])), 1)
               for i in markers2.keys()]
        overlap_coef[j, :] = tmp
        j += 1
    return overlap_coef
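The quantity computed is the overlap coefficient |A intersect B| / min(|A|, |B|), with the max(..., 1) guarding against empty sets. A quick synthetic check of one row:

import numpy as np

markers1 = {'g1': {'a', 'b', 'c'}}
markers2 = {'h1': {'b', 'c'}, 'h2': {'x'}}

# |{b,c} & {a,b,c}| / min(2, 3) = 2/2 = 1.0 ; |{x} & {a,b,c}| / 1 = 0.0
row = [len(s & markers1['g1']) / max(min(len(s), len(markers1['g1'])), 1)
       for s in markers2.values()]
print(np.array([row]))  # [[1. 0.]]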
java
public String getLevel() {
    try {
        LoggerContext loggerContext = (LoggerContext) LoggerFactory.getILoggerFactory();
        return loggerContext.getLogger("root").getLevel().toString();
    } catch (Exception e) {
        e.printStackTrace();
        return "";
    }
}
python
def parse_xrefs(self, token):
    """Search token for +value+ and !variable! style references.
    Be careful to not xref a new variable.
    """
    out, end = [], 0
    token = token.replace("\\n", "\n")
    for m in re.finditer(self.xref_registry, token, re.VERBOSE | re.DOTALL):
        if m.start(0) > end:
            out.append(String(token[end:m.start(0)], self.current_obj))
        end = m.end(0)
        if m.group("type"):
            xref_type = {"+": ValueXRef,
                         "!": VariableXRef,
                         "@": ElementXRef}[m.group("type")]
            out.append(xref_type(m.group("xref"), self.current_obj))
        elif m.group("uri") is not None:
            path = m.group("uri")
            out.append(MetaURI(path, self.current_obj))
        elif m.group("repeat") is not None:
            repeat, separator, nodups = m.group("repeat", "separator", "nodups")
            if separator is None:
                separator = ""
            if nodups is None:
                nodups = ""
            out.append(MetaRepeat(self.parse_xrefs(repeat), separator,
                                  nodups, self.current_obj))
        elif m.group("block") is not None:
            path = m.group("block")
            out.append(MetaBlock(path, self.current_obj))
        elif m.group("choices") is not None:
            choices = m.group("choices")
            out.append(MetaChoice(choices, self.current_obj))
        else:
            startval, endval = m.group("start", "end")
            out.append(MetaRange(startval, endval, self.current_obj))
    if end < len(token):
        out.append(String(token[end:], self.current_obj))
    return out
python
def _get_args(cls, **kwargs):
    """Parse style and locale.

    Argument location precedence: kwargs > view_args > query
    """
    csl_args = {
        'style': cls._default_style,
        'locale': cls._default_locale
    }

    if has_request_context():
        parser = FlaskParser(locations=('view_args', 'query'))
        csl_args.update(parser.parse(cls._user_args, request))

    csl_args.update({k: kwargs[k] for k in ('style', 'locale') if k in kwargs})

    try:
        csl_args['style'] = get_style_filepath(csl_args['style'].lower())
    except StyleNotFoundError:
        if has_request_context():
            raise StyleNotFoundRESTError(csl_args['style'])
        raise
    return csl_args
python
def register():
    """Register DDO of a new asset
    ---
    tags:
      - ddo
    consumes:
      - application/json
    parameters:
      - in: body
        name: body
        required: true
        description: DDO of the asset.
        schema:
          type: object
          required:
            - "@context"
            - id
            - created
            - publicKey
            - authentication
            - proof
            - service
          properties:
            "@context":
              description:
              example: https://w3id.org/future-method/v1
              type: string
            id:
              description: ID of the asset.
              example: did:op:123456789abcdefghi
              type: string
            created:
              description: date of ddo creation.
              example: "2016-02-08T16:02:20Z"
              type: string
            publicKey:
              type: array
              description: List of public keys.
              example: [{"id": "did:op:123456789abcdefghi#keys-1"},
                        {"type": "Ed25519VerificationKey2018"},
                        {"owner": "did:op:123456789abcdefghi"},
                        {"publicKeyBase58": "H3C2AVvLMv6gmMNam3uVAjZpfkcJCwDwnZn6z3wXmqPV"}]
            authentication:
              type: array
              description: List of authentication mechanisms.
              example: [{"type": "RsaSignatureAuthentication2018"},
                        {"publicKey": "did:op:123456789abcdefghi#keys-1"}]
            proof:
              type: dictionary
              description: Information about the creation and creator of the asset.
              example: {"type": "UUIDSignature",
                        "created": "2016-02-08T16:02:20Z",
                        "creator": "did:example:8uQhQMGzWxR8vw5P3UWH1ja",
                        "signatureValue": "QNB13Y7Q9...1tzjn4w=="}
            service:
              type: array
              description: List of services.
              example: [{"type": "Access",
                         "serviceEndpoint": "http://mybrizo.org/api/v1/brizo/services/consume?pubKey=${ pubKey}&serviceId={serviceId}&url={url}"},
                        {"type": "Compute",
                         "serviceEndpoint": "http://mybrizo.org/api/v1/brizo/services/compute?pubKey=${ pubKey}&serviceId={serviceId}&algo={algo}&container={container}"},
                        {"type": "Metadata",
                         "serviceDefinitionId": "2",
                         "serviceEndpoint": "http://myaquarius.org/api/v1/provider/assets/metadata/{did}",
                         "metadata": {
                             "base": {
                                 "name": "UK Weather information 2011",
                                 "type": "dataset",
                                 "description": "Weather information of UK including temperature and humidity",
                                 "dateCreated": "2012-02-01T10:55:11Z",
                                 "author": "Met Office",
                                 "license": "CC-BY",
                                 "copyrightHolder": "Met Office",
                                 "compression": "zip",
                                 "workExample": "stationId,latitude,longitude,datetime, temperature,humidity/n423432fsd,51.509865,-0.118092, 2011-01-01T10:55:11+00:00,7.2,68",
                                 "files": [{
                                     "contentLength": "4535431",
                                     "contentType": "text/csv",
                                     "encoding": "UTF-8",
                                     "compression": "zip",
                                     "resourceId": "access-log2018-02-13-15-17-29-18386C502CAEA932"
                                 }],
                                 "encryptedFiles": "0x098213xzckasdf089723hjgdasfkjgasfv",
                                 "links": [{
                                     "name": "Sample of Asset Data",
                                     "type": "sample",
                                     "url": "https://foo.com/sample.csv"
                                 }, {
                                     "name": "Data Format Definition",
                                     "type": "format",
                                     "AssetID": "4d517500da0acb0d65a716f61330969334630363ce4a6a9d39691026ac7908ea"
                                 }],
                                 "inLanguage": "en",
                                 "tags": "weather, uk, 2011, temperature, humidity",
                                 "price": 10,
                                 "checksum": "38803b9e6f04fce3fba4b124524672592264d31847182c689095a081c9e85262"
                             },
                             "curation": {
                                 "rating": 0.93,
                                 "numVotes": 123,
                                 "schema": "Binary Voting"
                             },
                             "additionalInformation": {
                                 "updateFrecuency": "yearly",
                                 "structuredMarkup": [{
                                     "uri": "http://skos.um.es/unescothes/C01194/jsonld",
                                     "mediaType": "application/ld+json"
                                 }, {
                                     "uri": "http://skos.um.es/unescothes/C01194/turtle",
                                     "mediaType": "text/turtle"
                                 }]
                             }
                         }}]
    responses:
      201:
        description: Asset successfully registered.
      400:
        description: One of the required attributes is missing.
      404:
        description: Invalid asset data.
      500:
        description: Error
    """
    assert isinstance(request.json, dict), 'invalid payload format.'
    required_attributes = ['@context', 'created', 'id', 'publicKey',
                           'authentication', 'proof', 'service']
    required_metadata_base_attributes = ['name', 'dateCreated', 'author',
                                         'license', 'price', 'encryptedFiles',
                                         'type', 'checksum']
    data = request.json
    if not data:
        logger.error(f'request body seems empty, expecting {required_attributes}')
        return 400
    msg, status = check_required_attributes(required_attributes, data, 'register')
    if msg:
        return msg, status
    msg, status = check_required_attributes(required_metadata_base_attributes,
                                            _get_base_metadata(data['service']),
                                            'register')
    if msg:
        return msg, status
    msg, status = check_no_urls_in_files(_get_base_metadata(data['service']), 'register')
    if msg:
        return msg, status
    msg, status = validate_date_format(data['created'])
    if status:
        return msg, status
    _record = dict()
    _record = copy.deepcopy(data)
    _record['created'] = datetime.strptime(data['created'], '%Y-%m-%dT%H:%M:%SZ')
    for service in _record['service']:
        if service['type'] == 'Metadata':
            service_id = int(service['serviceDefinitionId'])
            _record['service'][service_id]['metadata']['base']['datePublished'] = \
                datetime.strptime(f'{datetime.utcnow().replace(microsecond=0).isoformat()}Z',
                                  '%Y-%m-%dT%H:%M:%SZ')
            _record['service'][service_id]['metadata']['curation'] = {}
            _record['service'][service_id]['metadata']['curation']['rating'] = 0.00
            _record['service'][service_id]['metadata']['curation']['numVotes'] = 0
            _record['service'][service_id]['metadata']['curation']['isListed'] = True
    try:
        dao.register(_record, data['id'])
        # add new assetId to response
        return Response(_sanitize_record(_record), 201, content_type='application/json')
    except Exception as err:
        logger.error(f'encountered an error while saving the asset data to OceanDB: {str(err)}')
        return f'Some error: {str(err)}', 500
java
@Override
protected BundleRenderer createRenderer(FacesContext context) {
    ResourceBundlesHandler rsHandler = getResourceBundlesHandler(context);
    String type = (String) getAttributes().get(JawrConstant.TYPE_ATTR);
    boolean async = Boolean.parseBoolean((String) getAttributes().get(JawrConstant.ASYNC_ATTR));
    boolean defer = Boolean.parseBoolean((String) getAttributes().get(JawrConstant.DEFER_ATTR));
    String crossorigin = (String) getAttributes().get(JawrConstant.CROSSORIGIN_ATTR);
    return RendererFactory.getJsBundleRenderer(rsHandler, type,
        getUseRandomParamFlag(rsHandler.getConfig()), async, defer, crossorigin);
}
python
def _diff_(src, dst, ret=None, jp=None, exclude=[], include=[]):
    """ compare 2 dict/list, return a list containing json-pointer
    indicating what's different, and what's diff exactly.

    - list length diff: (jp, length of src, length of dst)
    - dict key diff: (jp, None, None)
    - when src is dict or list, and dst is not: (jp, type(src), type(dst))
    - other: (jp, src, dst)
    """
    def _dict_(src, dst, ret, jp):
        ss, sd = set(src.keys()), set(dst.keys())
        # what's include is prior to what's exclude
        si, se = set(include or []), set(exclude or [])
        ss, sd = (ss & si, sd & si) if si else (ss, sd)
        ss, sd = (ss - se, sd - se) if se else (ss, sd)

        # added keys
        for k in sd - ss:
            ret.append((jp_compose(k, base=jp), None, None,))

        # removed keys
        for k in ss - sd:
            ret.append((jp_compose(k, base=jp), None, None,))

        # same key
        for k in ss & sd:
            _diff_(src[k], dst[k], ret, jp_compose(k, base=jp), exclude, include)

    def _list_(src, dst, ret, jp):
        if len(src) < len(dst):
            ret.append((jp, len(src), len(dst),))
        elif len(src) > len(dst):
            ret.append((jp, len(src), len(dst),))
        else:
            if len(src) == 0:
                return

            # make sure every element in list is the same
            def r(x, y):
                if type(y) != type(x):
                    raise ValueError('different type: {0}, {1}'.format(
                        type(y).__name__, type(x).__name__))
                return x
            ts = type(functools.reduce(r, src))
            td = type(functools.reduce(r, dst))

            # when type is different
            while True:
                if issubclass(ts, six.string_types) and issubclass(td, six.string_types):
                    break
                if issubclass(ts, six.integer_types) and issubclass(td, six.integer_types):
                    break
                if ts == td:
                    break
                ret.append((jp, str(ts), str(td),))
                return

            if ts != dict:
                ss, sd = sorted(src), sorted(dst)
            else:
                # process dict without sorting
                # TODO: find a way to sort list of dict, (ooch)
                ss, sd = src, dst

            # iterate over the (possibly sorted) pairs prepared above
            for idx, (s, d) in enumerate(zip(ss, sd)):
                _diff_(s, d, ret, jp_compose(str(idx), base=jp), exclude, include)

    ret = [] if ret is None else ret
    jp = '' if jp is None else jp

    if isinstance(src, dict):
        if not isinstance(dst, dict):
            ret.append((jp, type(src).__name__, type(dst).__name__,))
        else:
            _dict_(src, dst, ret, jp)
    elif isinstance(src, list):
        if not isinstance(dst, list):
            ret.append((jp, type(src).__name__, type(dst).__name__,))
        else:
            _list_(src, dst, ret, jp)
    elif src != dst:
        ret.append((jp, src, dst,))

    return ret
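A self-contained illustration of the output shape described in the docstring, with a simplified stand-in for `jp_compose` (the real helper is not shown here):

def jp_compose(key, base=''):
    # simplified: the real helper also escapes '~' and '/' per RFC 6901
    return base + '/' + key

src = {'a': 1, 'b': [1, 2]}
dst = {'a': 2, 'b': [1, 2, 3]}

ret = []
for k in src.keys() & dst.keys():
    s, d = src[k], dst[k]
    if isinstance(s, list) and isinstance(d, list) and len(s) != len(d):
        ret.append((jp_compose(k), len(s), len(d)))   # list length diff
    elif s != d:
        ret.append((jp_compose(k), s, d))             # plain value diff

print(sorted(ret))  # [('/a', 1, 2), ('/b', 2, 3)]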
java
@Override
public synchronized SnapshotContentItem read()
        throws Exception, UnexpectedInputException, ParseException, NonTransientResourceException {
    if (this.items == null) {
        this.items = new StreamingIterator<>(
            new JpaIteratorSource<SnapshotContentItemRepo, SnapshotContentItem>(repo) {
                @Override
                protected Page<SnapshotContentItem> getNextPage(Pageable pageable,
                                                                SnapshotContentItemRepo repo) {
                    return repo.findBySnapshotName(snapshotName, pageable);
                }
            });
        skipLinesAlreadyRead(this.items);
    }
    return this.items.hasNext() ? this.items.next() : null;
}
python
def update_iptables(self):
    """Update iptables based on information in the rule_info."""
    # Read the iptables
    iptables_cmds = ['iptables-save', '-c']
    all_rules = dsl.execute(iptables_cmds, root_helper=self._root_helper,
                            log_output=False)

    # For each rule in rule_info update the rule if necessary.
    new_rules = []
    is_modified = False
    for line in all_rules.split('\n'):
        new_line = line
        line_content = line.split()
        # The spoofing rule which includes mac and ip should have
        # -s cidr/32 option for ip address. Otherwise no rule
        # will be modified.
        if '-s' in line_content:
            tmp_rule_info = list(self.rule_info)
            for rule in tmp_rule_info:
                if (rule.mac in line.lower() and
                        rule.chain.lower() in line.lower() and
                        not self._is_ip_in_rule(rule.ip, line_content)):
                    ip_loc = line_content.index('-s') + 1
                    line_content[ip_loc] = rule.ip + '/32'
                    new_line = ' '.join(line_content)
                    LOG.debug('Modified %(old_rule)s. '
                              'New rule is %(new_rule)s.' % (
                                  {'old_rule': line,
                                   'new_rule': new_line}))
                    is_modified = True
        new_rules.append(new_line)

    if is_modified and new_rules:
        # Updated all the rules. Now commit the new rules.
        iptables_cmds = ['iptables-restore', '-c']
        dsl.execute(iptables_cmds, process_input='\n'.join(new_rules),
                    root_helper=self._root_helper, log_output=False)
java
public void handleTrace(HttpRequest request, HttpResponse response) throws IOException {
    boolean trace = getHttpContext().getHttpServer().getTrace();

    // Handle TRACE by returning request header
    response.setField(HttpFields.__ContentType, HttpFields.__MessageHttp);
    if (trace) {
        OutputStream out = response.getOutputStream();
        ByteArrayISO8859Writer writer = new ByteArrayISO8859Writer();
        writer.write(request.toString());
        writer.flush();
        response.setIntField(HttpFields.__ContentLength, writer.size());
        writer.writeTo(out);
        out.flush();
    }
    request.setHandled(true);
}
java
public static int serialize(final File directory, String name, Object obj) {
    try (FileOutputStream stream = new FileOutputStream(new File(directory, name))) {
        return serialize(stream, obj);
    } catch (IOException e) {
        throw new ReportGenerationException(e);
    }
}
java
public java.util.Map<String, java.util.List<String>> getOverriddenParameters() { return overriddenParameters; }
java
@Override
public void onError(InvocationContext context, Exception error) {
    throw InvocationException.newInstance(context, error);
}
java
public List<IAssociativeReducer> deserializeReducerList(String str) { return load(str, ListWrappers.ReducerList.class).getList(); }
python
def get_url(
    width,
    height=None,
    background_color="cccccc",
    text_color="969696",
    text=None,
    random_background_color=False
):
    """
    Craft the URL for a placeholder image.

    You can customize the background color, text color and text using
    the optional keyword arguments.

    If you want to use a random color pass in random_background_color
    as True.
    """
    if random_background_color:
        background_color = _get_random_color()
    # If height is not provided, presume it will be a square
    if not height:
        height = width
    d = dict(
        width=width,
        height=height,
        bcolor=background_color,
        tcolor=text_color
    )
    url = URL % d
    if text:
        text = text.replace(" ", "+")
        url = url + "?text=" + text
    return url
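A usage sketch; the module's real `URL` template is not shown above, so the placehold.it-style template below is an assumption for illustration:

# Hypothetical template matching the %(...)s keys built by get_url
URL = "https://placehold.it/%(width)sx%(height)s/%(bcolor)s/%(tcolor)s"

d = dict(width=300, height=300, bcolor="cccccc", tcolor="969696")
url = URL % d + "?text=" + "Hello world".replace(" ", "+")
print(url)  # https://placehold.it/300x300/cccccc/969696?text=Hello+world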
python
def print_agi_and_mpe_balances(self):
    """ Print balance of ETH, AGI, and MPE wallet """
    if (self.args.account):
        account = self.args.account
    else:
        account = self.ident.address

    eth_wei = self.w3.eth.getBalance(account)
    agi_cogs = self.call_contract_command("SingularityNetToken", "balanceOf", [account])
    mpe_cogs = self.call_contract_command("MultiPartyEscrow", "balances", [account])

    # we cannot use _pprint here because it doesn't conserve order yet
    self._printout("    account: %s" % account)
    self._printout("    ETH: %s" % self.w3.fromWei(eth_wei, 'ether'))
    self._printout("    AGI: %s" % cogs2stragi(agi_cogs))
    self._printout("    MPE: %s" % cogs2stragi(mpe_cogs))
python
def gsum_(self, col: str, index_col: bool = True) -> "Ds":
    """
    Group by and sum column

    :param col: column to group
    :type col: str
    :param index_col:
    :type index_col: bool
    :return: a dataswim instance
    :rtype: Ds

    :example: ``ds2 = ds.gsum("Col 1")``
    """
    try:
        df = self.df.copy()
        df = df.groupby([col]).sum()
        if index_col is True:
            df[col] = df.index.values
        return self._duplicate_(df)
    except Exception as e:
        self.err(e, self.gsum_, "Can not groupsum column")
python
def handle_aggregated_quotas(sender, instance, **kwargs):
    """ Call aggregated quotas fields update methods """
    quota = instance
    # aggregation is not supported for global quotas.
    if quota.scope is None:
        return
    quota_field = quota.get_field()
    # usage aggregation should not count another usage aggregator field to avoid calls duplication.
    if isinstance(quota_field, fields.UsageAggregatorQuotaField) or quota_field is None:
        return
    signal = kwargs['signal']
    for aggregator_quota in quota_field.get_aggregator_quotas(quota):
        field = aggregator_quota.get_field()
        if signal == signals.post_save:
            field.post_child_quota_save(aggregator_quota.scope, child_quota=quota,
                                        created=kwargs.get('created'))
        elif signal == signals.pre_delete:
            field.pre_child_quota_delete(aggregator_quota.scope, child_quota=quota)
python
def _iter_module_subclasses(package, module_name, base_cls):
    """inspect all modules in this directory for subclasses that inherit
    from ``base_cls``. inspiration from
    http://stackoverflow.com/q/1796180/564709
    """
    module = importlib.import_module('.' + module_name, package)
    for name, obj in inspect.getmembers(module):
        if inspect.isclass(obj) and issubclass(obj, base_cls):
            yield obj
java
public static double cdf(double x, double mu, double sigma) {
    if (x <= 0.) {
        return 0.;
    }
    return .5 * (1 + NormalDistribution.erf((FastMath.log(x) - mu) / (MathUtil.SQRT2 * sigma)));
}
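This is the standard log-normal CDF, F(x) = (1/2)(1 + erf((ln x - mu)/(sigma * sqrt(2)))). A quick cross-check in Python using only the standard library (the Java helpers above are assumed to behave like `math.erf` and `math.log`):

import math

def lognorm_cdf(x, mu, sigma):
    if x <= 0.0:
        return 0.0
    return 0.5 * (1 + math.erf((math.log(x) - mu) / (math.sqrt(2) * sigma)))

# The median of a log-normal is exp(mu), so the CDF there should be 0.5
print(lognorm_cdf(math.exp(0.3), 0.3, 1.2))  # 0.5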
python
def _gather_field_values(
        item, *, fields=None, field_map=FIELD_MAP,
        normalize_values=False, normalize_func=normalize_value):
    """Create a tuple of normalized metadata field values.

    Parameters:
        item (~collections.abc.Mapping, str, os.PathLike): Item dict or filepath.
        fields (list): A list of fields used to compare item dicts.
        field_map (~collections.abc.Mapping): A mapping of field name aliases.
            Default: :data:`~google_music_utils.constants.FIELD_MAP`
        normalize_values (bool): Normalize metadata values to remove common
            differences between sources.
            Default: ``False``
        normalize_func (function): Function to apply to metadata values if
            ``normalize_values`` is ``True``.
            Default: :func:`~google_music_utils.utils.normalize_value`

    Returns:
        tuple: Values from the given metadata fields.
    """
    it = get_item_tags(item)

    if fields is None:
        fields = list(it.keys())

    normalize = normalize_func if normalize_values else lambda x: str(x)

    field_values = []
    for field in fields:
        field_values.append(
            normalize(
                list_to_single_value(
                    get_field(it, field, field_map=field_map)
                )
            )
        )
    return tuple(field_values)
python
def request(self, method, endpoint, body=None, timeout=-1):
    """
    Perform a request with a given body to a given endpoint in UpCloud's API.

    Handles errors with __error_middleware.
    """
    if method not in set(['GET', 'POST', 'PUT', 'DELETE']):
        raise Exception('Invalid/Forbidden HTTP method')

    url = '/' + self.api_v + endpoint
    headers = {
        'Authorization': self.token,
        'Content-Type': 'application/json'
    }

    if body:
        json_body_or_None = json.dumps(body)
    else:
        json_body_or_None = None

    call_timeout = timeout if timeout != -1 else self.timeout

    APIcall = getattr(requests, method.lower())
    res = APIcall('https://api.upcloud.com' + url,
                  data=json_body_or_None,
                  headers=headers,
                  timeout=call_timeout)

    if res.text:
        res_json = res.json()
    else:
        res_json = {}

    return self.__error_middleware(res, res_json)
python
def get_cluster_custom_object_status(self, group, version, plural, name, **kwargs):  # noqa: E501
    """get_cluster_custom_object_status  # noqa: E501

    read status of the specified cluster scoped custom object  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_cluster_custom_object_status(group, version, plural, name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str group: the custom resource's group (required)
    :param str version: the custom resource's version (required)
    :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
    :param str name: the custom object's name (required)
    :return: object
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.get_cluster_custom_object_status_with_http_info(group, version, plural, name, **kwargs)  # noqa: E501
    else:
        (data) = self.get_cluster_custom_object_status_with_http_info(group, version, plural, name, **kwargs)  # noqa: E501
        return data
python
def get_config_path():
    """Put together the default configuration path based on OS."""
    dir_path = (os.getenv('APPDATA') if os.name == "nt"
                else os.path.expanduser('~'))
    return os.path.join(dir_path, '.vtjp')
python
def inserir(
        self,
        id_equipamento,
        fqdn,
        user,
        password,
        id_tipo_acesso,
        enable_pass):
    """Add new relationship between equipment and access type and returns its id.

    :param id_equipamento: Equipment identifier.
    :param fqdn: Equipment FQDN.
    :param user: User.
    :param password: Password.
    :param id_tipo_acesso: Access Type identifier.
    :param enable_pass: Enable access.

    :return: Dictionary with the following: {‘equipamento_acesso’: {‘id’: < id >}}

    :raise EquipamentoNaoExisteError: Equipment doesn't exist.
    :raise TipoAcessoNaoExisteError: Access Type doesn't exist.
    :raise EquipamentoAcessoError: Equipment and access type already associated.
    :raise InvalidParameterError: The parameters equipment id, fqdn, user,
        password or access type id are invalid or none.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    equipamento_acesso_map = dict()
    equipamento_acesso_map['id_equipamento'] = id_equipamento
    equipamento_acesso_map['fqdn'] = fqdn
    equipamento_acesso_map['user'] = user
    equipamento_acesso_map['pass'] = password
    equipamento_acesso_map['id_tipo_acesso'] = id_tipo_acesso
    equipamento_acesso_map['enable_pass'] = enable_pass

    code, xml = self.submit(
        {'equipamento_acesso': equipamento_acesso_map}, 'POST', 'equipamentoacesso/')

    return self.response(code, xml)
python
def handle_pause(self): """Read pause signal from server""" flag = self.reader.byte() if flag > 0: logger.info(" -> pause: on") self.controller.playing = False else: logger.info(" -> pause: off") self.controller.playing = True
java
public static Field getDeclaredFieldWithPath(Class<?> clazz, String path) { int lastDot = path.lastIndexOf('.'); if (lastDot > -1) { String parentPath = path.substring(0, lastDot); String fieldName = path.substring(lastDot + 1); Field parentField = getDeclaredFieldWithPath(clazz, parentPath); return getDeclaredFieldInHierarchy(parentField.getType(), fieldName); } else { return getDeclaredFieldInHierarchy(clazz, path); } }
python
async def get_data(self):
    """Retrieve the data."""
    try:
        with async_timeout.timeout(5, loop=self._loop):
            response = await self._session.get(self.url)

        _LOGGER.debug(
            "Response from Volkszaehler API: %s", response.status)
        self.data = await response.json()
        _LOGGER.debug(self.data)
    except (asyncio.TimeoutError, aiohttp.ClientError):
        _LOGGER.error("Cannot load data from Volkszaehler API")
        self.data = None
        raise exceptions.VolkszaehlerApiConnectionError()

    self.average = self.data['data']['average']
    self.max = self.data['data']['max'][1]
    self.min = self.data['data']['min'][1]
    self.consumption = self.data['data']['consumption']
    self.tuples = self.data['data']['tuples']
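A hedged driver for the coroutine above. The constructor signature of the surrounding class is an assumption here (an event loop, an aiohttp session, and a host); adjust it to the real class definition.

# 'Volkszaehler' and its constructor arguments are hypothetical in this sketch.
import asyncio
import aiohttp

async def main():
    loop = asyncio.get_event_loop()
    async with aiohttp.ClientSession() as session:
        vz = Volkszaehler(loop, session, host='localhost')
        await vz.get_data()
        print(vz.average, vz.min, vz.max, vz.consumption)

asyncio.get_event_loop().run_until_complete(main())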
python
def replace_all(self, text=None):
    """
    Replaces all occurrences in the editor's document.

    :param text: The replacement text. If None, the content of the replace
        line edit is used instead.
    """
    cursor = self.editor.textCursor()
    cursor.beginEditBlock()
    remains = self.replace(text=text)
    while remains:
        remains = self.replace(text=text)
    cursor.endEditBlock()
java
void endOptional(boolean successful) { if (successful) { parsed.remove(parsed.size() - 2); } else { parsed.remove(parsed.size() - 1); } }
java
public boolean containsAnnotation(BackedAnnotatedType<?> annotatedType, Class<? extends Annotation> requiredAnnotation) { // class level annotations if (containsAnnotation(annotatedType.getAnnotations(), requiredAnnotation, true)) { return true; } for (Class<?> clazz = annotatedType.getJavaClass(); clazz != null && clazz != Object.class; clazz = clazz.getSuperclass()) { // fields for (Field field : clazz.getDeclaredFields()) { if (containsAnnotations(cache.getAnnotations(field), requiredAnnotation)) { return true; } } // constructors for (Constructor<?> constructor : clazz.getDeclaredConstructors()) { if (containsAnnotations(cache.getAnnotations(constructor), requiredAnnotation)) { return true; } for (Annotation[] parameterAnnotations : constructor.getParameterAnnotations()) { if (containsAnnotations(parameterAnnotations, requiredAnnotation)) { return true; } } } // methods for (Method method : clazz.getDeclaredMethods()) { if (containsAnnotations(cache.getAnnotations(method), requiredAnnotation)) { return true; } for (Annotation[] parameterAnnotations : method.getParameterAnnotations()) { if (containsAnnotations(parameterAnnotations, requiredAnnotation)) { return true; } } } } // Also check default methods on interfaces for (Class<?> interfaceClazz : Reflections.getInterfaceClosure(annotatedType.getJavaClass())) { for (Method method : interfaceClazz.getDeclaredMethods()) { if (Reflections.isDefault(method)) { if (containsAnnotations(cache.getAnnotations(method), requiredAnnotation)) { return true; } for (Annotation[] parameterAnnotations : method.getParameterAnnotations()) { if (containsAnnotations(parameterAnnotations, requiredAnnotation)) { return true; } } } } } return false; }
java
public void addLine(String line) { try { writer.append(line); writer.newLine(); } catch (IOException e) { e.printStackTrace(); } }
java
public static base_response update(nitro_service client, nshttpparam resource) throws Exception { nshttpparam updateresource = new nshttpparam(); updateresource.dropinvalreqs = resource.dropinvalreqs; updateresource.markhttp09inval = resource.markhttp09inval; updateresource.markconnreqinval = resource.markconnreqinval; updateresource.insnssrvrhdr = resource.insnssrvrhdr; updateresource.nssrvrhdr = resource.nssrvrhdr; updateresource.logerrresp = resource.logerrresp; updateresource.conmultiplex = resource.conmultiplex; updateresource.maxreusepool = resource.maxreusepool; return updateresource.update_resource(client); }
python
def check_can_approve(self, request, application, roles): """ Check the person's authorization. """ try: authorised_persons = self.get_authorised_persons(application) authorised_persons.get(pk=request.user.pk) return True except Person.DoesNotExist: return False
python
def get_library_version(): """ Get the version number of the underlying gphoto2 library. :return: The version :rtype: tuple of (major, minor, patch) version numbers """ version_str = ffi.string(lib.gp_library_version(True)[0]).decode() return tuple(int(x) for x in version_str.split('.'))
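A short usage note; the printed version numbers are illustrative and depend on the installed libgphoto2.

# e.g. (2, 5, 27) on a recent install; the actual numbers vary.
major, minor, patch = get_library_version()
print('libgphoto2 %d.%d.%d' % (major, minor, patch))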
java
public static void main(final String[] args) { Switch about = new Switch("a", "about", "display about message"); Switch help = new Switch("h", "help", "display help message"); StringListArgument snpIdFilter = new StringListArgument("s", "snp-ids", "filter by snp id", true); FileArgument inputVcfFile = new FileArgument("i", "input-vcf-file", "input VCF file, default stdin", false); FileArgument outputVcfFile = new FileArgument("o", "output-vcf-file", "output VCF file, default stdout", false); ArgumentList arguments = new ArgumentList(about, help, snpIdFilter, inputVcfFile, outputVcfFile); CommandLine commandLine = new CommandLine(args); FilterVcf filterVcf = null; try { CommandLineParser.parse(commandLine, arguments); if (about.wasFound()) { About.about(System.out); System.exit(0); } if (help.wasFound()) { Usage.usage(USAGE, null, commandLine, arguments, System.out); System.exit(0); } filterVcf = new FilterVcf(new IdFilter(snpIdFilter.getValue()), inputVcfFile.getValue(), outputVcfFile.getValue()); } catch (CommandLineParseException e) { if (about.wasFound()) { About.about(System.out); System.exit(0); } if (help.wasFound()) { Usage.usage(USAGE, null, commandLine, arguments, System.out); System.exit(0); } Usage.usage(USAGE, e, commandLine, arguments, System.err); System.exit(-1); } catch (NullPointerException e) { Usage.usage(USAGE, e, commandLine, arguments, System.err); System.exit(-1); } try { System.exit(filterVcf.call()); } catch (Exception e) { e.printStackTrace(); System.exit(1); } }
java
@Override public Object get(PageContext pc, Collection.Key key) throws PageException { return COMUtil.toObject(this, Dispatch.call(dispatch, key.getString()), key.getString()); }
java
Rule NonQuoteOneTextLine() { //TODO TexText? return FirstOf(SP(), '!', CharRange('#', ':'), CharRange('<', '~'), LatinExtendedAndOtherAlphabet() ).label(NonQuoteOneTextLine).suppressSubnodes(); }
java
void setValidityCheck(PassFactory validityCheck) { this.validityCheck = validityCheck; this.changeVerifier = new ChangeVerifier(compiler).snapshot(jsRoot); }
python
def insertDataset(self, businput):
    """
    input dictionary must have the following keys:
    dataset, primary_ds_name(name), processed_ds(name), data_tier(name),
    acquisition_era(name), processing_version
    It may have the following keys:
    physics_group(name), xtcrosssection, creation_date, create_by,
    last_modification_date, last_modified_by
    """
    if not ("primary_ds_name" in businput and "dataset" in businput
            and "dataset_access_type" in businput and "processed_ds_name" in businput):
        dbsExceptionHandler('dbsException-invalid-input', "business/DBSDataset/insertDataset must have dataset,\
            dataset_access_type, primary_ds_name, processed_ds_name as input")

    if "data_tier_name" not in businput:
        dbsExceptionHandler('dbsException-invalid-input', "insertDataset must have data_tier_name as input.")

    conn = self.dbi.connection()
    tran = conn.begin()
    try:
        dsdaoinput = {}
        dsdaoinput["primary_ds_name"] = businput["primary_ds_name"]
        dsdaoinput["data_tier_name"] = businput["data_tier_name"].upper()
        dsdaoinput["dataset_access_type"] = businput["dataset_access_type"].upper()
        # not required to pre-exist in the db; will be inserted with the dataset if not in yet
        # processed_ds_name=acquisition_era_name[-filter_name][-processing_str]-vprocessing_version  Changed as 4/30/2012 YG.
        # Although acquisition era and processing version are not required for a dataset in the schema (the schema is
        # built this way because we need to accommodate the DBS2 data), we impose the requirement on the API. So both
        # acquisition and processing eras are required. YG 12/07/2011 TK-362
        if "acquisition_era_name" in businput and "processing_version" in businput:
            erals = businput["processed_ds_name"].rsplit('-')
            if erals[0] == businput["acquisition_era_name"] and erals[len(erals) - 1] == "%s%s" % ("v", businput["processing_version"]):
                dsdaoinput["processed_ds_name"] = businput["processed_ds_name"]
            else:
                dbsExceptionHandler('dbsException-invalid-input', "insertDataset:\
                    processed_ds_name=acquisition_era_name[-filter_name][-processing_str]-vprocessing_version must be satisfied.")
        else:
            dbsExceptionHandler("dbsException-missing-data", "insertDataset: Required acquisition_era_name or processing_version is not found in the input")

        if "physics_group_name" in businput:
            dsdaoinput["physics_group_id"] = self.phygrpid.execute(conn, businput["physics_group_name"])
            if dsdaoinput["physics_group_id"] == -1:
                dbsExceptionHandler("dbsException-missing-data", "insertDataset. physics_group_name not found in DB")
        else:
            dsdaoinput["physics_group_id"] = None

        dsdaoinput["dataset_id"] = self.sm.increment(conn, "SEQ_DS")
        # we are better off separating out what we need for the dataset DAO
        dsdaoinput.update({
            "dataset": "/%s/%s/%s" % (businput["primary_ds_name"], businput["processed_ds_name"], businput["data_tier_name"].upper()),
            "prep_id": businput.get("prep_id", None),
            "xtcrosssection": businput.get("xtcrosssection", None),
            "creation_date": businput.get("creation_date", dbsUtils().getTime()),
            "create_by": businput.get("create_by", dbsUtils().getCreateBy()),
            "last_modification_date": businput.get("last_modification_date", dbsUtils().getTime()),
            #"last_modified_by": businput.get("last_modified_by", dbsUtils().getModifiedBy())
            "last_modified_by": dbsUtils().getModifiedBy()
        })
        """
        repeated again, why? comment out by YG 3/14/2012
        #physics group
        if "physics_group_name" in businput:
            dsdaoinput["physics_group_id"] = self.phygrpid.execute(conn, businput["physics_group_name"])
            if dsdaoinput["physics_group_id"] == -1:
                dbsExceptionHandler("dbsException-missing-data", "insertDataset. Physics Group : %s Not found" % businput["physics_group_name"])
        else:
            dsdaoinput["physics_group_id"] = None
        """
        # See if Processing Era exists
        if "processing_version" in businput and businput["processing_version"] != 0:
            dsdaoinput["processing_era_id"] = self.proceraid.execute(conn, businput["processing_version"])
            if dsdaoinput["processing_era_id"] == -1:
                dbsExceptionHandler("dbsException-missing-data", "DBSDataset/insertDataset: processing_version not found in DB")
        else:
            dbsExceptionHandler("dbsException-invalid-input", "DBSDataset/insertDataset: processing_version is required")
        # See if Acquisition Era exists
        if "acquisition_era_name" in businput:
            dsdaoinput["acquisition_era_id"] = self.acqeraid.execute(conn, businput["acquisition_era_name"])
            if dsdaoinput["acquisition_era_id"] == -1:
                dbsExceptionHandler("dbsException-missing-data", "DBSDataset/insertDataset: acquisition_era_name not found in DB")
        else:
            dbsExceptionHandler("dbsException-invalid-input", "DBSDataset/insertDataset: acquisition_era_name is required")
        try:
            # insert the dataset
            self.datasetin.execute(conn, dsdaoinput, tran)
        except SQLAlchemyIntegrityError as ex:
            if str(ex).lower().find("unique constraint") != -1 or str(ex).lower().find("duplicate") != -1:
                # dataset already exists, let's fetch the ID
                self.logger.warning("Unique constraint violation being ignored...")
                self.logger.warning("%s" % ex)
                ds = "/%s/%s/%s" % (businput["primary_ds_name"], businput["processed_ds_name"], businput["data_tier_name"].upper())
                dsdaoinput["dataset_id"] = self.datasetid.execute(conn, ds)
                if dsdaoinput["dataset_id"] == -1:
                    dbsExceptionHandler("dbsException-missing-data", "DBSDataset/insertDataset. Strange error, the dataset %s does not exist ?" % ds)
            if str(ex).find("ORA-01400") != -1:
                dbsExceptionHandler("dbsException-missing-data", "insertDataset must have: dataset,\
                    primary_ds_name, processed_ds_name, data_tier_name ")
        except Exception:
            raise
        # FIXME: What about the READ-only status of the dataset?
        # There is no READ-only status for a dataset.
        # Create dataset_output_mod_configs mapping
        if "output_configs" in businput:
            for anOutConfig in businput["output_configs"]:
                dsoutconfdaoin = {}
                dsoutconfdaoin["dataset_id"] = dsdaoinput["dataset_id"]
                dsoutconfdaoin["output_mod_config_id"] = self.outconfigid.execute(
                    conn, anOutConfig["app_name"], anOutConfig["release_version"], anOutConfig["pset_hash"],
                    anOutConfig["output_module_label"], anOutConfig["global_tag"])
                if dsoutconfdaoin["output_mod_config_id"] == -1:
                    dbsExceptionHandler("dbsException-missing-data", "DBSDataset/insertDataset: Output config (%s, %s, %s, %s, %s) not found"
                                        % (anOutConfig["app_name"], anOutConfig["release_version"], anOutConfig["pset_hash"],
                                           anOutConfig["output_module_label"], anOutConfig["global_tag"]))
                try:
                    self.datasetoutmodconfigin.execute(conn, dsoutconfdaoin, tran)
                except Exception as ex:
                    if str(ex).lower().find("unique constraint") != -1 or str(ex).lower().find("duplicate") != -1:
                        pass
                    else:
                        raise
        # Dataset parentage will NOT be added by this API; it is set by insertFiles() -- deduced by insertFiles
        # Dataset runs will NOT be added by this API; they are set by insertFiles() -- deduced by insertFiles, OR by the insertRun API call
        tran.commit()
        tran = None
    except Exception:
        if tran:
            tran.rollback()
            tran = None
        raise
    finally:
        if tran:
            tran.rollback()
        if conn:
            conn.close()