language (stringclasses, 2 values)
func_code_string (stringlengths, 63 to 466k)
python
def sdm2ms(sdmfile, msfile, scan, inttime='0'):
    """ Converts sdm to ms format for a single scan.
    msfile defines the name template for the ms; it should end in .ms, and "s<scan>" will be put in.
    scan is a string of the (sdm-counted) scan number.
    inttime is a string to feed to the split command; it gives the option of integrating the data down in time.
    """

    # fill ms file
    if os.path.exists(msfile):
        logger.debug('%s already set.' % msfile)
    else:
        logger.info('No %s found. Creating anew.' % msfile)
        if inttime != '0':
            logger.info('Filtering by int time.')
            call(['asdm2MS', '--ocm', 'co', '--icm', 'co', '--lazy', '--scans', scan, sdmfile, msfile + '.tmp'])

            cfg = tasklib.SplitConfig()  # configure split
            cfg.vis = msfile + '.tmp'
            cfg.out = msfile
            cfg.timebin = inttime
            cfg.col = 'data'
            cfg.antenna = '*&*'  # discard autos
            tasklib.split(cfg)  # run task

            # clean up
            rmtree(msfile + '.tmp')
        else:
            call(['asdm2MS', '--ocm', 'co', '--icm', 'co', '--lazy', '--scans', scan, sdmfile, msfile])

    return msfile
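A hypothetical call, assuming an SDM named 16A-459.sdm and the asdm2MS tool on the path; scan 5 is split out and averaged to 10-second integrations:

    ms = sdm2ms('16A-459.sdm', '16A-459_s5.ms', '5', inttime='10s')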
python
def render_pep440_bare(pieces):
    """TAG[.DISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.DISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
    else:
        # exception #1
        rendered = "0.%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered
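For illustration, two hypothetical `pieces` dicts and the strings the function renders from them:

    pieces = {"closest-tag": "1.2.0", "distance": 3, "dirty": True}
    render_pep440_bare(pieces)   # -> "1.2.0.3.dev0"
    pieces = {"closest-tag": None, "distance": 4, "dirty": False}
    render_pep440_bare(pieces)   # -> "0.4"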
python
def _verbose_print(self, desc, init, arr): """Internal verbose print function Parameters ---------- desc : InitDesc or str name of the array init : str initializer pattern arr : NDArray initialized array """ if self._verbose and self._print_func: logging.info('Initialized %s as %s: %s', desc, init, self._print_func(arr))
python
def get_config_value_from_file(key, config_path=None, default=None): """Return value if key exists in file. Return default if key not in config. """ config = _get_config_dict_from_file(config_path) if key not in config: return default return config[key]
java
public static <K, V> HashMap<K, V> sizedHashMap(final int size) { return new HashMap<K, V>(size); }
java
@Override public JFieldVar apply(String nodeName, JsonNode node, JsonNode parent, JFieldVar field, Schema currentSchema) { boolean defaultPresent = node != null && isNotEmpty(node.asText()); String fieldType = field.type().fullName(); if (defaultPresent && !field.type().isPrimitive() && node.isNull()) { field.init(JExpr._null()); } else if (fieldType.startsWith(List.class.getName())) { field.init(getDefaultList(field.type(), node)); } else if (fieldType.startsWith(Set.class.getName())) { field.init(getDefaultSet(field.type(), node)); } else if (fieldType.startsWith(String.class.getName()) && node != null ) { field.init(getDefaultValue(field.type(), node)); } else if (defaultPresent) { field.init(getDefaultValue(field.type(), node)); } return field; }
java
public void assertExecuteMethodResponseDefined(ActionResponse response) {
    if (response.isUndefined()) {
        final ExceptionMessageBuilder br = new ExceptionMessageBuilder();
        br.addNotice("Cannot return undefined response from the execute method.");
        br.addItem("Advice");
        br.addElement("Not allowed to return undefined() in execute method.");
        br.addElement("If you want to return response as empty body,");
        br.addElement("use asEmptyBody() like this:");
        br.addElement("  @Execute");
        br.addElement("  public HtmlResponse index() {");
        br.addElement("      return HtmlResponse.asEmptyBody();");
        br.addElement("  }");
        br.addItem("Action Execute");
        br.addElement(execute);
        final String msg = br.buildExceptionMessage();
        throw new ExecuteMethodReturnUndefinedResponseException(msg);
    }
}
java
@SuppressWarnings("unchecked") public void writeObjectField(final EnMember member, Object obj) { Object value = member.attribute.get(obj); if (value == null) return; if (tiny()) { if (member.istring) { if (((CharSequence) value).length() == 0) return; } else if (member.isbool) { if (!((Boolean) value)) return; } } this.writeFieldName(member); member.encoder.convertTo(this, value); this.comma = true; }
python
def get_possible_initializer_keys(cls, num_layers): """Returns the keys the dictionary of variable initializers may contain. The set of all possible initializer keys are: wt: weight for input -> T gate wh: weight for input -> H gate wtL: weight for prev state -> T gate for layer L (indexed from 0) whL: weight for prev state -> H gate for layer L (indexed from 0) btL: bias for prev state -> T gate for layer L (indexed from 0) bhL: bias for prev state -> H gate for layer L (indexed from 0) Args: num_layers: (int) Number of highway layers. Returns: Set with strings corresponding to the strings that may be passed to the constructor. """ keys = [cls.WT, cls.WH] for layer_index in xrange(num_layers): layer_str = str(layer_index) keys += [ cls.WT + layer_str, cls.BT + layer_str, cls.WH + layer_str, cls.BH + layer_str] return set(keys)
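As an illustration, assuming the class constants WT, WH, BT, BH hold the strings "wt", "wh", "bt", "bh", a two-layer highway core would report:

    # HighwayCore.get_possible_initializer_keys(2)   (class name is hypothetical)
    # -> {"wt", "wh", "wt0", "bt0", "wh0", "bh0", "wt1", "bt1", "wh1", "bh1"}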
python
def eigvec_to_eigdispl(v, q, frac_coords, mass):
    """
    Converts a single eigenvector to an eigendisplacement in the primitive cell
    according to the formula::

        exp(2*pi*i*(frac_coords \\dot q)) / sqrt(mass) * v

    Compared to the modulation option in phonopy, here all the additional
    multiplicative and phase factors are set to 1.

    Args:
        v: the vector that should be converted. A 3D complex numpy array.
        q: the q point in fractional coordinates
        frac_coords: the fractional coordinates of the atom
        mass: the mass of the atom
    """
    c = np.exp(2j * np.pi * np.dot(frac_coords, q)) / np.sqrt(mass)
    return c * v
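A small worked call, assuming numpy is imported as np; the mass of silicon is used purely as an example value:

    v = np.array([1 + 0j, 0j, 0j])
    q = np.array([0.5, 0.0, 0.0])
    frac_coords = np.array([0.25, 0.25, 0.25])
    disp = eigvec_to_eigdispl(v, q, frac_coords, mass=28.0855)
    # the phase factor has unit modulus, so |disp| == |v| / sqrt(mass)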
java
public java.util.List<ProductCode> getProductCodes() { if (productCodes == null) { productCodes = new com.amazonaws.internal.SdkInternalList<ProductCode>(); } return productCodes; }
java
private Comparator<String> getInitedAliasComparator() { if (MapUtil.isEmpty(this.headerAlias)) { return null; } Comparator<String> aliasComparator = this.aliasComparator; if (null == aliasComparator) { Set<String> keySet = this.headerAlias.keySet(); aliasComparator = new IndexedComparator<>(keySet.toArray(new String[keySet.size()])); this.aliasComparator = aliasComparator; } return aliasComparator; }
java
private void encodeMarkup(final FacesContext context, final Social social) throws IOException { final ResponseWriter writer = context.getResponseWriter(); final String clientId = social.getClientId(context); final String widgetVar = social.resolveWidgetVar(); final String styleClass = social.getTheme() + " " + StringUtils.defaultString(social.getStyleClass()); writer.startElement("div", social); writer.writeAttribute("id", clientId, "id"); writer.writeAttribute(HTML.WIDGET_VAR, widgetVar, null); writer.writeAttribute("class", styleClass, "styleClass"); if (social.getStyle() != null) { writer.writeAttribute("style", social.getStyle(), "style"); } writer.endElement("div"); }
java
private int doBatchKmeans() {
    System.out.println("\nBeginning a new iteration of K-Means...");
    int numReassigned = 0;

    /* Clear records for incremental k-means */
    for (int i = 0; i < this.centroids.length; ++i) {
        this.newClusters[i] = new ArrayList<Instance>();
        this.newCentroids[i] = new HashSparseVector();
        this.newQualities[i] = 0.0f;
    }

    for (int clusterNum = 0; clusterNum < this.centroids.length; ++clusterNum) { // iterate over clusters
        for (int docNum = 0; docNum < this.assignedClusters[clusterNum].size(); ++docNum) { // iterate over docs
            /*
             * Store the document the loops have selected in the 'doc' variable.
             * Store its vector in the 'docVec' variable for easy access.
             */
            Instance doc = this.assignedClusters[clusterNum].get(docNum);
            HashSparseVector docVec = (HashSparseVector) doc.getData();

            int bestClusterNum = clusterNum; // Assume we are already in the best cluster.
            double distanceToCurrentCentroid = this.centroids[clusterNum].distanceEuclidean(docVec);
            double squareDistanceOfBestCluster = distanceToCurrentCentroid;

            for (int i = 0; i < this.centroids.length; ++i) {
                double distance = 0.0;
                // see which centroid is closest to docVec
                if (clusterNum == i) { // We know the distance in its current cluster.
                    distance = distanceToCurrentCentroid;
                } else {
                    distance = this.centroids[i].distanceEuclidean(docVec);
                }
                if (distance < squareDistanceOfBestCluster) {
                    squareDistanceOfBestCluster = distance;
                    bestClusterNum = i;
                }
            }

            if (bestClusterNum != clusterNum) { // we moved a document!
                ++numReassigned;
            }
            this.newClusters[bestClusterNum].add(doc);
            this.newCentroids[bestClusterNum].plus(docVec);
        }
    }

    // Calculate the centroids of the clusters
    for (int i = 0; i < newClusters.length; ++i) {
        this.newCentroids[i].scaleDivide(this.newClusters[i].size());
        this.newQualities[i] = this.calculateClusterQuality(this.newClusters[i], this.newCentroids[i]);
        System.out.println("new cluster " + i + " Variance: " + this.newQualities[i]
                + " Num: " + newClusters[i].size());
    }

    return (numReassigned);
}
java
double string2Fraction(CharSequence cs, int readed) { if (cs.length() > 16) { throw new JsonParseException("Number string is too long.", readed); } double d = 0.0; for (int i = 0; i < cs.length(); i++) { int n = cs.charAt(i) - '0'; d = d + (n == 0 ? 0 : n / Math.pow(10, i + 1)); } return d; }
java
public static boolean zipFolder(File folder, String fileName){ boolean success = false; if(!folder.isDirectory()){ return false; } if(fileName == null){ fileName = folder.getAbsolutePath()+ZIP_EXT; } ZipArchiveOutputStream zipOutput = null; try { zipOutput = new ZipArchiveOutputStream(new File(fileName)); success = addFolderContentToZip(folder,zipOutput,""); zipOutput.close(); } catch (IOException e) { e.printStackTrace(); return false; } finally{ try { if(zipOutput != null){ zipOutput.close(); } } catch (IOException e) {} } return success; }
python
def fetch_image(self, path, dest, user='root'): """Store in the user home directory an image from a remote location. """ self.run('test -f %s || curl -L -s -o %s %s' % (dest, dest, path), user=user, ignore_error=True)
java
@Override public T deserializeWrapped( JsonReader reader, JsonDeserializationContext ctx, JsonDeserializerParameters params, IdentityDeserializationInfo identityInfo, TypeDeserializationInfo typeInfo, String typeInformation ) { return instanceBuilder.newInstance( reader, ctx, params, null, null ).getInstance(); }
python
def delete_nve_member(self, nexus_host, nve_int_num, vni): """Delete a member configuration on the NVE interface.""" starttime = time.time() path_snip = snipp.PATH_VNI_UPDATE % (nve_int_num, vni) self.client.rest_delete(path_snip, nexus_host) self.capture_and_print_timeshot( starttime, "delete_nve", switch=nexus_host)
java
public String getNodeDistributionRootUrl() { String ret = nodeDistributionRootUrl; if (nodeDistributionRootUrl == null) { ret = Constants.NODE_DIST_ROOT_URL; } return ret; }
python
def _get_filename_from_url(url): """ Return a filename from a URL Args: url (str): URL to extract filename from Returns: (str): Filename in URL """ parse = urlparse(url) return os.path.basename(parse.path)
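For example (a hypothetical URL):

    _get_filename_from_url("https://example.com/downloads/archive.tar.gz?token=abc")
    # -> "archive.tar.gz"; the query string is not part of parse.path, so it is dropped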
python
def writeSAM(sam, SAMfile, header=None):
    """
    Writes a pandas dataframe with the respective SAM columns:
    'QNAME','FLAG','RNAME','POS','MAPQ','CIGAR','RNEXT','PNEXT','TLEN','SEQ','QUAL'
    into a sam file

    :param sam: pandas dataframe to be written
    :param SAMfile: /path/to/file.sam

    :returns: nothing
    """
    def toNone(x):
        if x == "None":
            x = np.nan
        return x

    sam.reset_index(inplace=True, drop=True)
    QUAL = pd.DataFrame(sam['QUAL'].str.split("\t").tolist())
    cols = QUAL.columns.tolist()
    for c in cols:
        QUAL[c] = QUAL[c].apply(lambda x: toNone(x))

    sam = sam.drop(['QUAL'], axis=1)
    sam = pd.concat([sam, QUAL], axis=1)
    sam = sam.astype(str)
    sam = sam.as_matrix()

    tfile = open(SAMfile, "w+")

    if header is not None:
        for l in header:
            tfile.write(l)

    for l in sam:
        l = [s for s in l if s not in ['nan']]
        l = "\t".join(l)
        tfile.write(l + "\n")

    tfile.close()
java
public void setAdjustmentTimeSeries(com.google.api.ads.admanager.axis.v201902.TimeSeries adjustmentTimeSeries) { this.adjustmentTimeSeries = adjustmentTimeSeries; }
python
def update_user(self, id, **kwargs): # noqa: E501 """Update user with given user groups and permissions. # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.update_user(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param UserRequestDTO body: Example Body: <pre>{ \"identifier\": \"[email protected]\", \"groups\": [ \"user_management\" ], \"userGroups\": [ \"8b23136b-ecd2-4cb5-8c92-62477dcc4090\" ] }</pre> :return: UserModel If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.update_user_with_http_info(id, **kwargs) # noqa: E501 else: (data) = self.update_user_with_http_info(id, **kwargs) # noqa: E501 return data
python
def sampled_softmax(num_classes, num_samples, in_dim, inputs, weight, bias, sampled_values, remove_accidental_hits=True): """ Sampled softmax via importance sampling. This under-estimates the full softmax and is only used for training. """ # inputs = (n, in_dim) sample, prob_sample, prob_target = sampled_values # (num_samples, ) sample = S.var('sample', shape=(num_samples,), dtype='float32') # (n, ) label = S.var('label') label = S.reshape(label, shape=(-1,), name="label_reshape") # (num_samples+n, ) sample_label = S.concat(sample, label, dim=0) # lookup weights and biases # (num_samples+n, dim) sample_target_w = S.sparse.Embedding(data=sample_label, weight=weight, input_dim=num_classes, output_dim=in_dim, sparse_grad=True) # (num_samples+n, 1) sample_target_b = S.sparse.Embedding(data=sample_label, weight=bias, input_dim=num_classes, output_dim=1, sparse_grad=True) # (num_samples, dim) sample_w = S.slice(sample_target_w, begin=(0, 0), end=(num_samples, None)) target_w = S.slice(sample_target_w, begin=(num_samples, 0), end=(None, None)) sample_b = S.slice(sample_target_b, begin=(0, 0), end=(num_samples, None)) target_b = S.slice(sample_target_b, begin=(num_samples, 0), end=(None, None)) # target # (n, 1) true_pred = S.sum(target_w * inputs, axis=1, keepdims=True) + target_b # samples # (n, num_samples) sample_b = S.reshape(sample_b, (-1,)) sample_pred = S.FullyConnected(inputs, weight=sample_w, bias=sample_b, num_hidden=num_samples) # remove accidental hits if remove_accidental_hits: label_v = S.reshape(label, (-1, 1)) sample_v = S.reshape(sample, (1, -1)) neg = S.broadcast_equal(label_v, sample_v) * -1e37 sample_pred = sample_pred + neg prob_sample = S.reshape(prob_sample, shape=(1, num_samples)) p_target = true_pred - S.log(prob_target) p_sample = S.broadcast_sub(sample_pred, S.log(prob_sample)) # return logits and new_labels # (n, 1+num_samples) logits = S.concat(p_target, p_sample, dim=1) new_targets = S.zeros_like(label) return logits, new_targets
java
public ServiceFuture<DataLakeAnalyticsAccountInner> beginUpdateAsync(String resourceGroupName, String accountName, UpdateDataLakeAnalyticsAccountParameters parameters, final ServiceCallback<DataLakeAnalyticsAccountInner> serviceCallback) { return ServiceFuture.fromResponse(beginUpdateWithServiceResponseAsync(resourceGroupName, accountName, parameters), serviceCallback); }
java
private static <T> Constructor<T> lookupConstructor(Class<T> clazz, Object[] pArguments) throws NoSuchMethodException { Class[] argTypes = extractArgumentTypes(pArguments); return clazz.getConstructor(argTypes); }
python
def listen_many(*rooms): """Listen for changes in all registered listeners in all specified rooms""" rooms = set(r.conn for r in rooms) for room in rooms: room.validate_listeners() with ARBITRATOR.condition: while any(r.connected for r in rooms): ARBITRATOR.condition.wait() rooms = [r for r in rooms if r.run_queues()] if not rooms: return
python
async def deserialize(data: dict): """ Create the object from a previously serialized object. :param data: The output of the "serialize" call Example: data = await connection1.serialize() connection2 = await Connection.deserialize(data) :return: A re-instantiated object """ return await Connection._deserialize("vcx_connection_deserialize", json.dumps(data), data.get('source_id'))
java
private void processPropertySeq(LevState levState, short _prop, int start, int limit) { byte cell; byte[][] impTab = levState.impTab; short[] impAct = levState.impAct; short oldStateSeq,actionSeq; byte level, addLevel; int start0, k; start0 = start; /* save original start position */ oldStateSeq = levState.state; cell = impTab[oldStateSeq][_prop]; levState.state = GetState(cell); /* isolate the new state */ actionSeq = impAct[GetAction(cell)]; /* isolate the action */ addLevel = impTab[levState.state][IMPTABLEVELS_RES]; if (actionSeq != 0) { switch (actionSeq) { case 1: /* init ON seq */ levState.startON = start0; break; case 2: /* prepend ON seq to current seq */ start = levState.startON; break; case 3: /* EN/AN after R+ON */ level = (byte)(levState.runLevel + 1); setLevelsOutsideIsolates(levState.startON, start0, level); break; case 4: /* EN/AN before R for NUMBERS_SPECIAL */ level = (byte)(levState.runLevel + 2); setLevelsOutsideIsolates(levState.startON, start0, level); break; case 5: /* L or S after possible relevant EN/AN */ /* check if we had EN after R/AL */ if (levState.startL2EN >= 0) { addPoint(levState.startL2EN, LRM_BEFORE); } levState.startL2EN = -1; /* not within previous if since could also be -2 */ /* check if we had any relevant EN/AN after R/AL */ if ((insertPoints.points.length == 0) || (insertPoints.size <= insertPoints.confirmed)) { /* nothing, just clean up */ levState.lastStrongRTL = -1; /* check if we have a pending conditional segment */ level = impTab[oldStateSeq][IMPTABLEVELS_RES]; if ((level & 1) != 0 && levState.startON > 0) { /* after ON */ start = levState.startON; /* reset to basic run level */ } if (_prop == _S) { /* add LRM before S */ addPoint(start0, LRM_BEFORE); insertPoints.confirmed = insertPoints.size; } break; } /* reset previous RTL cont to level for LTR text */ for (k = levState.lastStrongRTL + 1; k < start0; k++) { /* reset odd level, leave runLevel+2 as is */ levels[k] = (byte)((levels[k] - 2) & ~1); } /* mark insert points as confirmed */ insertPoints.confirmed = insertPoints.size; levState.lastStrongRTL = -1; if (_prop == _S) { /* add LRM before S */ addPoint(start0, LRM_BEFORE); insertPoints.confirmed = insertPoints.size; } break; case 6: /* R/AL after possible relevant EN/AN */ /* just clean up */ if (insertPoints.points.length > 0) /* remove all non confirmed insert points */ insertPoints.size = insertPoints.confirmed; levState.startON = -1; levState.startL2EN = -1; levState.lastStrongRTL = limit - 1; break; case 7: /* EN/AN after R/AL + possible cont */ /* check for real AN */ if ((_prop == _AN) && (dirProps[start0] == AN) && (reorderingMode != REORDER_INVERSE_FOR_NUMBERS_SPECIAL)) { /* real AN */ if (levState.startL2EN == -1) { /* if no relevant EN already found */ /* just note the rightmost digit as a strong RTL */ levState.lastStrongRTL = limit - 1; break; } if (levState.startL2EN >= 0) { /* after EN, no AN */ addPoint(levState.startL2EN, LRM_BEFORE); levState.startL2EN = -2; } /* note AN */ addPoint(start0, LRM_BEFORE); break; } /* if first EN/AN after R/AL */ if (levState.startL2EN == -1) { levState.startL2EN = start0; } break; case 8: /* note location of latest R/AL */ levState.lastStrongRTL = limit - 1; levState.startON = -1; break; case 9: /* L after R+ON/EN/AN */ /* include possible adjacent number on the left */ for (k = start0-1; k >= 0 && ((levels[k] & 1) == 0); k--) { } if (k >= 0) { addPoint(k, RLM_BEFORE); /* add RLM before */ insertPoints.confirmed = insertPoints.size; /* confirm it */ } levState.startON = start0; break; 
case 10: /* AN after L */ /* AN numbers between L text on both sides may be trouble. */ /* tentatively bracket with LRMs; will be confirmed if followed by L */ addPoint(start0, LRM_BEFORE); /* add LRM before */ addPoint(start0, LRM_AFTER); /* add LRM after */ break; case 11: /* R after L+ON/EN/AN */ /* false alert, infirm LRMs around previous AN */ insertPoints.size=insertPoints.confirmed; if (_prop == _S) { /* add RLM before S */ addPoint(start0, RLM_BEFORE); insertPoints.confirmed = insertPoints.size; } break; case 12: /* L after L+ON/AN */ level = (byte)(levState.runLevel + addLevel); for (k=levState.startON; k < start0; k++) { if (levels[k] < level) { levels[k] = level; } } insertPoints.confirmed = insertPoints.size; /* confirm inserts */ levState.startON = start0; break; case 13: /* L after L+ON+EN/AN/ON */ level = levState.runLevel; for (k = start0-1; k >= levState.startON; k--) { if (levels[k] == level+3) { while (levels[k] == level+3) { levels[k--] -= 2; } while (levels[k] == level) { k--; } } if (levels[k] == level+2) { levels[k] = level; continue; } levels[k] = (byte)(level+1); } break; case 14: /* R after L+ON+EN/AN/ON */ level = (byte)(levState.runLevel+1); for (k = start0-1; k >= levState.startON; k--) { if (levels[k] > level) { levels[k] -= 2; } } break; default: /* we should never get here */ throw new IllegalStateException("Internal ICU error in processPropertySeq"); } } if ((addLevel) != 0 || (start < start0)) { level = (byte)(levState.runLevel + addLevel); if (start >= levState.runStart) { for (k = start; k < limit; k++) { levels[k] = level; } } else { setLevelsOutsideIsolates(start, limit, level); } } }
python
def interpolate(features, hparams, decode_hp): """Interpolate between the first input frame and last target frame. Args: features: dict of tensors hparams: HParams, training hparams. decode_hp: HParams, decode hparams. Returns: images: interpolated images, 4-D Tensor, shape=(num_interp, H, W, C) first_frame: image, 3-D Tensor, shape=(1, H, W, C) last_frame: image, 3-D Tensor, shape=(1, H, W, C) """ inputs, targets = features["inputs"], features["targets"] inputs = tf.unstack(inputs, axis=1) targets = tf.unstack(targets, axis=1) coeffs = np.linspace(0.0, 1.0, decode_hp.num_interp) # (X_1, X_t) -> (z_1, z_t) first_frame, last_frame = inputs[0], targets[-1] first_top_z, first_level_eps = frame_to_latents(first_frame, hparams) last_top_z, last_level_eps = frame_to_latents(last_frame, hparams) # Interpolate latents at all levels. first_lats = first_level_eps + [first_top_z] last_lats = last_level_eps + [last_top_z] interp_lats = [] lat_iterator = enumerate(zip(first_lats, last_lats)) for level_ind, (first_lat, last_lat) in lat_iterator: if level_ind in decode_hp.level_interp: if decode_hp.channel_interp == "all": interp_lat = glow_ops.linear_interpolate(first_lat, last_lat, coeffs) else: interp_lat = glow_ops.linear_interpolate_rank( first_lat, last_lat, coeffs, decode_hp.rank_interp) else: interp_lat = tf.tile(first_lat, [decode_hp.num_interp, 1, 1, 1]) interp_lats.append(interp_lat) level_eps_interp = interp_lats[:hparams.n_levels-1] z_top_interp = interp_lats[-1] images = latents_to_frames(z_top_interp, level_eps_interp, hparams) return images, first_frame, last_frame
python
def save_intermediate_array(self, array, name): """Save intermediate array object as FITS.""" if self.intermediate_results: fits.writeto(name, array, overwrite=True)
java
public void set(String propName, Object value) { if (propName.equals(PROP_IDENTIFIER)) { setIdentifier(((IdentifierType) value)); } if (propName.equals(PROP_VIEW_IDENTIFIERS)) { getViewIdentifiers().add(((com.ibm.wsspi.security.wim.model.ViewIdentifierType) value)); } if (propName.equals(PROP_PARENT)) { setParent(((Entity) value)); } if (propName.equals(PROP_CHILDREN)) { getChildren().add(((com.ibm.wsspi.security.wim.model.Entity) value)); } if (propName.equals(PROP_GROUPS)) { getGroups().add(((com.ibm.wsspi.security.wim.model.Group) value)); } if (propName.equals(PROP_CREATE_TIMESTAMP)) { setCreateTimestamp(((Date) value)); } if (propName.equals(PROP_MODIFY_TIMESTAMP)) { setModifyTimestamp(((Date) value)); } if (propName.equals(PROP_ENTITLEMENT_INFO)) { setEntitlementInfo(((EntitlementInfoType) value)); } if (propName.equals(PROP_CHANGE_TYPE)) { setChangeType(((String) value)); } }
python
def cdate_range(start=None, end=None, periods=None, freq='C', tz=None, normalize=True, name=None, closed=None, **kwargs): """ Return a fixed frequency DatetimeIndex, with CustomBusinessDay as the default frequency .. deprecated:: 0.21.0 Parameters ---------- start : string or datetime-like, default None Left bound for generating dates end : string or datetime-like, default None Right bound for generating dates periods : integer, default None Number of periods to generate freq : string or DateOffset, default 'C' (CustomBusinessDay) Frequency strings can have multiples, e.g. '5H' tz : string, default None Time zone name for returning localized DatetimeIndex, for example Asia/Beijing normalize : bool, default False Normalize start/end dates to midnight before generating date range name : string, default None Name of the resulting DatetimeIndex weekmask : string, Default 'Mon Tue Wed Thu Fri' weekmask of valid business days, passed to ``numpy.busdaycalendar`` holidays : list list/array of dates to exclude from the set of valid business days, passed to ``numpy.busdaycalendar`` closed : string, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None) Notes ----- Of the three parameters: ``start``, ``end``, and ``periods``, exactly two must be specified. To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Returns ------- rng : DatetimeIndex """ warnings.warn("cdate_range is deprecated and will be removed in a future " "version, instead use pd.bdate_range(..., freq='{freq}')" .format(freq=freq), FutureWarning, stacklevel=2) if freq == 'C': holidays = kwargs.pop('holidays', []) weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri') freq = CDay(holidays=holidays, weekmask=weekmask) return date_range(start=start, end=end, periods=periods, freq=freq, tz=tz, normalize=normalize, name=name, closed=closed, **kwargs)
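Since the deprecation warning itself points to bdate_range as the replacement, an equivalent call might look like this (the dates and holiday are made up for illustration):

    import pandas as pd

    rng = pd.bdate_range(start="2019-01-01", periods=5, freq="C",
                         weekmask="Mon Tue Wed Thu Fri",
                         holidays=["2019-01-02"])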
java
public int moveIndex(int delta) { int x = Math.max(0, Math.min(getIndex() + delta, getLength())); setIndex(x); return x; }
python
def assign_handler(query, category): """assign_handler(query, category) -- assign the user's query to a particular category, and call the appropriate handler. """ if(category == 'count lines'): handler.lines(query) elif(category == 'count words'): handler.words(query) elif(category == 'weather'): web.weather(query) elif(category == 'no match'): web.generic(query) elif(category == 'file info'): handler.file_info(query) elif(category == 'executable'): handler.make_executable(query) elif(category == 'search'): handler.search(query) elif(category == 'path'): handler.add_to_path(query) elif(category == 'uname'): handler.system_info(query) else: print 'I\'m not able to understand your query'
java
protected void initTableInfoForeignKeys(final Connection _con, final String _sql, final Map<String, TableInformation> _cache4Name) throws SQLException { Statement stmt = null; final ResultSet rs; if (_sql == null) { rs = _con.getMetaData().getImportedKeys(null, null, "%"); } else { stmt = _con.createStatement(); rs = stmt.executeQuery(_sql); } try { while (rs.next()) { final String tableName = rs.getString("TABLE_NAME").toUpperCase(); if (_cache4Name.containsKey(tableName)) { final String fkName = rs.getString("FK_NAME").toUpperCase(); final String colName = rs.getString("FKCOLUMN_NAME").toUpperCase(); final String refTableName = rs.getString("PKTABLE_NAME").toUpperCase(); final String refColName = rs.getString("PKCOLUMN_NAME").toUpperCase(); final boolean cascade = rs.getInt("DELETE_RULE") == DatabaseMetaData.importedKeyCascade; _cache4Name.get(tableName).addForeignKey(fkName, colName, refTableName, refColName, cascade); } } } finally { rs.close(); if (stmt != null) { stmt.close(); } } }
java
@Override public T addAsWebResource(Asset resource, String target) throws IllegalArgumentException { Validate.notNull(resource, "Resource should be specified"); Validate.notNull(target, "Target should be specified"); return addAsWebResource(resource, new BasicPath(target)); }
java
private Object serializeCollection(Collection pArg) { JSONArray array = new JSONArray(); for (Object value : ((Collection) pArg)) { array.add(serializeArgumentToJson(value)); } return array; }
java
@Override public String key(String key, Object[] args) { if (hasConfigValue(key)) { return getConfigValue(key, args); } return m_messages.key(key, args); }
python
def send(self, to, subject=None, body=None, reply_to=None, template=None, **kwargs): """ To send email :param to: the recipients, list or string :param subject: the subject :param body: the body :param reply_to: reply_to :param template: template, will use the templates instead :param kwargs: context args :return: bool - True if everything is ok """ sender = self.config.get("MAIL_SENDER") recipients = [to] if not isinstance(to, list) else to kwargs.update({ "subject": subject, "body": body, "reply_to": reply_to }) if not self.validated: abort("MailmanConfigurationError") if self.provider == "SES": kwargs["to"] = recipients if template: self.mail.send_template(template=template, **kwargs) else: self.mail.send(**kwargs) elif self.provider == "SMTP": if template: data = self._template(template=template, **kwargs) kwargs["subject"] = data["subject"] kwargs["body"] = data["body"] kwargs["recipients"] = recipients kwargs["sender"] = sender # Remove invalid Messages keys _safe_keys = ["recipients", "subject", "body", "html", "alts", "cc", "bcc", "attachments", "reply_to", "sender", "date", "charset", "extra_headers", "mail_options", "rcpt_options"] for k in kwargs.copy(): if k not in _safe_keys: del kwargs[k] message = flask_mail.Message(**kwargs) self.mail.send(message) else: abort("MailmanUnknownProviderError")
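A hypothetical call, assuming `mailer` is an instance of the class above configured with either the SES or SMTP provider; addresses and template name are placeholders:

    mailer.send(to="[email protected]", subject="Welcome", body="Hello!",
                reply_to="[email protected]")
    # or let a template supply subject/body, passing extra context as kwargs:
    mailer.send(to=["[email protected]", "[email protected]"], template="welcome.txt", name="Ada")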
python
def handle_items(repo, **kwargs): """:return: repo.files()""" log.info('items: %s %s' %(repo, kwargs)) if not hasattr(repo, 'items'): return [] return [i.serialize() for i in repo.items(**kwargs)]
java
private static PBXObjectRef createPBXSourcesBuildPhase(final int buildActionMask, final List files, final boolean runOnly) { final Map map = new HashMap(); map.put("buildActionMask", String.valueOf(buildActionMask)); map.put("files", files); map.put("isa", "PBXSourcesBuildPhase"); map.put("runOnlyForDeploymentPostprocessing", toString(runOnly)); return new PBXObjectRef(map); }
java
public static Color colorWithoutAlpha( Color color ) { return new Color(color.getRed(), color.getGreen(), color.getBlue()); }
python
def configure_containers(self, current_options): """ Configures the container dict """ containers = [ ("default", "Default container. For Bash and Python 2 tasks"), ("cpp", "Contains gcc and g++ for compiling C++"), ("java7", "Contains Java 7"), ("java8scala", "Contains Java 8 and Scala"), ("mono", "Contains Mono, which allows to run C#, F# and many other languages"), ("oz", "Contains Mozart 2, an implementation of the Oz multi-paradigm language, made for education"), ("php", "Contains PHP 5"), ("pythia0compat", "Compatibility container for Pythia 0"), ("pythia1compat", "Compatibility container for Pythia 1"), ("r", "Can run R scripts"), ("sekexe", "Can run an user-mode-linux for advanced tasks") ] default_download = ["default"] self._display_question( "The tool will now propose to download some base container image for multiple languages.") self._display_question( "Please note that the download of these images can take a lot of time, so choose only the images you need") to_download = [] for container_name, description in containers: if self._ask_boolean("Download %s (%s) ?" % (container_name, description), container_name in default_download): to_download.append("ingi/inginious-c-%s" % container_name) self.download_containers(to_download, current_options) wants = self._ask_boolean("Do you want to manually add some images?", False) while wants: image = self._ask_with_default("Container image name (leave this field empty to skip)", "") if image == "": break self._display_info("Configuration of the containers done.")
python
def _get_series_list(self, others, ignore_index=False): """ Auxiliary function for :meth:`str.cat`. Turn potentially mixed input into a list of Series (elements without an index must match the length of the calling Series/Index). Parameters ---------- others : Series, Index, DataFrame, np.ndarray, list-like or list-like of objects that are Series, Index or np.ndarray (1-dim) ignore_index : boolean, default False Determines whether to forcefully align others with index of caller Returns ------- tuple : (others transformed into list of Series, boolean whether FutureWarning should be raised) """ # Once str.cat defaults to alignment, this function can be simplified; # will not need `ignore_index` and the second boolean output anymore from pandas import Index, Series, DataFrame # self._orig is either Series or Index idx = self._orig if isinstance(self._orig, Index) else self._orig.index err_msg = ('others must be Series, Index, DataFrame, np.ndarrary or ' 'list-like (either containing only strings or containing ' 'only objects of type Series/Index/list-like/np.ndarray)') # Generally speaking, all objects without an index inherit the index # `idx` of the calling Series/Index - i.e. must have matching length. # Objects with an index (i.e. Series/Index/DataFrame) keep their own # index, *unless* ignore_index is set to True. if isinstance(others, Series): warn = not others.index.equals(idx) # only reconstruct Series when absolutely necessary los = [Series(others.values, index=idx) if ignore_index and warn else others] return (los, warn) elif isinstance(others, Index): warn = not others.equals(idx) los = [Series(others.values, index=(idx if ignore_index else others))] return (los, warn) elif isinstance(others, DataFrame): warn = not others.index.equals(idx) if ignore_index and warn: # without copy, this could change "others" # that was passed to str.cat others = others.copy() others.index = idx return ([others[x] for x in others], warn) elif isinstance(others, np.ndarray) and others.ndim == 2: others = DataFrame(others, index=idx) return ([others[x] for x in others], False) elif is_list_like(others, allow_sets=False): others = list(others) # ensure iterators do not get read twice etc # in case of list-like `others`, all elements must be # either one-dimensional list-likes or scalars if all(is_list_like(x, allow_sets=False) for x in others): los = [] join_warn = False depr_warn = False # iterate through list and append list of series for each # element (which we check to be one-dimensional and non-nested) while others: nxt = others.pop(0) # nxt is guaranteed list-like by above # GH 21950 - DeprecationWarning # only allowing Series/Index/np.ndarray[1-dim] will greatly # simply this function post-deprecation. if not (isinstance(nxt, (Series, Index)) or (isinstance(nxt, np.ndarray) and nxt.ndim == 1)): depr_warn = True if not isinstance(nxt, (DataFrame, Series, Index, np.ndarray)): # safety for non-persistent list-likes (e.g. 
iterators) # do not map indexed/typed objects; info needed below nxt = list(nxt) # known types for which we can avoid deep inspection no_deep = ((isinstance(nxt, np.ndarray) and nxt.ndim == 1) or isinstance(nxt, (Series, Index))) # nested list-likes are forbidden: # -> elements of nxt must not be list-like is_legal = ((no_deep and nxt.dtype == object) or all(not is_list_like(x) for x in nxt)) # DataFrame is false positive of is_legal # because "x in df" returns column names if not is_legal or isinstance(nxt, DataFrame): raise TypeError(err_msg) nxt, wnx = self._get_series_list(nxt, ignore_index=ignore_index) los = los + nxt join_warn = join_warn or wnx if depr_warn: warnings.warn('list-likes other than Series, Index, or ' 'np.ndarray WITHIN another list-like are ' 'deprecated and will be removed in a future ' 'version.', FutureWarning, stacklevel=3) return (los, join_warn) elif all(not is_list_like(x) for x in others): return ([Series(others, index=idx)], False) raise TypeError(err_msg)
python
def filter_genes_dispersion(data, flavor='seurat', min_disp=None, max_disp=None,
                            min_mean=None, max_mean=None, n_bins=20, n_top_genes=None,
                            log=True, copy=False):
    """Extract highly variable genes.

    The normalized dispersion is obtained by scaling with the mean and standard
    deviation of the dispersions for genes falling into a given bin for mean
    expression of genes. This means that for each bin of mean expression, highly
    variable genes are selected.

    Parameters
    ----------
    data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.sparse`
        The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond
        to cells and columns to genes.
    flavor : {'seurat', 'cell_ranger', 'svr'}, optional (default: 'seurat')
        Choose the flavor for computing normalized dispersion. If choosing
        'seurat', this expects non-logarithmized data - the logarithm of mean and
        dispersion is taken internally when `log` is at its default value `True`.
        For 'cell_ranger', this is usually called for logarithmized data - in
        this case you should set `log` to `False`. In their default workflows,
        Seurat passes the cutoffs whereas Cell Ranger passes `n_top_genes`.
    min_mean=0.0125, max_mean=3, min_disp=0.5, max_disp=`None` : `float`, optional
        If `n_top_genes` is not `None`, these cutoffs for the means and the
        normalized dispersions are ignored.
    n_bins : `int` (default: 20)
        Number of bins for binning the mean gene expression. Normalization is
        done with respect to each bin. If just a single gene falls into a bin,
        the normalized dispersion is artificially set to 1. You'll be informed
        about this if you set `settings.verbosity = 4`.
    n_top_genes : `int` or `None` (default: `None`)
        Number of highly-variable genes to keep.
    log : `bool`, optional (default: `True`)
        Use the logarithm of the mean to variance ratio.
    copy : `bool`, optional (default: `False`)
        If an :class:`~anndata.AnnData` is passed, determines whether a copy is
        returned.

    Returns
    -------
    If an AnnData `adata` is passed, returns or updates `adata` depending on \
    `copy`. It filters the `adata` and adds the annotations
    """
    adata = data.copy() if copy else data
    set_initial_size(adata)

    if n_top_genes is not None and adata.n_vars < n_top_genes:
        logg.info('Skip filtering by dispersion since number of variables are less than `n_top_genes`')
    else:
        if flavor == 'svr':
            mu = adata.X.mean(0).A1 if issparse(adata.X) else adata.X.mean(0)
            sigma = np.sqrt(adata.X.multiply(adata.X).mean(0).A1 - mu ** 2) if issparse(adata.X) else adata.X.std(0)
            log_mu = np.log2(mu)
            log_cv = np.log2(sigma / mu)

            from sklearn.svm import SVR
            clf = SVR(gamma=150. / len(mu))
            clf.fit(log_mu[:, None], log_cv)
            score = log_cv - clf.predict(log_mu[:, None])
            nth_score = np.sort(score)[::-1][n_top_genes]
            adata._inplace_subset_var(score >= nth_score)
        else:
            from scanpy.api.pp import filter_genes_dispersion
            filter_genes_dispersion(adata, flavor=flavor, min_disp=min_disp, max_disp=max_disp,
                                    min_mean=min_mean, max_mean=max_mean, n_bins=n_bins,
                                    n_top_genes=n_top_genes, log=log)

    return adata if copy else None
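A hypothetical call on an AnnData object `adata` holding raw counts:

    filter_genes_dispersion(adata, flavor='seurat', n_top_genes=2000)
    # or keep the original object untouched and work on a filtered copy:
    filtered = filter_genes_dispersion(adata, flavor='svr', n_top_genes=2000, copy=True)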
python
def uninstall(self, pkgname, *args, **kwargs): """A context manager which allows uninstallation of packages from the virtualenv :param str pkgname: The name of a package to uninstall >>> venv = VirtualEnv("/path/to/venv/root") >>> with venv.uninstall("pytz", auto_confirm=True, verbose=False) as uninstaller: cleaned = uninstaller.paths >>> if cleaned: print("uninstalled packages: %s" % cleaned) """ auto_confirm = kwargs.pop("auto_confirm", True) verbose = kwargs.pop("verbose", False) with self.activated(): pathset_base = self.get_monkeypatched_pathset() dist = next( iter(filter(lambda d: d.project_name == pkgname, self.get_working_set())), None ) pathset = pathset_base.from_dist(dist) if pathset is not None: pathset.remove(auto_confirm=auto_confirm, verbose=verbose) try: yield pathset except Exception as e: if pathset is not None: pathset.rollback() else: if pathset is not None: pathset.commit() if pathset is None: return
java
public byte minValue() { if (data.length == 0) return 0; byte min = Byte.MAX_VALUE; for (byte b : data) if (b < min) min = b; return min; }
python
def describe(self, tablename, refresh=False, metrics=False, require=False): """ Get the :class:`.TableMeta` for a table """ table = self.cached_descriptions.get(tablename) if refresh or table is None or (metrics and not table.consumed_capacity): desc = self.connection.describe_table(tablename) if desc is None: if require: raise RuntimeError("Table %r not found" % tablename) else: return None table = TableMeta.from_description(desc) self.cached_descriptions[tablename] = table if metrics: read, write = self.get_capacity(tablename) table.consumed_capacity["__table__"] = {"read": read, "write": write} for index_name in table.global_indexes: read, write = self.get_capacity(tablename, index_name) table.consumed_capacity[index_name] = {"read": read, "write": write} return table
python
def teams(self, page=None, year=None, simple=False, keys=False): """ Get list of teams. :param page: Page of teams to view. Each page contains 500 teams. :param year: View teams from a specific year. :param simple: Get only vital data. :param keys: Set to true if you only want the teams' keys rather than full data on them. :return: List of Team objects or string keys. """ # If the user has requested a specific page, get that page. if page is not None: if year: if keys: return self._get('teams/%s/%s/keys' % (year, page)) else: return [Team(raw) for raw in self._get('teams/%s/%s%s' % (year, page, '/simple' if simple else ''))] else: if keys: return self._get('teams/%s/keys' % page) else: return [Team(raw) for raw in self._get('teams/%s%s' % (page, '/simple' if simple else ''))] # If no page was specified, get all of them and combine. else: teams = [] target = 0 while True: page_teams = self.teams(page=target, year=year, simple=simple, keys=keys) if page_teams: teams.extend(page_teams) else: break target += 1 return teams
python
def create_vpnservice(subnet, router, name, admin_state_up=True, profile=None): ''' Creates a new VPN service CLI Example: .. code-block:: bash salt '*' neutron.create_vpnservice router-name name :param subnet: Subnet unique identifier for the VPN service deployment :param router: Router unique identifier for the VPN service :param name: Set a name for the VPN service :param admin_state_up: Set admin state up to true or false, default:True (Optional) :param profile: Profile to build on (Optional) :return: Created VPN service information ''' conn = _auth(profile) return conn.create_vpnservice(subnet, router, name, admin_state_up)
java
public void marshall(AdminListUserAuthEventsRequest adminListUserAuthEventsRequest, ProtocolMarshaller protocolMarshaller) { if (adminListUserAuthEventsRequest == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(adminListUserAuthEventsRequest.getUserPoolId(), USERPOOLID_BINDING); protocolMarshaller.marshall(adminListUserAuthEventsRequest.getUsername(), USERNAME_BINDING); protocolMarshaller.marshall(adminListUserAuthEventsRequest.getMaxResults(), MAXRESULTS_BINDING); protocolMarshaller.marshall(adminListUserAuthEventsRequest.getNextToken(), NEXTTOKEN_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } }
java
@Override public SubsetMove getRandomMove(SubsetSolution solution, Random rnd) { // check size limit if(maxSizeReached(solution)){ // size limit would be exceeded return null; } // get set of candidate IDs for addition (possibly fixed IDs are discarded) Set<Integer> addCandidates = getAddCandidates(solution); // check if addition is possible if(addCandidates.isEmpty()){ return null; } // select random ID to add to selection int add = SetUtilities.getRandomElement(addCandidates, rnd); // create and return addition move return new AdditionMove(add); }
java
public static String paragraph(int sentenceCount, boolean supplemental, int randomSentencesToAdd) { String paragraphString = sentences(sentenceCount + RandomUtils.nextInt(randomSentencesToAdd), supplemental); return paragraphString; }
java
public static IType getByRelativeName( String relativeName, ITypeUsesMap typeUses ) throws ClassNotFoundException { return CommonServices.getTypeSystem().getByRelativeName(relativeName, typeUses); }
java
private void measureFloater() { int specWidth = View.MeasureSpec.makeMeasureSpec(screenSize.x, View.MeasureSpec.EXACTLY); int specHeight = View.MeasureSpec.makeMeasureSpec(screenSize.y, View.MeasureSpec.AT_MOST); mPopupView.measure(specWidth, specHeight); }
java
private FileDetails getOldestInactive() { for (FileDetails curFd : fileList) { if (curFd.pid == null) return curFd ; synchronized(activeFilesMap) { FileDetails cur4Pid = activeFilesMap.get(curFd.pid) ; if ( cur4Pid == null || cur4Pid != curFd) { return curFd ; } } } return null ; }
java
public static Date getRandomDate(Date begin, Date end, Random random) { long delay = end.getTime() - begin.getTime(); return new Date(begin.getTime() + (Math.abs(random.nextLong() % delay))); }
java
@Override public ListTrafficPoliciesResult listTrafficPolicies(ListTrafficPoliciesRequest request) { request = beforeClientExecution(request); return executeListTrafficPolicies(request); }
python
def move_group(self, group, parent, index=None): """ Move group to be a child of new parent. :param group: The group to move. :type group: :class:`keepassdb.model.Group` :param parent: The new parent for the group. :type parent: :class:`keepassdb.model.Group` :param index: The 0-based index within the parent (defaults to appending group to end of parent's children). :type index: int """ if not isinstance(group, Group): raise TypeError("group param must be of type Group") if parent is not None and not isinstance(parent, Group): raise TypeError("parent param must be of type Group") if group is parent: raise ValueError("group and parent are the same") if parent is None: parent = self.root elif parent not in self.groups: raise exc.UnboundModelError("Parent group doesn't exist / is not bound to this database.") if group not in self.groups: raise exc.UnboundModelError("Group doesn't exist / is not bound to this database.") curr_parent = group.parent curr_parent.children.remove(group) if index is None: parent.children.append(group) self.log.debug("Moving {0!r} to child of {1!r}, (appending)".format(group, parent)) else: parent.children.insert(index, group) self.log.debug("Moving {0!r} to child of {1!r}, (at position {2!r})".format(group, parent, index)) #Recurse down and reset level of all moved nodes def set_level(g): g.level = g.parent.level + 1 for child in g.children: set_level(child) group.parent = parent set_level(group) group.modified = util.now() self._rebuild_groups()
java
public String haikunate() { if (tokenHex) { tokenChars = "0123456789abcdef"; } String adjective = randomString(adjectives); String noun = randomString(nouns); StringBuilder token = new StringBuilder(); if (tokenChars != null && tokenChars.length() > 0) { for (int i = 0; i < tokenLength; i++) { token.append(tokenChars.charAt(random.nextInt(tokenChars.length()))); } } return Stream.of(adjective, noun, token.toString()) .filter(s -> s != null && !s.isEmpty()) .collect(joining(delimiter)); }
java
@XmlElementDecl(namespace = "http://www.opengis.net/gml", name = "MultiSolidCoverage", substitutionHeadNamespace = "http://www.opengis.net/gml", substitutionHeadName = "_DiscreteCoverage") public JAXBElement<MultiSolidCoverageType> createMultiSolidCoverage(MultiSolidCoverageType value) { return new JAXBElement<MultiSolidCoverageType>(_MultiSolidCoverage_QNAME, MultiSolidCoverageType.class, null, value); }
python
def validate(self): """ Perform options validation. """ # TODO: at the moment only required_languages is validated. # Maybe check other options as well? if self.required_languages: if isinstance(self.required_languages, (tuple, list)): self._check_languages(self.required_languages) else: self._check_languages(self.required_languages.keys(), extra=('default',)) for fieldnames in self.required_languages.values(): if any(f not in self.fields for f in fieldnames): raise ImproperlyConfigured( 'Fieldname in required_languages which is not in fields option.')
python
def sync_hue_db(self, *servers): """ Synchronize the Hue server's database. @param servers: Name of Hue Server roles to synchronize. Not required starting with API v10. @return: List of submitted commands. """ actual_version = self._get_resource_root().version if actual_version < 10: return self._role_cmd('hueSyncDb', servers) return self._cmd('hueSyncDb', api_version=10)
java
@Override public <T> T onlyOne(Class<T> serviceClass) { Collection<T> all = all(serviceClass); if (all.size() == 1) { return all.iterator().next(); } if (all.size() > 1) { throw new IllegalStateException( "Multiple service implementations found for " + serviceClass + ": " + toClassString(all)); } return null; }
java
protected boolean internalEquals(ValueData another) { if (another instanceof LongValueData) { return ((LongValueData)another).value == value; } return false; }
python
def _translate_src_oprnd(self, operand): """Translate source operand to a SMT expression. """ if isinstance(operand, ReilRegisterOperand): return self._translate_src_register_oprnd(operand) elif isinstance(operand, ReilImmediateOperand): return smtsymbol.Constant(operand.size, operand.immediate) else: raise Exception("Invalid operand type")
java
public DeviceData command_inout(TacoTangoDevice tacoDevice, String command, DeviceData argin) throws DevFailed { Except.throw_exception("Api_TacoFailed", "Taco protocol not supported", "TacoTangoDeviceDAODefaultImpl.command_inout()"); return null; }
python
def _multicall(hook_impls, caller_kwargs, firstresult=False): """Execute a call into multiple python functions/methods and return the result(s). ``caller_kwargs`` comes from _HookCaller.__call__(). """ __tracebackhide__ = True results = [] excinfo = None try: # run impl and wrapper setup functions in a loop teardowns = [] try: for hook_impl in reversed(hook_impls): try: args = [caller_kwargs[argname] for argname in hook_impl.argnames] except KeyError: for argname in hook_impl.argnames: if argname not in caller_kwargs: raise HookCallError( "hook call must provide argument %r" % (argname,) ) if hook_impl.hookwrapper: try: gen = hook_impl.function(*args) next(gen) # first yield teardowns.append(gen) except StopIteration: _raise_wrapfail(gen, "did not yield") else: res = hook_impl.function(*args) if res is not None: results.append(res) if firstresult: # halt further impl calls break except BaseException: excinfo = sys.exc_info() finally: if firstresult: # first result hooks return a single value outcome = _Result(results[0] if results else None, excinfo) else: outcome = _Result(results, excinfo) # run all wrapper post-yield blocks for gen in reversed(teardowns): try: gen.send(outcome) _raise_wrapfail(gen, "has second yield") except StopIteration: pass return outcome.get_result()
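To see the call pattern this drives end to end, here is a minimal sketch using pluggy's public API (the project name "demo" and the hook name are made up); the wrapper's code around `yield` corresponds to the setup/teardown halves that _multicall runs before and after the plain implementations:

    import pluggy

    hookspec = pluggy.HookspecMarker("demo")
    hookimpl = pluggy.HookimplMarker("demo")

    class Spec:
        @hookspec
        def myhook(self, arg):
            """Return something derived from arg."""

    class Plugin:
        @hookimpl
        def myhook(self, arg):
            return arg + 1

    class Wrapper:
        @hookimpl(hookwrapper=True)
        def myhook(self, arg):
            outcome = yield              # the plain implementations run here
            print(outcome.get_result())  # prints [2]

    pm = pluggy.PluginManager("demo")
    pm.add_hookspecs(Spec)
    pm.register(Plugin())
    pm.register(Wrapper())
    pm.hook.myhook(arg=1)                # returns [2]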
java
public UserManagedCacheBuilder<K, V, T> withValueSerializingCopier() { UserManagedCacheBuilder<K, V, T> otherBuilder = new UserManagedCacheBuilder<>(this); otherBuilder.valueCopier = null; otherBuilder.useValueSerializingCopier = true; return otherBuilder; }
java
public String getLicenseKey() { SecurityManager sm = System.getSecurityManager(); if (sm != null) { sm.checkPermission(new HazelcastRuntimePermission("com.hazelcast.config.Config.getLicenseKey")); } return licenseKey; }
python
def rebase_all_branches(self): """ Rebase all branches, if possible. """ col_width = max(len(b.name) for b in self.branches) + 1 if self.repo.head.is_detached: raise GitError("You're not currently on a branch. I'm exiting" " in case you're in the middle of something.") original_branch = self.repo.active_branch with self.git.stasher() as stasher: for branch in self.branches: target = self.target_map[branch.name] # Print branch name if branch.name == original_branch.name: attrs = ['bold'] else: attrs = [] print(colored(branch.name.ljust(col_width), attrs=attrs), end=' ') # Check, if target branch exists try: if target.name.startswith('./'): # Check, if local branch exists self.git.rev_parse(target.name[2:]) else: # Check, if remote branch exists _ = target.commit except (ValueError, GitError): # Remote branch doesn't exist! print(colored('error: remote branch doesn\'t exist', 'red')) self.states.append('remote branch doesn\'t exist') continue # Get tracking branch if target.is_local: target = find(self.repo.branches, lambda b: b.name == target.name[2:]) # Check status and act appropriately if target.commit.hexsha == branch.commit.hexsha: print(colored('up to date', 'green')) self.states.append('up to date') continue # Do not do anything base = self.git.merge_base(branch.name, target.name) if base == target.commit.hexsha: print(colored('ahead of upstream', 'cyan')) self.states.append('ahead') continue # Do not do anything fast_fastforward = False if base == branch.commit.hexsha: print(colored('fast-forwarding...', 'yellow'), end='') self.states.append('fast-forwarding') # Don't fast fast-forward the currently checked-out branch fast_fastforward = (branch.name != self.repo.active_branch.name) elif not self.settings['rebase.auto']: print(colored('diverged', 'red')) self.states.append('diverged') continue # Do not do anything else: print(colored('rebasing', 'yellow'), end='') self.states.append('rebasing') if self.settings['rebase.show-hashes']: print(' {}..{}'.format(base[0:7], target.commit.hexsha[0:7])) else: print() self.log(branch, target) if fast_fastforward: branch.commit = target.commit else: stasher() self.git.checkout(branch.name) self.git.rebase(target) if (self.repo.head.is_detached # Only on Travis CI, # we get a detached head after doing our rebase *confused*. # Running self.repo.active_branch would fail. or not self.repo.active_branch.name == original_branch.name): print(colored('returning to {0}'.format(original_branch.name), 'magenta')) original_branch.checkout()
java
public void onUnknownHost(String host, SshPublicKey pk) { try { System.out.println("The host " + host + " is currently unknown to the system"); System.out.println("The MD5 host key " + "(" + pk.getAlgorithm() + ") fingerprint is: " + pk.getFingerprint()); System.out.println("The SHA1 host key " + "(" + pk.getAlgorithm() + ") fingerprint is: " + SshKeyFingerprint.getFingerprint(pk.getEncoded(), SshKeyFingerprint.SHA1_FINGERPRINT)); try { System.out.println("The SHA256 host key " + "(" + pk.getAlgorithm() + ") fingerprint is: " + SshKeyFingerprint.getFingerprint(pk.getEncoded(), SshKeyFingerprint.SHA256_FINGERPRINT)); } catch (Exception ex) { } getResponse(host, pk); } catch (Exception e) { e.printStackTrace(); } }
python
def rasterToPolygon(raster_file, polygon_file): """ Converts watershed raster to polygon and then dissolves it. It dissolves features based on the LINKNO attribute. """ log("Process: Raster to Polygon ...") time_start = datetime.utcnow() temp_polygon_file = \ "{0}_temp.shp".format( os.path.splitext(os.path.basename(polygon_file))[0]) GDALGrid(raster_file).to_polygon(out_shapefile=temp_polygon_file, fieldname="LINKNO", self_mask=True) log("Time to convert to polygon: {0}" .format(datetime.utcnow()-time_start)) log("Dissolving ...") time_start_dissolve = datetime.utcnow() ogr_polygin_shapefile = ogr.Open(temp_polygon_file) ogr_polygon_shapefile_lyr = ogr_polygin_shapefile.GetLayer() number_of_features = ogr_polygon_shapefile_lyr.GetFeatureCount() polygon_rivid_list = np.zeros(number_of_features, dtype=np.int32) for feature_idx, catchment_feature in \ enumerate(ogr_polygon_shapefile_lyr): polygon_rivid_list[feature_idx] = \ catchment_feature.GetField('LINKNO') shp_drv = ogr.GetDriverByName('ESRI Shapefile') # Remove output shapefile if it already exists if os.path.exists(polygon_file): shp_drv.DeleteDataSource(polygon_file) dissolve_shapefile = shp_drv.CreateDataSource(polygon_file) dissolve_layer = \ dissolve_shapefile.CreateLayer( '', ogr_polygon_shapefile_lyr.GetSpatialRef(), ogr.wkbPolygon) dissolve_layer.CreateField(ogr.FieldDefn('LINKNO', ogr.OFTInteger)) dissolve_layer_defn = dissolve_layer.GetLayerDefn() for unique_rivid in np.unique(polygon_rivid_list): # get indices where it is in the polygon feature_indices = np.where(polygon_rivid_list == unique_rivid)[0] new_feat = ogr.Feature(dissolve_layer_defn) new_feat.SetField('LINKNO', int(unique_rivid)) if len(feature_indices) == 1: # write feature to file feature = \ ogr_polygon_shapefile_lyr.GetFeature(feature_indices[0]) new_feat.SetGeometry(feature.GetGeometryRef()) else: # dissolve dissolve_poly_list = [] for feature_index in feature_indices: feature = \ ogr_polygon_shapefile_lyr.GetFeature(feature_index) feat_geom = feature.GetGeometryRef() dissolve_poly_list.append( shapely_loads(feat_geom.ExportToWkb())) dissolve_polygon = cascaded_union(dissolve_poly_list) new_feat.SetGeometry( ogr.CreateGeometryFromWkb(dissolve_polygon.wkb)) dissolve_layer.CreateFeature(new_feat) # clean up shp_drv.DeleteDataSource(temp_polygon_file) log("Time to dissolve: {0}".format(datetime.utcnow() - time_start_dissolve)) log("Total time to convert: {0}".format(datetime.utcnow() - time_start))
java
public void init(final String path) throws IOException {
    SecurityHelper.doPrivilegedIOExceptionAction(new PrivilegedExceptionAction<Object>() {
        public Object run() throws Exception {
            baseDir = new File(path);
            return null;
        }
    });
}
python
def parse_runtime_limit(value, now=None):
    """Parse the CLI option for a runtime limit, supplied as VALUE.

    Value could be something like: Sunday 23:00-05:00, the format being
    [Wee[kday]] [hh[:mm][-hh[:mm]]]. The function returns two valid time
    ranges. The first may lie in the past, contain the present, or lie in
    the future. The second is always in the future.
    """

    def extract_time(value):
        value = _RE_RUNTIMELIMIT_HOUR.search(value).groupdict()
        return timedelta(hours=int(value['hours']),
                         minutes=int(value['minutes']))

    def extract_weekday(value):
        key = value[:3].lower()
        try:
            return {
                'mon': 0,
                'tue': 1,
                'wed': 2,
                'thu': 3,
                'fri': 4,
                'sat': 5,
                'sun': 6,
            }[key]
        except KeyError:
            raise ValueError("%s is not a good weekday name." % value)

    if now is None:
        now = datetime.now()

    today = now.date()

    g = _RE_RUNTIMELIMIT_FULL.search(value)
    if not g:
        raise ValueError('"%s" does not seem to be correct format for '
                         'parse_runtime_limit() '
                         '[Wee[kday]] [hh[:mm][-hh[:mm]]]).' % value)
    pieces = g.groupdict()

    if pieces['weekday_begin'] is None:
        # No weekday specified. So either today or tomorrow
        first_occasion_day = timedelta(days=0)
        next_occasion_delta = timedelta(days=1)
    else:
        # If given 'Mon' then we transform it to 'Mon-Mon'
        if pieces['weekday_end'] is None:
            pieces['weekday_end'] = pieces['weekday_begin']

        # Day range
        weekday_begin = extract_weekday(pieces['weekday_begin'])
        weekday_end = extract_weekday(pieces['weekday_end'])

        if weekday_begin <= today.weekday() <= weekday_end:
            first_occasion_day = timedelta(days=0)
        else:
            days = (weekday_begin - today.weekday()) % 7
            first_occasion_day = timedelta(days=days)

        weekday = (now + first_occasion_day).weekday()
        if weekday < weekday_end:
            # Fits in the same week
            next_occasion_delta = timedelta(days=1)
        else:
            # The week after
            days = weekday_begin - weekday + 7
            next_occasion_delta = timedelta(days=days)

    if pieces['hour_begin'] is None:
        pieces['hour_begin'] = '00:00'
    if pieces['hour_end'] is None:
        pieces['hour_end'] = '00:00'

    beginning_time = extract_time(pieces['hour_begin'])
    ending_time = extract_time(pieces['hour_end'])

    if not ending_time:
        ending_time = beginning_time + timedelta(days=1)
    elif beginning_time and ending_time and beginning_time > ending_time:
        ending_time += timedelta(days=1)

    start_time = real_datetime.combine(today, real_time(hour=0, minute=0))
    current_range = (
        start_time + first_occasion_day + beginning_time,
        start_time + first_occasion_day + ending_time
    )
    if now > current_range[1]:
        current_range = tuple(t + next_occasion_delta for t in current_range)

    future_range = (
        current_range[0] + next_occasion_delta,
        current_range[1] + next_occasion_delta
    )
    return current_range, future_range
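A usage sketch, assuming the module-level _RE_RUNTIMELIMIT_* regular expressions and the real_datetime/real_time aliases are defined as the function expects; the reference date is a hypothetical choice to make the result reproducible:

from datetime import datetime

# Monday noon as a fixed reference point.
now = datetime(2024, 5, 6, 12, 0)
current, future = parse_runtime_limit('Sunday 23:00-05:00', now=now)
# `current` should span the upcoming Sunday 23:00 through Monday 05:00;
# `future` is the same window one week later.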
python
def inference(self, kern, X, likelihood, Y, Y_metadata=None):
    """
    Returns a GridPosterior class containing essential quantities of the posterior
    """
    N = X.shape[0]  # number of training points
    D = X.shape[1]  # number of dimensions

    Kds = np.zeros(D, dtype=object)  # vector for holding covariance per dimension
    Qs = np.zeros(D, dtype=object)   # vector for holding eigenvectors of covariance per dimension
    QTs = np.zeros(D, dtype=object)  # vector for holding transposed eigenvectors of covariance per dimension
    V_kron = 1  # kronecker product of eigenvalues

    # retrieve the one-dimensional variation of the designated kernel
    oneDkernel = kern.get_one_dimensional_kernel(D)

    for d in range(D):
        xg = list(set(X[:, d]))  # extract unique values for a dimension
        xg = np.reshape(xg, (len(xg), 1))
        oneDkernel.lengthscale = kern.lengthscale[d]
        Kds[d] = oneDkernel.K(xg)
        [V, Q] = np.linalg.eig(Kds[d])
        V_kron = np.kron(V_kron, V)
        Qs[d] = Q
        QTs[d] = Q.T

    noise = likelihood.variance + 1e-8

    alpha_kron = self.kron_mvprod(QTs, Y)
    V_kron = V_kron.reshape(-1, 1)
    alpha_kron = alpha_kron / (V_kron + noise)
    alpha_kron = self.kron_mvprod(Qs, alpha_kron)

    log_likelihood = -0.5 * (np.dot(Y.T, alpha_kron) +
                             np.sum((np.log(V_kron + noise))) +
                             N * log_2_pi)

    # compute derivatives wrt the hyperparameters Theta
    derivs = np.zeros(D + 2, dtype='object')
    for t in range(len(derivs)):
        dKd_dTheta = np.zeros(D, dtype='object')
        gamma = np.zeros(D, dtype='object')
        gam = 1
        for d in range(D):
            xg = list(set(X[:, d]))
            xg = np.reshape(xg, (len(xg), 1))
            oneDkernel.lengthscale = kern.lengthscale[d]
            if t < D:
                dKd_dTheta[d] = oneDkernel.dKd_dLen(xg, (t == d),
                                                    lengthscale=kern.lengthscale[t])  # derivative wrt lengthscale
            elif (t == D):
                dKd_dTheta[d] = oneDkernel.dKd_dVar(xg)  # derivative wrt variance
            else:
                dKd_dTheta[d] = np.identity(len(xg))  # derivative wrt noise
            gamma[d] = np.diag(np.dot(np.dot(QTs[d], dKd_dTheta[d].T), Qs[d]))
            gam = np.kron(gam, gamma[d])

        gam = gam.reshape(-1, 1)
        kappa = self.kron_mvprod(dKd_dTheta, alpha_kron)
        derivs[t] = 0.5 * np.dot(alpha_kron.T, kappa) - \
            0.5 * np.sum(gam / (V_kron + noise))

    # separate derivatives
    dL_dLen = derivs[:D]
    dL_dVar = derivs[D]
    dL_dThetaL = derivs[D + 1]

    return GridPosterior(alpha_kron=alpha_kron, QTs=QTs, Qs=Qs,
                         V_kron=V_kron), \
        log_likelihood, {'dL_dLen': dL_dLen, 'dL_dVar': dL_dVar,
                         'dL_dthetaL': dL_dThetaL}
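The inference above leans on self.kron_mvprod, which is not shown here. A minimal sketch of the standard Kronecker matrix-vector product it presumably implements, applying one small factor at a time instead of forming the full N x N matrix; the function name and the column-major vector convention are assumptions:

import numpy as np

def kron_mvprod_sketch(As, b):
    # Compute (A_1 kron A_2 kron ... kron A_D) @ b one factor at a time.
    x = np.asarray(b).reshape(-1)
    N = x.size
    for A in reversed(As):
        Gd = A.shape[0]
        X = x.reshape(Gd, N // Gd, order='F')  # column-major "vec" convention
        x = (A @ X).T.flatten(order='F')
    return x.reshape(-1, 1)

# Sanity check against the explicit Kronecker product on a tiny example.
rng = np.random.default_rng(0)
A1, A2 = rng.normal(size=(3, 3)), rng.normal(size=(4, 4))
b = rng.normal(size=(12, 1))
assert np.allclose(np.kron(A1, A2) @ b, kron_mvprod_sketch([A1, A2], b))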
python
def draw_tree_grid(self,
                   nrows=None,
                   ncols=None,
                   start=0,
                   fixed_order=False,
                   shared_axis=False,
                   **kwargs):
    """
    Draw a slice of nrows*ncols trees into a non-overlapping grid.

    Parameters:
    -----------
    nrows (int):
        Number of grid cells in the y dimension. Default=automatically set.
    ncols (int):
        Number of grid cells in the x dimension. Default=automatically set.
    start (int):
        Starting index of the tree slice from .treelist.
    kwargs (dict):
        Toytree .draw() arguments as a dictionary.
    """
    # return nothing if the treelist is empty
    if not self.treelist:
        print("Treelist is empty")
        return None, None

    # make a copy of the treelist so we don't modify the original
    if not fixed_order:
        treelist = self.copy().treelist
    else:
        if fixed_order is True:
            fixed_order = self.treelist[0].get_tip_labels()
        treelist = [
            ToyTree(i, fixed_order=fixed_order)
            for i in self.copy().treelist
        ]

    # apply kwargs styles to the individual tree styles
    for tree in treelist:
        tree.style.update(kwargs)

    # get reasonable values for nrows, ncols given the treelist length
    if not (ncols or nrows):
        ncols = 5
        nrows = 1
    elif not (ncols and nrows):
        if ncols:
            if ncols == 1:
                if self.ntrees <= 5:
                    nrows = self.ntrees
                else:
                    nrows = 2
            else:
                if self.ntrees <= 10:
                    nrows = 2
                else:
                    nrows = 3
        if nrows:
            if nrows == 1:
                if self.ntrees <= 5:
                    ncols = self.ntrees
                else:
                    ncols = 5
            else:
                if self.ntrees <= 10:
                    ncols = 5
                else:
                    ncols = 3
    else:
        pass

    # Return the TreeGrid object for debugging
    draw = TreeGrid(treelist)
    if kwargs.get("debug"):
        return draw

    # Call update to draw the plot. Kwargs still here for width, height, axes
    canvas, axes = draw.update(nrows, ncols, start, shared_axis, **kwargs)
    return canvas, axes
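A brief usage sketch, assuming mtre is the multi-tree container this method is defined on (built elsewhere from a list of trees); the arguments are illustrative:

# Draw the first 10 trees in a 2 x 5 grid with a shared tip order.
canvas, axes = mtre.draw_tree_grid(nrows=2, ncols=5, start=0, fixed_order=True)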
java
public static void run(final Properties properties) throws IOException {
    // read parameters for output (do this at the beginning to avoid unnecessary reading)
    File outputFile = new File(properties.getProperty(OUTPUT_FILE));
    Boolean overwrite = Boolean.parseBoolean(properties.getProperty(OUTPUT_OVERWRITE, "false"));
    PrintStream outStatistics = null;
    if (outputFile.exists() && !overwrite) {
        throw new IllegalArgumentException("Cannot generate statistics because " + outputFile
                + " exists and overwrite is " + overwrite);
    } else {
        outStatistics = new PrintStream(outputFile, "UTF-8");
    }

    // read format
    String format = properties.getProperty(INPUT_FORMAT);

    // read users to avoid
    String[] usersToAvoidArray = properties.getProperty(AVOID_USERS, "").split(",");
    Set<String> usersToAvoid = new HashSet<String>();
    for (String u : usersToAvoidArray) {
        usersToAvoid.add(u);
    }

    try {
        // read baseline <-- this file is mandatory
        File baselineFile = new File(properties.getProperty(BASELINE_FILE));
        Map<String, Map<String, Double>> baselineMapMetricUserValues =
                readMetricFile(baselineFile, format, usersToAvoid);

        // read methods <-- at least one file should be provided
        String[] methodFiles = properties.getProperty(TEST_METHODS_FILES).split(",");
        if (methodFiles.length < 1) {
            throw new IllegalArgumentException("At least one test file should be provided!");
        }
        Map<String, Map<String, Map<String, Double>>> methodsMapMetricUserValues =
                new HashMap<String, Map<String, Map<String, Double>>>();
        for (String m : methodFiles) {
            File file = new File(m);
            Map<String, Map<String, Double>> mapMetricUserValues =
                    readMetricFile(file, format, usersToAvoid);
            methodsMapMetricUserValues.put(m, mapMetricUserValues);
        }

        run(properties, outStatistics, baselineFile.getName(),
                baselineMapMetricUserValues, methodsMapMetricUserValues);
    } finally {
        // close files
        outStatistics.close();
    }
}
java
@Override
public QueryObjectsResult queryObjects(QueryObjectsRequest request) {
    request = beforeClientExecution(request);
    return executeQueryObjects(request);
}
python
def split_pulls(all_issues, project="arokem/python-matlab-bridge"):
    """split a list of closed issues into non-PR Issues and Pull Requests"""
    pulls = []
    issues = []
    for i in all_issues:
        if is_pull_request(i):
            pull = get_pull_request(project, i['number'], auth=True)
            pulls.append(pull)
        else:
            issues.append(i)
    return issues, pulls
java
public void init(FilterConfig filterConfig) throws ServletException {
    // need the Context for Logging, instantiating ClassLoader, etc
    ServletContextAccess sca = new ServletContextAccess(filterConfig);
    if (access == null) {
        access = sca;
    }

    // Set Protected getter with base Access, for internal class instantiations
    init(new FCGet(access, sca.context(), filterConfig));
}
java
@Pure
public boolean isTemporaryChange() {
    final Object src = getSource();
    if (src instanceof MapLayer) {
        return ((MapLayer) src).isTemporaryLayer();
    }
    return false;
}
python
def violin(adata, keys, groupby=None, log=False, use_raw=None, stripplot=True, jitter=True,
           size=1, scale='width', order=None, multi_panel=None, show=None,
           xlabel='', rotation=None, save=None, ax=None, **kwds):
    """\
    Violin plot.

    Wraps `seaborn.violinplot` for :class:`~anndata.AnnData`.

    Parameters
    ----------
    adata : :class:`~anndata.AnnData`
        Annotated data matrix.
    keys : `str` or list of `str`
        Keys for accessing variables of `.var_names` or fields of `.obs`.
    groupby : `str` or `None`, optional (default: `None`)
        The key of the observation grouping to consider.
    log : `bool`, optional (default: `False`)
        Plot on logarithmic axis.
    use_raw : `bool`, optional (default: `None`)
        Use `raw` attribute of `adata` if present.
    multi_panel : `bool`, optional (default: `False`)
        Display keys in multiple panels also when `groupby is not None`.
    stripplot : `bool`, optional (default: `True`)
        Add a stripplot on top of the violin plot.
        See `seaborn.stripplot`.
    jitter : `float` or `bool`, optional (default: `True`)
        Add jitter to the stripplot (only when stripplot is True).
        See `seaborn.stripplot`.
    size : int, optional (default: 1)
        Size of the jitter points.
    order : list of str, optional (default: `True`)
        Order in which to show the categories.
    scale : {{'area', 'count', 'width'}}, optional (default: 'width')
        The method used to scale the width of each violin. If 'area', each
        violin will have the same area. If 'count', the width of the violins
        will be scaled by the number of observations in that bin. If 'width',
        each violin will have the same width.
    xlabel : `str`, optional (default: `''`)
        Label of the x axis. Defaults to `groupby` if `rotation` is `None`,
        otherwise, no label is shown.
    rotation : `float`, optional (default: `None`)
        Rotation of xtick labels.
    {show_save_ax}
    **kwds : keyword arguments
        Are passed to `seaborn.violinplot`.

    Returns
    -------
    A :class:`~matplotlib.axes.Axes` object if `ax` is `None` else `None`.
    """
    sanitize_anndata(adata)
    if use_raw is None and adata.raw is not None:
        use_raw = True
    if isinstance(keys, str):
        keys = [keys]
    obs_keys = False
    for key in keys:
        if key in adata.obs_keys():
            obs_keys = True
        if obs_keys and key not in set(adata.obs_keys()):
            raise ValueError(
                'Either use observation keys or variable names, but do not mix. '
                'Did not find {} in adata.obs_keys().'.format(key))
    if obs_keys:
        obs_df = adata.obs
    else:
        if groupby is None:
            obs_df = pd.DataFrame()
        else:
            obs_df = pd.DataFrame(adata.obs[groupby])
        for key in keys:
            if adata.raw is not None and use_raw:
                X_col = adata.raw[:, key].X
            else:
                X_col = adata[:, key].X
            obs_df[key] = X_col
    if groupby is None:
        obs_tidy = pd.melt(obs_df, value_vars=keys)
        x = 'variable'
        ys = ['value']
    else:
        obs_tidy = obs_df
        x = groupby
        ys = keys
    if multi_panel:
        if groupby is None and len(ys) == 1:
            # This is a quick and dirty way for adapting scales across several
            # keys if groupby is None.
            y = ys[0]
            g = sns.FacetGrid(obs_tidy, col=x, col_order=keys, sharey=False)
            # don't really know why this gives a warning without passing `order`
            g = g.map(sns.violinplot, y, inner=None, orient='vertical',
                      scale=scale, order=keys, **kwds)
            if stripplot:
                g = g.map(sns.stripplot, y, orient='vertical', jitter=jitter,
                          size=size, order=keys, color='black')
            if log:
                g.set(yscale='log')
            g.set_titles(col_template='{col_name}').set_xlabels('')
            if rotation is not None:
                for ax in g.axes[0]:
                    ax.tick_params(labelrotation=rotation)
    else:
        if ax is None:
            axs, _, _, _ = setup_axes(
                ax=ax, panels=['x'] if groupby is None else keys,
                show_ticks=True, right_margin=0.3)
        else:
            axs = [ax]
        for ax, y in zip(axs, ys):
            ax = sns.violinplot(x, y=y, data=obs_tidy, inner=None, order=order,
                                orient='vertical', scale=scale, ax=ax, **kwds)
            if stripplot:
                ax = sns.stripplot(x, y=y, data=obs_tidy, order=order,
                                   jitter=jitter, color='black', size=size, ax=ax)
            if xlabel == '' and groupby is not None and rotation is None:
                xlabel = groupby.replace('_', ' ')
            ax.set_xlabel(xlabel)
            if log:
                ax.set_yscale('log')
            if rotation is not None:
                ax.tick_params(labelrotation=rotation)
    utils.savefig_or_show('violin', show=show, save=save)
    if show is False:
        if multi_panel:
            return g
        elif len(axs) == 1:
            return axs[0]
        else:
            return axs
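A hedged usage sketch; the AnnData object and the column names ('n_genes', 'louvain') are hypothetical placeholders:

# Assumes `adata` is an AnnData object whose .obs carries an 'n_genes' column
# and a 'louvain' grouping; returns the axes when show=False.
ax = violin(adata, keys='n_genes', groupby='louvain', rotation=45, show=False)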
java
public void setLabel(java.lang.String label) {
    getStateHelper().put(PropertyKeys.label, label);
}
java
protected void writeEntityMetawidget(final Map<Object, Object> context, final int entityMetawidgetIndent,
        final Map<String, String> existingNamespaces) {
    StringWriter stringWriter = new StringWriter();
    this.entityMetawidget.write(stringWriter, entityMetawidgetIndent);
    context.put("metawidget", stringWriter.toString().trim());

    Map<String, String> namespaces = this.entityMetawidget.getNamespaces();
    namespaces.keySet().removeAll(existingNamespaces.keySet());
    context.put("metawidgetNamespaces", namespacesToString(namespaces));
}
python
def GetDateRange(self):
    """Return the range over which this ServicePeriod is valid.

    The range includes exception dates that add service outside of
    (start_date, end_date), but doesn't shrink the range if exception
    dates take away service at the edges of the range.

    Returns:
      A tuple of "YYYYMMDD" strings, (start date, end date) or (None, None)
      if no dates have been given.
    """
    start = self.start_date
    end = self.end_date

    for date, (exception_type, _) in self.date_exceptions.items():
        if exception_type == self._EXCEPTION_TYPE_REMOVE:
            continue
        if not start or (date < start):
            start = date
        if not end or (date > end):
            end = date
    if start is None:
        start = end
    elif end is None:
        end = start
    # If start and end are None we did a little harmless shuffling
    return (start, end)
java
boolean isAuthorized(String accountId) {
    try {
        DataStore<StoredCredential> sc = StoredCredential.getDefaultDataStore(dataStoreFactory);
        return sc.containsKey(accountId);
    } catch (IOException e) {
        return false;
    }
}
python
def delete_rater(self):
    """Action: create dialog to delete rater."""
    answer = QInputDialog.getText(self, 'Delete Rater',
                                  'Enter rater\'s name')
    if answer[1]:
        self.annot.remove_rater(answer[0])
        self.display_notes()
        self.parent.create_menubar()
python
def set_cognitive_process(self, grade_id):
    """Sets the cognitive process.

    arg:    grade_id (osid.id.Id): the new cognitive process
    raise:  InvalidArgument - ``grade_id`` is invalid
    raise:  NoAccess - ``grade_id`` cannot be modified
    raise:  NullArgument - ``grade_id`` is ``null``
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for osid.resource.ResourceForm.set_avatar_template
    if self.get_cognitive_process_metadata().is_read_only():
        raise errors.NoAccess()
    if not self._is_valid_id(grade_id):
        raise errors.InvalidArgument()
    self._my_map['cognitiveProcessId'] = str(grade_id)
python
def get_used_in_func(node):
    '''Get names, but ignore variable names on the left-hand side.

    That is to say, in the case of a = b + 1 we consider b as
    "being accessed", while a is not.
    '''
    if isinstance(node, ast.FunctionDef):
        return {node.name: get_accessed(node.body)}

    names = {}
    for node in ast.iter_child_nodes(node):
        names.update(get_used_in_func(node))
    return names
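A small usage sketch, assuming the companion get_accessed helper (not shown here) returns the set of names a function body reads; the expected output is therefore only indicative:

import ast

source = """
def f():
    a = b + 1
"""
tree = ast.parse(source)
# Walks the module, finds the FunctionDef and maps its name to the
# accessed names, roughly {'f': {'b'}} -- 'a' is only assigned.
print(get_used_in_func(tree))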
java
public OvhOrder dedicated_nasha_new_duration_POST(String duration, OvhNasHAZoneEnum datacenter, OvhNasHAOfferEnum model) throws IOException {
    String qPath = "/order/dedicated/nasha/new/{duration}";
    StringBuilder sb = path(qPath, duration);
    HashMap<String, Object> o = new HashMap<String, Object>();
    addBody(o, "datacenter", datacenter);
    addBody(o, "model", model);
    String resp = exec(qPath, "POST", sb.toString(), o);
    return convertTo(resp, OvhOrder.class);
}
python
def encode(self, name, as_map_key=False):
    """Returns the name the first time and the key after that"""
    if name in self.key_to_value:
        return self.key_to_value[name]
    return self.encache(name) if is_cacheable(name, as_map_key) else name
python
def do_decode(cls, obj, obj_type):
    # type: (Any, ConjureTypeType) -> Any
    """Decodes json into the specified type

    Args:
        obj: the json object to decode
        obj_type: a class object which is the type we're decoding into.
    """
    if inspect.isclass(obj_type) and issubclass(  # type: ignore
        obj_type, ConjureBeanType
    ):
        return cls.decode_conjure_bean_type(obj, obj_type)  # type: ignore

    elif inspect.isclass(obj_type) and issubclass(  # type: ignore
        obj_type, ConjureUnionType
    ):
        return cls.decode_conjure_union_type(obj, obj_type)

    elif inspect.isclass(obj_type) and issubclass(  # type: ignore
        obj_type, ConjureEnumType
    ):
        return cls.decode_conjure_enum_type(obj, obj_type)

    elif isinstance(obj_type, DictType):
        return cls.decode_dict(obj, obj_type.key_type, obj_type.value_type)

    elif isinstance(obj_type, ListType):
        return cls.decode_list(obj, obj_type.item_type)

    elif isinstance(obj_type, OptionalType):
        return cls.decode_optional(obj, obj_type.item_type)

    return cls.decode_primitive(obj, obj_type)
java
private int getToolbarHeightAdjustment(boolean bToolbarShown) {
    int adjustAmount = 0;
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) {
        boolean translucentStatus = false;
        // check theme attrs to see if translucent statusbar is set explicitly
        int[] attrs = {android.R.attr.windowTranslucentStatus};
        TypedArray a = getTheme().obtainStyledAttributes(attrs);
        try {
            translucentStatus = a.getBoolean(0, false);
        } finally {
            a.recycle();
        }

        // also check window flags in case translucent statusbar is set implicitly
        WindowManager.LayoutParams winParams = getWindow().getAttributes();
        int bits = WindowManager.LayoutParams.FLAG_TRANSLUCENT_STATUS;
        if ((winParams.flags & bits) != 0) {
            translucentStatus = true;
        }

        if (translucentStatus) {
            if (bToolbarShown) {
                int resourceId = getResources().getIdentifier("status_bar_height", "dimen", "android");
                if (resourceId > 0) {
                    adjustAmount = getResources().getDimensionPixelSize(resourceId);
                }
            }

            /* Add a layout listener to ensure a keyboard launch resizes the screen
             * when android:windowTranslucentStatus=true.
             * Workaround found here:
             * http://stackoverflow.com/questions/8398102/androidwindowsoftinputmode-adjustresize-doesnt-make-any-difference
             */
            decorView = getWindow().getDecorView();
            contentView = decorView.findViewById(android.R.id.content);
            decorView.getViewTreeObserver().addOnGlobalLayoutListener(keyboardPresencelLayoutListener);
        }
    }
    return adjustAmount;
}
java
private int nextChar() {
    String filter = mFilter;
    int pos = mPos;
    int c = (pos >= filter.length()) ? -1 : mFilter.charAt(pos);
    mPos = pos + 1;
    return c;
}
java
public static double loss(double pred, double y) {
    final double x = -y * pred;
    // As x -> inf, L(x) -> x. At x = 30, exp(x) is O(10^13) and getting unstable,
    // while L(x) - x is only O(10^-14); returning x avoids the exp and log ops.
    if (x >= 30)
        return x;
    else if (x <= -30)
        return 0;
    return log(1 + exp(x));
}
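A quick numerical check (in Python, for brevity) of the 30-unit cutoff quoted in the comment above; the figures are easy to reproduce:

import math

x = 30.0
print(math.exp(x))               # ~1.07e13, the size of exp(x) at the cutoff
print(math.log1p(math.exp(-x)))  # ~9.4e-14, the error of approximating L(x) by x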
python
def vagalume(song):
    """
    Returns the lyrics found in vagalume.com.br for the specified mp3 file or
    an empty string if not found.
    """
    translate = {
        '@': 'a',
        URLESCAPE: '',
        ' ': '-'
    }
    artist = song.artist.lower()
    artist = normalize(artist, translate)
    artist = re.sub(r'\-{2,}', '-', artist)
    title = song.title.lower()
    title = normalize(title, translate)
    title = re.sub(r'\-{2,}', '-', title)

    url = 'https://www.vagalume.com.br/{}/{}.html'.format(artist, title)
    soup = get_url(url)
    body = soup.select('div#lyrics')
    if body == []:
        return ''

    content = body[0]
    for br in content.find_all('br'):
        br.replace_with('\n')

    return content.get_text().strip()
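A usage sketch; the minimal Song stand-in and the artist/title are hypothetical placeholders, and the call needs network access:

from collections import namedtuple

Song = namedtuple('Song', ['artist', 'title'])
lyrics = vagalume(Song(artist='Caetano Veloso', title='Sozinho'))
print(lyrics[:100] if lyrics else 'lyrics not found')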
java
private IMolecularFormulaSet returnOrdered(double mass, IMolecularFormulaSet formulaSet) {
    IMolecularFormulaSet solutions_new = null;

    if (formulaSet.size() != 0) {
        double valueMin = 100;
        int i_final = 0;
        solutions_new = formulaSet.getBuilder().newInstance(IMolecularFormulaSet.class);
        List<Integer> listI = new ArrayList<Integer>();
        // Selection sort: on each pass pick the not-yet-used formula whose
        // exact mass is closest to the target mass.
        for (int j = 0; j < formulaSet.size(); j++) {
            for (int i = 0; i < formulaSet.size(); i++) {
                if (listI.contains(i)) continue;
                double value = MolecularFormulaManipulator.getTotalExactMass(formulaSet.getMolecularFormula(i));
                double diff = Math.abs(mass - Math.abs(value));
                if (valueMin > diff) {
                    valueMin = diff;
                    i_final = i;
                }
            }
            valueMin = 100; // reset the sentinel for the next pass
            solutions_new.addMolecularFormula(formulaSet.getMolecularFormula(i_final));
            listI.add(i_final);
        }
    }
    return solutions_new;
}