Columns:
language: string (2 classes)
func_code_string: string (length 63 to 466k)
python
def _filter(self, text):
    """Filter markdown."""
    self.markdown.reset()
    return self.markdown.convert(text)
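A minimal usage sketch (not part of the dataset) of the markdown API this filter wraps: Markdown.reset() clears per-document state so a single converter instance can safely process many texts, which is why the filter resets before each convert.
python
import markdown

md = markdown.Markdown()
md.reset()                            # clear any per-document state
html = md.convert("**bold** text")    # '<p><strong>bold</strong> text</p>'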
python
def sunionstore(self, dest, keys, *args):
    """
    Store the union of sets specified by ``keys`` into a new
    set named ``dest``. Returns the number of members in the new set.
    """
    keys = [self.redis_key(k) for k in self._parse_values(keys, args)]
    with self.pipe as pipe:
        return pipe.sunionstore(self.redis_key(dest), *keys)
java
public final String csv() {
    StringBuilder sb = new StringBuilder();
    String header = "#RunID, ClientID, MsgCount, MsgBytes, MsgsPerSec, BytesPerSec, DurationSecs";
    sb.append(String.format("%s stats: %s\n", name, this));
    sb.append(header);
    sb.append("\n");
    sb.append(csvLines(subs, "S"));
    sb.append(csvLines(pubs, "P"));
    return sb.toString();
}
python
def get_earth_radii(self):
    """Get earth radii from prologue.

    Returns:
        Equatorial radius, polar radius [m]
    """
    earth_model = self.prologue['GeometricProcessing']['EarthModel']
    a = earth_model['EquatorialRadius'] * 1000
    b = (earth_model['NorthPolarRadius'] +
         earth_model['SouthPolarRadius']) / 2.0 * 1000
    return a, b
java
public PerfCounter get(String counter) {
    // Admitted: we could get a little race condition at the very beginning, but
    // all that'll happen is that we'll lose a handful of tracking events, a loss
    // far outweighed by the overall reduced contention.
    if (!this.Counters.containsKey(counter))
        this.Counters.put(counter, new PerfCounter(false));
    return this.Counters.get(counter);
}
java
public static Matcher<Result> contains(final String string) {
    return new Matcher<Result>() {
        @Override
        public Result matches(String input, boolean isEof) {
            int pos = input.indexOf(string);
            return pos != -1
                    ? success(input, input.substring(0, pos), string)
                    : failure(input, false);
        }

        @Override
        public String toString() {
            return generateToString("contains", string);
        }
    };
}
java
public void cdataProperty(final QName tag, final String val) throws IOException {
    blanks();
    openTagSameLine(tag);
    cdataValue(val);
    closeTagSameLine(tag);
    newline();
}
java
public int put(Object value) {
    value = makePoolValue(value);
    Assert.check(!(value instanceof Type.TypeVar));
    Assert.check(!(value instanceof Types.UniqueType &&
                   ((UniqueType) value).type instanceof Type.TypeVar));
    Integer index = indices.get(value);
    if (index == null) {
        index = pp;
        indices.put(value, index);
        pool = ArrayUtils.ensureCapacity(pool, pp);
        pool[pp++] = value;
        if (value instanceof Long || value instanceof Double) {
            // longs and doubles occupy two constant pool slots
            pool = ArrayUtils.ensureCapacity(pool, pp);
            pool[pp++] = null;
        }
    }
    return index.intValue();
}
java
private void computeStackTraceInformation(
        StackTraceFilter stackTraceFilter, Throwable stackTraceHolder, boolean isInline) {
    StackTraceElement filtered = stackTraceFilter.filterFirst(stackTraceHolder, isInline);

    // There are corner cases where an exception can have a null or empty stack
    // trace; for example, a custom exception can override the getStackTrace() method.
    if (filtered == null) {
        this.stackTraceLine = "-> at <<unknown line>>";
        this.sourceFile = "<unknown source file>";
    } else {
        this.stackTraceLine = "-> at " + filtered.toString();
        this.sourceFile = filtered.getFileName();
    }
}
java
public void submitAnswer(JSONObject answer, String realm) {
    if (answers == null) {
        answers = new JSONObject();
    }
    try {
        answers.put(realm, answer);
        if (isAnswersFilled()) {
            resendRequest();
        }
    } catch (Throwable t) {
        logger.error("submitAnswer failed with exception: " + t.getLocalizedMessage(), t);
    }
}
python
def hkeys(self, name, key_start, key_end, limit=10):
    """
    Return a list of the top ``limit`` keys between ``key_start`` and
    ``key_end`` in hash ``name``.

    Similar to **Redis.HKEYS**.

    .. note:: The range is (``key_start``, ``key_end``]. ``key_start``
              isn't in the range, but ``key_end`` is.

    :param string name: the hash name
    :param string key_start: The lower bound (not included) of keys to be
        returned; empty string ``''`` means -inf
    :param string key_end: The upper bound (included) of keys to be
        returned; empty string ``''`` means +inf
    :param int limit: number of elements to be returned
    :return: a list of keys
    :rtype: list

    >>> ssdb.hkeys('hash_1', 'a', 'g', 10)
    ['b', 'c', 'd', 'e', 'f', 'g']
    >>> ssdb.hkeys('hash_2', 'key ', 'key4', 3)
    ['key1', 'key2', 'key3']
    >>> ssdb.hkeys('hash_1', 'f', '', 10)
    ['g']
    >>> ssdb.hkeys('hash_2', 'keys', '', 10)
    []
    """
    limit = get_positive_integer('limit', limit)
    return self.execute_command('hkeys', name, key_start, key_end, limit)
python
def add_user_actions(self, actions=(), version='v1.0'):
    """
    Report user action data.

    https://wximg.qq.com/wxp/pdftool/get.html?id=rkalQXDBM&pa=39

    :param actions: user action source types
    :param version: version number, e.g. v1.0
    """
    return self._post(
        'user_actions/add',
        params={'version': version},
        json={'actions': actions}
    )
java
public void handleHttpSession(Response serverResponse, String headerKey) {
    /* ---------------
     * Session handled
     * --------------- */
    if ("Set-Cookie".equals(headerKey)) {
        COOKIE_JSESSIONID_VALUE = serverResponse.getMetadata().get(headerKey);
    }
}
python
def gaus_pdf(x, mean, std):
    '''Gaussian distribution's probability density function.

    See, e.g. `Wikipedia <https://en.wikipedia.org/wiki/Normal_distribution>`_.

    :param x: point in x-axis
    :type x: float or numpy.ndarray
    :param float mean: mean or expectation
    :param float std: standard deviation
    :returns: pdf(s) in point **x**
    :rtype: float or numpy.ndarray
    '''
    return exp(-((x - mean) / std)**2 / 2) / sqrt(2 * pi) / std
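A quick sanity check (added here, not part of the source): the standard normal density at its mean equals 1/sqrt(2*pi) ≈ 0.3989. This assumes ``exp``, ``sqrt`` and ``pi`` are imported from ``math`` (or ``numpy``) in the module defining ``gaus_pdf``.
python
from math import exp, sqrt, pi

assert abs(gaus_pdf(0.0, 0.0, 1.0) - 1.0 / sqrt(2.0 * pi)) < 1e-12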
python
def prior_names(self):
    """Get the prior information names.

    Returns
    -------
    prior_names : list
        a list of prior information names

    """
    return list(self.prior_information.groupby(
        self.prior_information.index).groups.keys())
java
public void setPATT(Integer newPATT) {
    Integer oldPATT = patt;
    patt = newPATT;
    if (eNotificationRequired())
        eNotify(new ENotificationImpl(this, Notification.SET, AfplibPackage.GSPT__PATT, oldPATT, patt));
}
java
@Nullable
public SchemaAndTable getOverride(SchemaAndTable key) {
    SchemaAndTable result = nameMapping.getOverride(key).or(key);
    if (schemaMapping.containsKey(key.getSchema())) {
        result = new SchemaAndTable(schemaMapping.get(key.getSchema()), result.getTable());
    }
    return result;
}
java
public static KubernetesMessage response(String command, KubernetesResource<?> result) {
    KubernetesResponse response = new KubernetesResponse();
    response.setCommand(command);
    response.setResult(result);
    return new KubernetesMessage(response);
}
java
public PNCounterConfig getPNCounterConfig(String name) {
    return ConfigUtils.getConfig(configPatternMatcher, pnCounterConfigs, name, PNCounterConfig.class);
}
python
def uniq(items):
    """Remove duplicates in given list with its order kept.

    >>> uniq([])
    []
    >>> uniq([1, 4, 5, 1, 2, 3, 5, 10])
    [1, 4, 5, 2, 3, 10]
    """
    acc = items[:1]
    for item in items[1:]:
        if item not in acc:
            acc += [item]
    return acc
python
def encoder(self, inputs, n_layers=3):
    """Convnet that encodes inputs into mean and std of a gaussian.

    Args:
      inputs: 5-D Tensor, shape (batch_size, num_frames, width, height, channels)
      n_layers: Number of layers.

    Returns:
      z_mu: Mean of the latent gaussians.
      z_log_var: log(var) of the latent gaussians.

    Raises:
      ValueError: If inputs is not a 5-D tensor or not float32.
    """
    latent_dims = self.hparams.z_dim

    shape_as_list = inputs.shape.as_list()
    if len(shape_as_list) != 5:
        raise ValueError("Expected inputs to be a 5-D tensor, got %d-D" %
                         len(shape_as_list))
    if inputs.dtype != tf.float32:
        raise ValueError("Expected dtype tf.float32, got %s" % inputs.dtype)

    # Flatten (N,T,W,H,C) into (NT,W,H,C)
    batch_size, _ = shape_as_list[:2]
    inputs = tf.reshape(inputs, [-1] + list(inputs.shape)[2:])

    n_filters = 64
    rectified = None

    # Applies 3 layer conv-net with padding, instance normalization
    # and leaky relu as per the encoder in
    # https://github.com/alexlee-gk/video_prediction
    padding = [[0, 0], [1, 1], [1, 1], [0, 0]]
    for i in range(n_layers):
        with tf.variable_scope("layer_%d" % (i + 1)):
            n_filters *= 2**i
            if i:
                padded = tf.pad(rectified, padding)
            else:
                padded = tf.pad(inputs, padding)
            convolved = tf.layers.conv2d(padded, filters=n_filters, kernel_size=4,
                                         strides=2, padding="VALID")
            normalized = tf.contrib.layers.instance_norm(convolved)
            rectified = tf.nn.leaky_relu(normalized, alpha=0.2)

    # Mean pooling across all spatial dimensions.
    pooled = tf.nn.avg_pool(
        rectified, [1] + rectified.shape[1:3].as_list() + [1],
        strides=[1, 1, 1, 1], padding="VALID")
    squeezed = tf.squeeze(pooled, [1, 2])

    # Down-project and output the mean and log of the standard deviation of
    # the latents.
    with tf.variable_scope("z_mu"):
        z_mu = tf.layers.dense(squeezed, latent_dims)
    with tf.variable_scope("z_log_sigma_sq"):
        z_log_var = tf.layers.dense(squeezed, latent_dims)
        z_log_var = tf.clip_by_value(z_log_var, -10, 10)

    # Reshape to (batch_size X num_frames X latent_dims)
    z_mu = tf.reshape(z_mu, (batch_size, -1, latent_dims))
    z_log_var = tf.reshape(z_log_var, (batch_size, -1, latent_dims))
    return z_mu, z_log_var
java
public WordForm getDep() {
    if (Dependency_Type.featOkTst && ((Dependency_Type) jcasType).casFeat_dep == null)
        jcasType.jcas.throwFeatMissing("dep", "com.digitalpebble.rasp.Dependency");
    return (WordForm) (jcasType.ll_cas.ll_getFSForRef(
            jcasType.ll_cas.ll_getRefValue(addr, ((Dependency_Type) jcasType).casFeatCode_dep)));
}
java
private static void unloadChains(Iterator<String> chains) {
    final boolean bTrace = TraceComponent.isAnyTracingEnabled();
    final ChannelFramework cf = ChannelFrameworkFactory.getChannelFramework();
    List<String> runningChains = new LinkedList<String>();
    while (chains.hasNext()) {
        ChainData cd = cf.getChain(chains.next());
        if (null != cd && FlowType.INBOUND.equals(cd.getType())) {
            if (bTrace && tc.isDebugEnabled()) {
                Tr.debug(tc, "Unloading chain; " + cd.getName());
            }
            try {
                if (cf.isChainRunning(cd)) {
                    runningChains.add(cd.getName());
                } else {
                    cf.destroyChain(cd);
                    cf.removeChain(cd);
                }
            } catch (Exception e) {
                FFDCFilter.processException(e, "ChannelUtils", "unloadChains", new Object[] { cd, cf });
                if (bTrace && tc.isEventEnabled()) {
                    Tr.event(tc, "Unable to remove chain; " + cd.getName());
                }
            }
        }
    }
    // Stop chains, and wait for stop to complete...
    stopChains(runningChains, -1L, null);
    for (String name : runningChains) {
        ChainData cd = cf.getChain(name);
        if (bTrace && tc.isDebugEnabled()) {
            Tr.debug(tc, "Unloading stopped chain; " + name);
        }
        try {
            cf.destroyChain(cd);
            cf.removeChain(cd);
        } catch (Exception e) {
            FFDCFilter.processException(e, "ChannelUtils", "unloadChains", new Object[] { cd, cf });
            if (bTrace && tc.isEventEnabled()) {
                Tr.event(tc, "Unable to remove chain; " + name);
            }
        }
    }
}
python
def update(self, text, revision=None):
    """
    Modifies the internal state based on a change to the content and
    returns the sets of words added and removed.

    :Parameters:
        text : str
            The text content of a revision
        revision : `mixed`
            Revision metadata

    :Returns:
        A triple of lists:

        current_tokens : `list` ( :class:`~mwpersistence.Token` )
            A sequence of Tokens representing the revision that was just
            processed.
        tokens_added : `list` ( :class:`~mwpersistence.Token` )
            Tokens that were added while updating state.
        tokens_removed : `list` ( :class:`~mwpersistence.Token` )
            Tokens that were removed while updating state.
    """
    return self._update(text=text, revision=revision)
java
<T extends EventListener> ArrayList<EventListener> getListeners(Class<T> type) {
    ArrayList<EventListener> listeners = new ArrayList<EventListener>();
    for (TangoListener tangoListener : tangoListeners) {
        if (tangoListener.type == type) {
            listeners.add(tangoListener.listener);
        }
    }
    return listeners;
}
python
def __dbfHeaderLength(self):
    """Retrieves the header length of a dbf file header."""
    if not self.__dbfHdrLength:
        if not self.dbf:
            raise ShapefileException(
                "Shapefile Reader requires a shapefile or file-like object. "
                "(no dbf file found)")
        dbf = self.dbf
        (self.numRecords, self.__dbfHdrLength) = \
            unpack("<xxxxLH22x", dbf.read(32))
    return self.__dbfHdrLength
python
def sum(self):
    """
    Evaluate the integral over the given interval using
    Clenshaw-Curtis quadrature.
    """
    ak = self.coefficients()
    ak2 = ak[::2]
    n = len(ak2)
    Tints = 2/(1 - (2*np.arange(n))**2)
    val = np.sum((Tints*ak2.T).T, axis=0)
    a_, b_ = self.domain()
    return 0.5*(b_ - a_)*val
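The ``Tints`` weights above are the exact integrals of the even Chebyshev polynomials over [-1, 1]: the integral of T_n is 2/(1 - n^2) for even n and 0 for odd n, which is why only every other coefficient (``ak[::2]``) enters the sum, scaled by (b - a)/2 for a general interval. A hedged numeric check of that identity:
python
import numpy as np
from numpy.polynomial.chebyshev import Chebyshev

for k in range(4):
    coeffs = [0] * (2 * k) + [1]          # Chebyshev-basis coefficients of T_{2k}
    exact = 2.0 / (1.0 - (2 * k) ** 2)    # the Tints weight used above
    antideriv = Chebyshev(coeffs).integ()
    assert abs((antideriv(1) - antideriv(-1)) - exact) < 1e-12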
python
def tag_users(self, tag_id, open_id_list):
    """
    Batch-tag users.

    :param tag_id: tag ID
    :param open_id_list: list containing the OPENIDs of one or more users
    :return: the returned JSON data
    """
    return self.post(
        url="https://api.weixin.qq.com/cgi-bin/tags/members/batchtagging",
        data={
            "openid_list": open_id_list,
            "tagid": tag_id
        }
    )
java
public void afterPropertiesSet() throws Exception {
    scheduler = new ScheduledThreadPoolExecutor(DEFAULT_POOL,
            new NamedThreadFactory("Otter-Statistics-Client"),
            new ThreadPoolExecutor.CallerRunsPolicy());
    scheduler.submit(new Runnable() {
        public void run() {
            doSendDelayCountEvent();
        }
    });
}
java
public void restart_server() throws DevFailed {
    Util.out4.println("In DServer.restart_server() method");

    //
    // Reset initial state and status
    //
    set_state(DevState.ON);
    set_status("The device is ON");

    //
    // Destroy and recreate the multi attribute object
    //
    final MultiAttribute tmp = new MultiAttribute(device_name, get_device_class());
    set_device_attr(tmp);

    //
    // Deleting the dserver device is a specific case. We must also delete all
    // TDSOM embedded in this server
    //
    if (class_list.isEmpty() == false) {
        //
        // Destroy already registered classes, devices and commands.
        // To destroy already created devices, we must disconnect them from the
        // ORB, otherwise their reference count will never decrease to 0 and the
        // object will not be eligible for garbage collection.
        //
        final int nb_class = class_list.size();
        final POA r_poa = Util.instance().get_poa();
        for (int j = 0; j < nb_class; j++) {
            final Vector v = ((DeviceClass) class_list.elementAt(j)).get_device_list();
            final int nb_dev = v.size();
            for (int k = 0; k < nb_dev; k++) {
                final DeviceImpl dev = (DeviceImpl) v.elementAt(k);
                if (dev.get_exported_flag() == true) {
                    dev.delete_device();
                    try {
                        r_poa.deactivate_object(((DeviceImpl) v.elementAt(k)).get_obj_id());
                    } catch (final WrongPolicy ex) {
                        ex.printStackTrace();
                    } catch (final ObjectNotActive ex) {
                        ex.printStackTrace();
                    }
                }
            }
            v.removeAllElements();
            ((DeviceClass) class_list.elementAt(j)).initClass();
        }
        class_list.removeAllElements();
        System.out.println("DServer.restart_server - class list " + class_list);
    }

    // Restart everything
    init_device();

    // Restart polling (if any)
    Util.instance().polling_configure();
}
java
protected void validate(final boolean isDomain) throws MojoDeploymentException {
    final boolean hasServerGroups = hasServerGroups();
    if (isDomain) {
        if (!hasServerGroups) {
            throw new MojoDeploymentException(
                    "Server is running in domain mode, but no server groups have been defined.");
        }
    } else if (hasServerGroups) {
        throw new MojoDeploymentException(
                "Server is running in standalone mode, but server groups have been defined.");
    }
}
python
def roster(team_id):
    """Returns a dictionary of roster information for team id."""
    data = mlbgame.data.get_roster(team_id)
    parsed = json.loads(data.read().decode('utf-8'))
    players = parsed['roster_40']['queryResults']['row']
    return {'players': players, 'team_id': team_id}
java
public static AggregatePlanNode convertToPartialAggregatePlanNode(HashAggregatePlanNode hashAggregateNode,
        List<Integer> aggrColumnIdxs) {
    final AggregatePlanNode partialAggr =
            setAggregatePlanNode(hashAggregateNode, new PartialAggregatePlanNode());
    partialAggr.m_partialGroupByColumns = aggrColumnIdxs;
    return partialAggr;
}
python
def _compare_list(new_list, old_list, change_list=None, root=None):
    '''a method for recursively listing changes made to a list

    :param new_list: list with new values
    :param old_list: list with old values
    :param change_list: list of differences between old and new
    :param root: string with record of path to the root of the main object
    :return: list of differences between old and new
    '''
    from copy import deepcopy
    if len(old_list) > len(new_list):
        same_len = len(new_list)
        for i in reversed(range(len(new_list), len(old_list))):
            new_path = deepcopy(root)
            new_path.append(i)
            change_list.append({'action': 'REMOVE', 'value': None, 'path': new_path})
    elif len(new_list) > len(old_list):
        same_len = len(old_list)
        append_list = []
        path = deepcopy(root)
        for i in range(len(old_list), len(new_list)):
            append_list.append(new_list[i])
        change_list.append({'action': 'APPEND', 'value': append_list, 'path': path})
    else:
        same_len = len(new_list)
    for i in range(0, same_len):
        new_path = deepcopy(root)
        new_path.append(i)
        if new_list[i].__class__ != old_list[i].__class__:
            change_list.append({'action': 'UPDATE', 'value': new_list[i], 'path': new_path})
        elif isinstance(new_list[i], dict):
            _compare_dict(new_list[i], old_list[i], change_list, new_path)
        elif isinstance(new_list[i], list):
            _compare_list(new_list[i], old_list[i], change_list, new_path)
        elif isinstance(new_list[i], set):
            _compare_set(new_list[i], old_list[i], change_list, new_path)
        elif new_list[i] != old_list[i]:
            change_list.append({'action': 'UPDATE', 'value': new_list[i], 'path': new_path})
    return change_list
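A small usage sketch (a hypothetical call, assuming the companion _compare_dict/_compare_set helpers exist for nested values): a longer new list yields an APPEND record, and an index-wise difference yields an UPDATE record.
python
changes = _compare_list([1, 2, 9, 4], [1, 2, 3], change_list=[], root=[])
# [{'action': 'APPEND', 'value': [4], 'path': []},
#  {'action': 'UPDATE', 'value': 9, 'path': [2]}]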
java
public void getEntriesAndAddListener(RosterListener rosterListener, RosterEntries rosterEntries) {
    Objects.requireNonNull(rosterListener, "listener must not be null");
    Objects.requireNonNull(rosterEntries, "rosterEntries must not be null");
    synchronized (rosterListenersAndEntriesLock) {
        rosterEntries.rosterEntries(entries.values());
        addRosterListener(rosterListener);
    }
}
python
def fetch_list_members(list_url):
    """Get all members of the list specified by the given url.

    E.g., https://twitter.com/lore77/lists/libri-cultura-education
    """
    match = re.match(r'.+twitter\.com\/(.+)\/lists\/(.+)', list_url)
    if not match:
        print('cannot parse list url %s' % list_url)
        return []
    screen_name, slug = match.groups()
    print('collecting list %s/%s' % (screen_name, slug))
    return twutil.collect.list_members(slug, screen_name)
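The URL regex can be exercised on its own without the twutil dependency; a quick check of the parsing on the example URL from the docstring:
python
import re

m = re.match(r'.+twitter\.com\/(.+)\/lists\/(.+)',
             'https://twitter.com/lore77/lists/libri-cultura-education')
assert m.groups() == ('lore77', 'libri-cultura-education')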
python
def remove_sequences(self, tree, sequence_names):
    '''Remove the sequences in the given sequence_names array from the
    tree, in place. Assumes the sequences are found in the tree, and that
    they are all unique.

    Parameters
    ----------
    tree: dendropy.Tree
        tree to remove from
    sequence_names: list of str
        list of tip names to remove
    '''
    tree.prune_taxa_with_labels(sequence_names)
    tree.prune_taxa_with_labels([s.replace('_', ' ') for s in sequence_names])
java
public void addShutdownListener(final ShutdownListener shutdownListener) {
    synchronized (lock) {
        if (!shutdown) {
            throw UndertowMessages.MESSAGES.handlerNotShutdown();
        }
        long count = activeRequestsUpdater.get(this);
        if (count == 0) {
            shutdownListener.shutdown(true);
        } else {
            shutdownListeners.add(shutdownListener);
        }
    }
}
java
private static int getNumberOfInserts(String messageKey) {
    String unInsertedMessage = nls.getString(messageKey);
    int numInserts = 0;
    // Not much point in going any further than 20 inserts!
    for (int i = 0; i < 20; i++) {
        if (unInsertedMessage.indexOf("{" + i + "}") != -1) {
            numInserts++;
        } else {
            // This message insert was not found
            break;
        }
    }
    return numInserts;
}
java
@Override
protected boolean removeFromQueueStorage(Connection conn, IQueueMessage<Long, byte[]> _msg) {
    if (!(_msg instanceof UniversalIdIntQueueMessage)) {
        throw new IllegalArgumentException("This method requires an argument of type ["
                + UniversalIdIntQueueMessage.class.getName() + "]!");
    }
    UniversalIdIntQueueMessage msg = (UniversalIdIntQueueMessage) _msg;
    int numRows = getJdbcHelper().execute(conn, SQL_REMOVE_FROM_QUEUE, getQueueName(), msg.getId());
    return numRows > 0;
}
java
public static GeneralParameterValue[] createGridGeometryGeneralParameter(RegionMap regionMap,
        CoordinateReferenceSystem crs) {
    GeneralParameterValue[] readParams = new GeneralParameterValue[1];
    Parameter<GridGeometry2D> readGG =
            new Parameter<GridGeometry2D>(AbstractGridFormat.READ_GRIDGEOMETRY2D);
    GridEnvelope2D gridEnvelope = new GridEnvelope2D(0, 0, regionMap.getCols(), regionMap.getRows());
    Envelope env;
    double north = regionMap.getNorth();
    double south = regionMap.getSouth();
    double east = regionMap.getEast();
    double west = regionMap.getWest();
    if (crs != null) {
        env = new ReferencedEnvelope(west, east, south, north, crs);
    } else {
        DirectPosition2D minDp = new DirectPosition2D(west, south);
        DirectPosition2D maxDp = new DirectPosition2D(east, north);
        env = new Envelope2D(minDp, maxDp);
    }
    readGG.setValue(new GridGeometry2D(gridEnvelope, env));
    readParams[0] = readGG;
    return readParams;
}
java
public void marshall(CreateTypeRequest createTypeRequest, ProtocolMarshaller protocolMarshaller) {
    if (createTypeRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(createTypeRequest.getApiId(), APIID_BINDING);
        protocolMarshaller.marshall(createTypeRequest.getDefinition(), DEFINITION_BINDING);
        protocolMarshaller.marshall(createTypeRequest.getFormat(), FORMAT_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
java
@Nonnull
public <V1 extends T1, V2 extends T2> LToFltBiFunctionBuilder<T1, T2> aCase(Class<V1> argC1,
        Class<V2> argC2, LToFltBiFunction<V1, V2> function) {
    PartialCaseWithFltProduct.The pc = partialCaseFactoryMethod(
            (a1, a2) -> (argC1 == null || argC1.isInstance(a1)) && (argC2 == null || argC2.isInstance(a2)));
    pc.evaluate(function);
    return self();
}
python
def reset(self):
    """Reset the dataset to zero elements."""
    self.data = []
    self.size = 0
    self.kdtree = None  # KDTree
    self.nn_ready = False
java
public static double[] solveLinearEquationLeastSquare(double[][] matrix, double[] vector) {
    // We use the linear algebra package Apache Commons Math
    DecompositionSolver solver =
            new SingularValueDecomposition(new Array2DRowRealMatrix(matrix, false)).getSolver();
    return solver.solve(new ArrayRealVector(vector)).toArray();
}
python
def add_root_family(self, family_id):
    """Adds a root family.

    arg:    family_id (osid.id.Id): the ``Id`` of a family
    raise:  AlreadyExists - ``family_id`` is already in hierarchy
    raise:  NotFound - ``family_id`` not found
    raise:  NullArgument - ``family_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.BinHierarchyDesignSession.add_root_bin_template
    if self._catalog_session is not None:
        return self._catalog_session.add_root_catalog(catalog_id=family_id)
    return self._hierarchy_session.add_root(id_=family_id)
java
public static Optional<Method> extractSetter(final Class<?> targetClass, final String propertyName,
        final Class<?> propertyType) {
    final String methodName = "set" + Utils.capitalize(propertyName);
    Method method;
    try {
        method = targetClass.getMethod(methodName, propertyType);
    } catch (NoSuchMethodException | SecurityException e) {
        return Optional.empty();
    }
    method.setAccessible(true);
    return Optional.of(method);
}
java
public void rename(String newName) {
    URL url = FILE_URL_TEMPLATE.build(this.getAPI().getBaseURL(), this.getID());
    BoxJSONRequest request = new BoxJSONRequest(this.getAPI(), url, "PUT");

    JsonObject updateInfo = new JsonObject();
    updateInfo.add("name", newName);

    request.setBody(updateInfo.toString());
    BoxAPIResponse response = request.send();
    response.disconnect();
}
python
def segment_lengths(neurites, neurite_type=NeuriteType.all):
    '''Lengths of the segments in a collection of neurites'''
    def _seg_len(sec):
        '''list of segment lengths of a section'''
        return np.linalg.norm(np.diff(sec.points[:, COLS.XYZ], axis=0), axis=1)

    return map_segments(_seg_len, neurites, neurite_type)
python
def get_reference_data(data_dir=None):
    '''Obtain information for all stored references

    This is a nested dictionary with all the data for all the references.

    The reference data is read from the REFERENCES.json file in the given
    `data_dir` directory.
    '''
    data_dir = fix_data_dir(data_dir)
    reffile_path = os.path.join(data_dir, 'REFERENCES.json')
    return fileio.read_references(reffile_path)
python
def objects_to_record(self):
    """Write from object metadata to the record. Note that we don't write everything."""
    o = self.get_object()
    o.about = self._bundle.metadata.about
    o.identity = self._dataset.identity.ident_dict
    o.names = self._dataset.identity.names_dict
    o.contacts = self._bundle.metadata.contacts
    self.set_object(o)
python
def _initialize_session(self):
    """
    :rtype: None
    """
    session_server = core.SessionServer.create(self).value
    token = session_server.token.token
    expiry_time = self._get_expiry_timestamp(session_server)
    user_id = session_server.get_referenced_user().id_
    self._session_context = SessionContext(token, expiry_time, user_id)
java
public static InjectorImpl create(ClassLoader loader) {
    synchronized (loader) {
        if (loader instanceof DynamicClassLoader) {
            InjectorImpl inject = _localManager.getLevel(loader);
            if (inject == null) {
                inject = (InjectorImpl) InjectorAmp.manager(loader).get();
                _localManager.set(inject, loader);
            }
            return inject;
        } else {
            SoftReference<InjectorImpl> injectRef = _loaderManagerMap.get(loader);
            InjectorImpl inject = null;
            if (injectRef != null) {
                inject = injectRef.get();
                if (inject != null) {
                    return inject;
                }
            }
            inject = (InjectorImpl) InjectorAmp.manager(loader).get();
            _loaderManagerMap.put(loader, new SoftReference<>(inject));
            return inject;
        }
    }
}
java
@Override
public void generateSerializeOnXml(BindTypeContext context, MethodSpec.Builder methodBuilder,
        String serializerName, TypeName beanClass, String beanName, BindProperty property) {
    TypeName elementTypeName = extractTypeParameterName(property);

    // @formatter:off
    methodBuilder.beginControlFlow("if ($L!=null) ", getter(beanName, beanClass, property));

    switch (collectionType) {
    case LIST:
        methodBuilder.addStatement("int n=$L.size()", getter(beanName, beanClass, property));
        methodBuilder.addStatement("$T item", elementTypeName);
        break;
    case ARRAY:
        methodBuilder.addStatement("int n=$L.length", getter(beanName, beanClass, property));
        methodBuilder.addStatement("$T item", elementTypeName);
        break;
    case SET:
        methodBuilder.addStatement("int n=$L.size()", getter(beanName, beanClass, property));
        break;
    }

    if (property.xmlInfo.isWrappedCollection()) {
        methodBuilder.addCode("// write wrapper tag\n");
        methodBuilder.addStatement("$L.writeStartElement($S)", serializerName, BindProperty.xmlName(property));
    }

    BindTransform transform = BindTransformer.lookup(elementTypeName);
    BindProperty elementProperty = BindProperty.builder(elementTypeName, property).inCollection(true)
            .elementName(BindProperty.xmlNameForItem(property)).build();

    switch (collectionType) {
    case SET:
        methodBuilder.beginControlFlow("for ($T item: $L)", elementTypeName,
                getter(beanName, beanClass, property));
        break;
    case LIST:
        methodBuilder.beginControlFlow("for (int i=0; i<n; i++)");
        methodBuilder.addStatement("item=$L.get(i)", getter(beanName, beanClass, property));
        break;
    case ARRAY:
        methodBuilder.beginControlFlow("for (int i=0; i<n; i++)");
        methodBuilder.addStatement("item=$L[i]", getter(beanName, beanClass, property));
        break;
    }

    if (!TypeUtility.isTypePrimitive(elementTypeName)) {
        methodBuilder.beginControlFlow("if (item==null)");
        methodBuilder.addStatement("$L.writeEmptyElement($S)", serializerName,
                BindProperty.xmlNameForItem(property));
        methodBuilder.nextControlFlow("else");
        transform.generateSerializeOnXml(context, methodBuilder, serializerName, null, "item", elementProperty);
        methodBuilder.endControlFlow();
    } else {
        transform.generateSerializeOnXml(context, methodBuilder, serializerName, null, "item", elementProperty);
    }
    methodBuilder.endControlFlow();

    if (property.xmlInfo.isWrappedCollection()) {
        methodBuilder.addStatement("$L.writeEndElement()", serializerName);
    } else {
        // If there's no wrap tag, we need to ensure that an empty collection
        // will still be written. To distinguish between a first empty element
        // and an empty collection, we write an emptyCollection attribute that
        // says: this collection is empty.
        methodBuilder.addCode(
                "// to distinguish between first empty element and empty collection, we write an attribute emptyCollection\n");
        methodBuilder.beginControlFlow("if (n==0)");
        methodBuilder.addStatement("$L.writeStartElement($S)", serializerName,
                BindProperty.xmlNameForItem(property));
        methodBuilder.addStatement("$L.writeAttribute($S, $S)", serializerName,
                EMPTY_COLLECTION_ATTRIBUTE_NAME, "true");
        methodBuilder.addStatement("$L.writeEndElement()", serializerName);
        methodBuilder.endControlFlow();
    }
    methodBuilder.endControlFlow();
    // @formatter:on
}
python
def has_minimum_version(raises=True):
    """
    Return if tmux meets version requirement. Version 1.8 or above.

    Parameters
    ----------
    raises : bool
        raise exception if below minimum version requirement

    Returns
    -------
    bool
        True if tmux meets minimum required version.

    Raises
    ------
    libtmux.exc.VersionTooLow
        tmux version below minimum required for libtmux

    Notes
    -----
    .. versionchanged:: 0.7.0
        No longer returns version, returns True or False

    .. versionchanged:: 0.1.7
        Versions will now remove trailing letters per `Issue 55`_.

        .. _Issue 55: https://github.com/tmux-python/tmuxp/issues/55
    """
    if get_version() < LooseVersion(TMUX_MIN_VERSION):
        if raises:
            raise exc.VersionTooLow(
                'libtmux only supports tmux %s and greater. This system'
                ' has %s installed. Upgrade your tmux to use libtmux.'
                % (TMUX_MIN_VERSION, get_version())
            )
        else:
            return False
    return True
java
private int incrementReferenceCounter(final JobID jobID) {
    while (true) {
        AtomicInteger ai = this.libraryReferenceCounter.get(jobID);
        if (ai == null) {
            ai = new AtomicInteger(1);
            if (this.libraryReferenceCounter.putIfAbsent(jobID, ai) == null) {
                return 1;
            }
            // We had a race, try again
        } else {
            return ai.incrementAndGet();
        }
    }
}
python
def raw(self, query):
    """
    Make a raw query.

    Args:
        query (str): solr query
    """
    clone = copy.deepcopy(self)
    clone.adapter._pre_compiled_query = query
    clone.adapter.compiled_query = query
    return clone
python
def handleError(self, record):
    '''
    Override the default error handling mechanism.

    Deal with log file rotation errors due to the log file being in use
    more softly.
    '''
    handled = False

    # Can't use "salt.utils.platform.is_windows()" in this file
    if (sys.platform.startswith('win') and
            logging.raiseExceptions and
            sys.stderr):  # see Python issue 13807
        exc_type, exc, exc_traceback = sys.exc_info()
        try:
            # PermissionError is used since Python 3.3.
            # OSError is used for previous versions of Python.
            if exc_type.__name__ in ('PermissionError', 'OSError') and exc.winerror == 32:
                if self.level <= logging.WARNING:
                    sys.stderr.write(
                        '[WARNING ] Unable to rotate the log file "{0}" '
                        'because it is in use\n'.format(self.baseFilename)
                    )
                handled = True
        finally:
            # 'del' recommended. See documentation of
            # 'sys.exc_info()' for details.
            del exc_type, exc, exc_traceback

    if not handled:
        super(RotatingFileHandler, self).handleError(record)
java
private String[] getSparkLibConf() {
    String sparkHome = null;
    String sparkConf = null;

    // If user has specified version in job property, e.g. spark-version=1.6.0
    final String jobSparkVer = getJobProps().get(SparkJobArg.SPARK_VERSION.azPropName);
    if (jobSparkVer != null) {
        info("This job sets spark version: " + jobSparkVer);

        // Spark jobtype supports this version through the plugin's jobtype config
        sparkHome = getSparkHome(jobSparkVer);
        sparkConf = getSysProps().get("spark." + jobSparkVer + ".conf");
        if (sparkConf == null) {
            sparkConf = sparkHome + "/conf";
        }
        info("Using job specific spark: " + sparkHome + " and conf: " + sparkConf);

        // Override the SPARK_HOME and SPARK_CONF_DIR env for the
        // HadoopSecureSparkWrapper process (spark client)
        getJobProps().put("env." + SPARK_HOME_ENV_VAR, sparkHome);
        getJobProps().put("env." + SPARK_CONF_DIR_ENV_VAR, sparkConf);
    } else {
        // User job doesn't give spark-version.
        // Use default spark.home, configured in the jobtype plugin's config.
        sparkHome = getSysProps().get("spark.home");
        if (sparkHome == null) {
            // Use system default SPARK_HOME env
            sparkHome = System.getenv(SPARK_HOME_ENV_VAR);
        }
        sparkConf = (System.getenv(SPARK_CONF_DIR_ENV_VAR) != null)
                ? System.getenv(SPARK_CONF_DIR_ENV_VAR)
                : (sparkHome + "/conf");
        info("Using system default spark: " + sparkHome + " and conf: " + sparkConf);
    }

    if (sparkHome == null) {
        throw new RuntimeException("SPARK is not available on the azkaban machine.");
    } else {
        final File homeDir = new File(sparkHome);
        if (!homeDir.exists()) {
            throw new RuntimeException("SPARK home dir does not exist.");
        }
        final File confDir = new File(sparkConf);
        if (!confDir.exists()) {
            error("SPARK conf dir does not exist. Will use SPARK_HOME/conf as default.");
            sparkConf = sparkHome + "/conf";
        }
        final File defaultSparkConf = new File(sparkConf + "/spark-defaults.conf");
        if (!defaultSparkConf.exists()) {
            throw new RuntimeException("Default Spark config file spark-defaults.conf cannot"
                    + " be found at " + defaultSparkConf);
        }
    }
    return new String[] { getSparkLibDir(sparkHome), sparkConf };
}
python
def convert_activation(builder, layer, input_names, output_names, keras_layer):
    """Convert an activation layer from keras to coreml.

    Parameters
    ----------
    keras_layer: layer
        A keras layer object.
    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    # Get input and output names
    input_name, output_name = (input_names[0], output_names[0])
    non_linearity = _get_activation_name_from_keras_layer(keras_layer)

    # Add a non-linearity layer
    if non_linearity == 'SOFTMAX':
        builder.add_softmax(name=layer, input_name=input_name,
                            output_name=output_name)
        return

    params = None
    if non_linearity == 'LEAKYRELU':
        params = [keras_layer.alpha]
    elif non_linearity == 'PRELU':
        # In Keras 1.2 PReLU layer's weights are stored as a
        # backend tensor, not a numpy array as it claims in documentation.
        shared_axes = list(keras_layer.shared_axes)
        if not (shared_axes == [1, 2, 3] or shared_axes == [1, 2]):
            _utils.raise_error_unsupported_scenario(
                "Shared axis not being [1,2,3] or [1,2]", 'parametric_relu', layer)
        params = keras.backend.eval(keras_layer.weights[0])
    elif non_linearity == 'ELU':
        params = keras_layer.alpha
    elif non_linearity == 'PARAMETRICSOFTPLUS':
        # In Keras 1.2 Parametric Softplus layer's weights are stored as a
        # backend tensor, not a numpy array as it claims in documentation.
        alphas = keras.backend.eval(keras_layer.weights[0])
        betas = keras.backend.eval(keras_layer.weights[1])
        if len(alphas.shape) == 3:  # (H,W,C)
            if not (_same_elements_per_channel(alphas) and
                    _same_elements_per_channel(betas)):
                _utils.raise_error_unsupported_scenario(
                    "Different parameter values", 'parametric_softplus', layer)
            alphas = alphas[0, 0, :]
            betas = betas[0, 0, :]
        params = [alphas, betas]
    elif non_linearity == 'THRESHOLDEDRELU':
        params = keras_layer.theta
    else:
        pass  # do nothing to parameters

    builder.add_activation(name=layer, non_linearity=non_linearity,
                           input_name=input_name, output_name=output_name,
                           params=params)
java
private static double branchWeight(Node node, Arborescence<Node> arborescence,
        Map<Node, Set<Node>> edgesParentToChild,
        Map<Node, Map<Node, Fragment>> edgeFragmentChildToParent) {
    Double nodeWeight = node.getNodeWeight();
    if (nodeWeight == null) {
        nodeWeight = getEdgeFragmentCost(node, arborescence, edgeFragmentChildToParent)
                + nodeFragmentWeight(node);
        node.setNodeWeight(nodeWeight);
    }
    Double branchWeight = node.getBranchWeight();
    if (branchWeight == null) {
        final double[] weight = { nodeWeight };
        if (edgesParentToChild.containsKey(node)) {
            edgesParentToChild.get(node).forEach(child ->
                    weight[0] += branchWeight(child, arborescence, edgesParentToChild,
                            edgeFragmentChildToParent));
        }
        branchWeight = weight[0];
        node.setBranchWeight(branchWeight);
    }
    return branchWeight;
}
python
def finish(request, socket, context):
    """
    Event handler for a socket session ending in a room. Broadcast
    the user leaving and delete them from the DB.
    """
    try:
        user = context["user"]
    except KeyError:
        return
    left = {"action": "leave", "name": user.name, "id": user.id}
    socket.broadcast_channel(left)
    user.delete()
java
public static <L, R> TypeInformation<Either<L, R>> EITHER(TypeInformation<L> leftType,
        TypeInformation<R> rightType) {
    return new EitherTypeInfo<>(leftType, rightType);
}
python
def add_group_mindist(self, group_definitions, group_pairs='all', threshold=None, periodic=True):
    r"""
    Adds the minimum distance between groups of atoms to the feature list.
    If the groups of atoms are identical to residues, use
    :py:obj:`add_residue_mindist <pyemma.coordinates.data.featurizer.MDFeaturizer.add_residue_mindist>`.

    Parameters
    ----------
    group_definitions : list of 1D-arrays/iterables containing the group definitions via atom indices.
        If there is only one group_definition, it is assumed the minimum distance within this group
        (excluding the self-distance) is wanted. In this case, :py:obj:`group_pairs` is ignored.

    group_pairs : Can be of two types:
        'all'
            Computes minimum distances between all pairs of groups contained
            in the group definitions

        ndarray((n, 2), dtype=int):
            n x 2 array with the pairs of groups for which the minimum
            distances will be computed.

    threshold : float, optional, default is None
        distances below this threshold (in nm) will result in a feature 1.0,
        distances above will result in 0.0. If left to None, the numerical
        value will be returned.

    periodic : bool, optional, default = True
        If `periodic` is True and the trajectory contains unitcell
        information, distances that cross periodic images are treated using
        the minimum image convention.
    """
    from .distances import GroupMinDistanceFeature
    # Some thorough input checking and reformatting
    group_definitions, group_pairs, distance_list, group_identifiers = \
        _parse_groupwise_input(group_definitions, group_pairs, self.logger, 'add_group_mindist')
    distance_list = self._check_indices(distance_list)

    f = GroupMinDistanceFeature(self.topology, group_definitions, group_pairs,
                                distance_list, group_identifiers, threshold, periodic)
    self.__add_feature(f)
java
private String getTextLine(String[] textLines, int line) {
    return (isIgnoreTrailingWhiteSpaces()) ? textLines[line].trim() : textLines[line];
}
java
public ActionFuture<IndexResponse> sendDataAsync(
        String jsonSource, String index, String type, String id) {
    return indexQueryAsync(buildIndexRequest(jsonSource, index, type, id));
}
java
public void setInstanceProfiles(java.util.Collection<InstanceProfile> instanceProfiles) {
    if (instanceProfiles == null) {
        this.instanceProfiles = null;
        return;
    }
    this.instanceProfiles = new com.amazonaws.internal.SdkInternalList<InstanceProfile>(instanceProfiles);
}
python
def get(self, section, option, type_=six.string_types, default=None):
    """Retrieves option from the specified section (or 'DEFAULT') and
    attempts to parse it as type.

    If the specified section does not exist or is missing a definition
    for the option, the value is looked up in the DEFAULT section. If
    there is still no definition found, the default value supplied is
    returned.
    """
    return self._getinstance(section, option, type_, default)
java
private static String readObjectProperty(String ref, TypeDef source, Property property) {
    return ref + "." + getterOf(source, property).getName() + "()";
}
python
def _get_default_iface_linux():
    # type: () -> Optional[str]
    """Get the default interface by reading /proc/net/route.

    This is the same source as the `route` command, however it's much
    faster to read this file than to call `route`. If it fails for whatever
    reason, we can fall back on the system commands (e.g. for a platform
    that has a route command, but maybe doesn't use /proc?).
    """
    data = _read_file('/proc/net/route')
    if data is not None and len(data) > 1:
        for line in data.split('\n')[1:-1]:
            iface_name, dest = line.split('\t')[:2]
            if dest == '00000000':
                return iface_name
    return None
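A self-contained replay of the parsing logic on a synthetic /proc/net/route payload (a header line followed by one route per line; the column values here are illustrative). The default route is the entry whose Destination column is 00000000, i.e. 0.0.0.0.
python
sample = (
    'Iface\tDestination\tGateway\tFlags\tRefCnt\tUse\tMetric\tMask\tMTU\tWindow\tIRTT\n'
    'eth0\t00000000\t0102A8C0\t0003\t0\t0\t100\t00000000\t0\t0\t0\n'
)
for line in sample.split('\n')[1:-1]:
    iface_name, dest = line.split('\t')[:2]
    if dest == '00000000':
        print(iface_name)   # -> eth0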
python
def authentications_spec(self):
    """Spec for a group of authentication options"""
    return container_spec(
        authentication_objs.Authentication,
        dictof(string_spec(), set_options(
            reading=optional_spec(authentication_spec()),
            writing=optional_spec(authentication_spec())
        ))
    )
java
private double mdist(double[] a, double[] b) {
    // Manhattan (L1) distance in two dimensions
    return Math.abs(a[0] - b[0]) + Math.abs(a[1] - b[1]);
}
java
@Override
public CreateCodeRepositoryResult createCodeRepository(CreateCodeRepositoryRequest request) {
    request = beforeClientExecution(request);
    return executeCreateCodeRepository(request);
}
java
public static @SlashedClassName String trimSignature(String signature) {
    if ((signature != null) && signature.startsWith(Values.SIG_QUALIFIED_CLASS_PREFIX)
            && signature.endsWith(Values.SIG_QUALIFIED_CLASS_SUFFIX)) {
        return signature.substring(1, signature.length() - 1);
    }
    return signature;
}
java
public EClass getIfcWorkControl() {
    if (ifcWorkControlEClass == null) {
        ifcWorkControlEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(Ifc2x3tc1Package.eNS_URI)
                .getEClassifiers().get(649);
    }
    return ifcWorkControlEClass;
}
python
def request(self, method, data=None, nid=None, nid_key='nid',
            api_type="logic", return_response=False):
    """Get data from arbitrary Piazza API endpoint `method` in network `nid`

    :type  method: str
    :param method: An internal Piazza API method name like `content.get`
        or `network.get_users`
    :type  data: dict
    :param data: Key-value data to pass to Piazza in the request
    :type  nid: str
    :param nid: This is the ID of the network to which the request
        should be made. This is optional and only to override the
        existing `network_id` entered when creating the class
    :type  nid_key: str
    :param nid_key: Name expected by Piazza for `nid` when making request.
        (Usually and by default "nid", but sometimes "id" is expected)
    :type  return_response: bool
    :param return_response: If set, returns whole :class:`requests.Response`
        object rather than just the response body
    :returns: Python object containing returned data
    """
    self._check_authenticated()

    nid = nid if nid else self._nid
    if data is None:
        data = {}

    headers = {}
    if "session_id" in self.session.cookies:
        headers["CSRF-Token"] = self.session.cookies["session_id"]

    # Adding a nonce to the request
    endpoint = self.base_api_urls[api_type]
    if api_type == "logic":
        endpoint += "?method={}&aid={}".format(
            method,
            _piazza_nonce()
        )

    response = self.session.post(
        endpoint,
        data=json.dumps({
            "method": method,
            "params": dict({nid_key: nid}, **data)
        }),
        headers=headers
    )

    return response if return_response else response.json()
python
def importable(obj):
    """Check if an object can be serialised as a qualified name.

    This is done by checking that a ``look_up(object_name(obj))`` gives
    back the same object.

    .. |importable| replace:: :py:func:`importable`
    """
    try:
        return look_up(object_name(obj)) is obj
    except (AttributeError, TypeError, ImportError):
        return False
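Hedged usage, assuming ``look_up`` and ``object_name`` resolve dotted qualified names as the docstring implies: module-level callables round-trip, while lambdas and local objects do not.
python
import json

importable(json.dumps)    # True: 'json.dumps' resolves back to the same object
importable(lambda x: x)   # False: a lambda has no importable qualified name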
python
def edit_distance(wordA, wordB):
    """Implements the Levenshtein edit distance algorithm.

    Ref: https://en.wikipedia.org/wiki/Edit_distance
    Ref: https://en.wikipedia.org/wiki/Levenshtein_distance
    """
    if not type(wordA) is list:
        lettersA = tamil.utf8.get_letters(wordA)
    else:
        lettersA = wordA
    if not type(wordB) is list:
        lettersB = tamil.utf8.get_letters(wordB)
    else:
        lettersB = wordB
    n_A = len(lettersA)
    n_B = len(lettersB)
    dist_AB = [[0 for i in range(0, n_B + 1)] for i in range(0, (n_A + 1))]
    # Target prefix reached by insertion
    for j in range(1, n_B + 1):
        dist_AB[0][j] = j
    for i in range(1, n_A + 1):
        dist_AB[i][0] = i
    for j in range(1, n_B + 1):
        for i in range(1, n_A + 1):
            if lettersA[i - 1] == lettersB[j - 1]:
                new_dist = dist_AB[i - 1][j - 1]
            else:
                # deletion, insertion, or substitution
                new_dist = min([dist_AB[i - 1][j] + 1,
                                dist_AB[i][j - 1] + 1,
                                dist_AB[i - 1][j - 1] + 1])
            dist_AB[i][j] = new_dist
    return dist_AB[-1][-1]
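Since the function accepts pre-tokenized lists, it can be checked without the tamil tokenizer. The classic kitten → sitting distance is 3 (substitute k→s, substitute e→i, insert g):
python
assert edit_distance(list('kitten'), list('sitting')) == 3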
java
public void prepare(Collection<String> urls, String userAgent) {
    List<String> safeUrls = new ArrayList<String>(urls);
    FetchThread threads[] = new FetchThread[PREPARE_THREAD_COUNT];
    for (int i = 0; i < PREPARE_THREAD_COUNT; i++) {
        threads[i] = new FetchThread(safeUrls, userAgent);
        threads[i].start();
    }
    for (int i = 0; i < PREPARE_THREAD_COUNT; i++) {
        try {
            threads[i].join();
        } catch (InterruptedException e) {
        }
    }
}
java
public boolean isParserDirective() {
    if (getValue() == null || isCommand() || getValue().length() == 0)
        return false;
    if (getValue().charAt(0) == '_')
        return true;
    if (getValue().length() == 1 && getValue().charAt(0) == '=')
        return true;
    return false;
}
python
def _get_image_entropy(self, image):
    """Calculate the entropy of an image."""
    hist = image.histogram()
    hist_size = sum(hist)
    hist = [float(h) / hist_size for h in hist]
    return -sum([p * math.log(p, 2) for p in hist if p != 0])
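A standalone replay of the same computation using Pillow (``Image.histogram()`` is the standard API assumed here): a single-color image has zero entropy because its histogram has exactly one nonzero bin.
python
import math
from PIL import Image

img = Image.new('L', (8, 8), color=128)
hist = img.histogram()
hist_size = sum(hist)
hist = [float(h) / hist_size for h in hist]
assert -sum(p * math.log(p, 2) for p in hist if p != 0) == 0.0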
python
def watch(self, username, watch={
        "friend": True,
        "deviations": True,
        "journals": True,
        "forum_threads": True,
        "critiques": True,
        "scraps": True,
        "activity": True,
        "collections": True
}):
    """Watch a user

    :param username: The username you want to watch
    """
    if self.standard_grant_type != "authorization_code":
        raise DeviantartError(
            "Authentication through Authorization Code (Grant Type) is "
            "required in order to connect to this endpoint.")

    response = self._req('/user/friends/watch/{}'.format(username), post_data={
        "watch[friend]": watch['friend'],
        "watch[deviations]": watch['deviations'],
        "watch[journals]": watch['journals'],
        "watch[forum_threads]": watch['forum_threads'],
        "watch[critiques]": watch['critiques'],
        "watch[scraps]": watch['scraps'],
        "watch[activity]": watch['activity'],
        "watch[collections]": watch['collections'],
    })

    return response['success']
java
@Override
public void exec(Result<Object> result, Object[] args) {
    TableKelp tableKelp = _table.getTableKelp();

    RowCursor minCursor = tableKelp.cursor();
    RowCursor maxCursor = tableKelp.cursor();

    minCursor.clear();
    maxCursor.setKeyMax();

    _whereKraken.fillMinCursor(minCursor, args);
    _whereKraken.fillMaxCursor(minCursor, args);

    //QueryKelp whereKelp = _whereExpr.bind(args);
    // XXX: binding should be with unique
    EnvKelp whereKelp = new EnvKelp(_whereKelp, args);

    //tableKelp.findOne(minCursor, maxCursor, whereKelp,
    //                  new FindDeleteResult(result));
    _table.notifyOwner(minCursor.getKey());

    // result.completed(null);
    result.ok(null);
}
python
def authenticate(self, key_id=None, secret=None, allow_agent=False):
    """
    :param key_id: SmartDC identifier for the ssh key
    :type key_id: :py:class:`basestring`
    :param secret: path to private rsa key
    :type secret: :py:class:`basestring`
    :param allow_agent: whether or not to try ssh-agent
    :type allow_agent: :py:class:`bool`

    If no `key_id` or `secret` were entered on initialization, or there
    is a need to change the existing authentication credentials, one may
    authenticate with a `key_id` and `secret`.
    """
    if key_id and secret:
        self.auth = HTTPSignatureAuth(key_id=key_id, secret=secret,
                                      allow_agent=allow_agent)
java
public void initialize(FaxClientSpi faxClientSpi) {
    if (this.initialized) {
        throw new FaxException("Fax Modem Adapter already initialized.");
    }

    // set flag
    this.initialized = true;

    // get logger
    Logger logger = faxClientSpi.getLogger();

    // log fax client SPI information
    logger.logDebug(new Object[] { "Initializing fax modem adapter of type: ",
            this.getClass().getName(), "\nProvider Information:\n", this.getProvider() }, null);

    // initialize
    this.initializeImpl(faxClientSpi);
}
java
public static void printJob(Optional<JobExecutionInfo> jobExecutionInfoOptional) {
    if (!jobExecutionInfoOptional.isPresent()) {
        System.err.println("Job id not found.");
        return;
    }
    JobExecutionInfo jobExecutionInfo = jobExecutionInfoOptional.get();
    List<List<String>> data = new ArrayList<>();
    List<String> flags = Arrays.asList("", "-");
    data.add(Arrays.asList("Job Name", jobExecutionInfo.getJobName()));
    data.add(Arrays.asList("Job Id", jobExecutionInfo.getJobId()));
    data.add(Arrays.asList("State", jobExecutionInfo.getState().toString()));
    data.add(Arrays.asList("Completed/Launched Tasks",
            String.format("%d/%d", jobExecutionInfo.getCompletedTasks(), jobExecutionInfo.getLaunchedTasks())));
    data.add(Arrays.asList("Start Time", dateTimeFormatter.print(jobExecutionInfo.getStartTime())));
    data.add(Arrays.asList("End Time", dateTimeFormatter.print(jobExecutionInfo.getEndTime())));
    data.add(Arrays.asList("Duration", jobExecutionInfo.getState() == JobStateEnum.COMMITTED
            ? periodFormatter.print(new Period(jobExecutionInfo.getDuration().longValue()))
            : "-"));
    data.add(Arrays.asList("Tracking URL", jobExecutionInfo.getTrackingUrl()));
    data.add(Arrays.asList("Launcher Type", jobExecutionInfo.getLauncherType().name()));
    new CliTablePrinter.Builder()
            .data(data)
            .flags(flags)
            .delimiterWidth(2)
            .build()
            .printTable();
    JobInfoPrintUtils.printMetrics(jobExecutionInfo.getMetrics());
}
java
@Override
public Dag<JobExecutionPlan> compileFlow(Spec spec) {
    Preconditions.checkNotNull(spec);
    Preconditions.checkArgument(spec instanceof FlowSpec, "MultiHopFlowCompiler only accepts FlowSpecs");

    long startTime = System.nanoTime();

    FlowSpec flowSpec = (FlowSpec) spec;
    String source = ConfigUtils.getString(flowSpec.getConfig(),
            ServiceConfigKeys.FLOW_SOURCE_IDENTIFIER_KEY, "");
    String destination = ConfigUtils.getString(flowSpec.getConfig(),
            ServiceConfigKeys.FLOW_DESTINATION_IDENTIFIER_KEY, "");
    log.info(String.format("Compiling flow for source: %s and destination: %s", source, destination));

    Dag<JobExecutionPlan> jobExecutionPlanDag;
    try {
        // Compute the path from source to destination.
        FlowGraphPath flowGraphPath = flowGraph.findPath(flowSpec);
        // Convert the path into a Dag of JobExecutionPlans.
        if (flowGraphPath != null) {
            jobExecutionPlanDag = flowGraphPath.asDag(this.config);
        } else {
            Instrumented.markMeter(flowCompilationFailedMeter);
            log.info(String.format("No path found from source: %s and destination: %s", source, destination));
            return new JobExecutionPlanDagFactory().createDag(new ArrayList<>());
        }
    } catch (PathFinder.PathFinderException | SpecNotFoundException | JobTemplate.TemplateException
            | URISyntaxException | ReflectiveOperationException e) {
        Instrumented.markMeter(flowCompilationFailedMeter);
        log.error(String.format(
                "Exception encountered while compiling flow for source: %s and destination: %s",
                source, destination), e);
        return null;
    }

    Instrumented.markMeter(flowCompilationSuccessFulMeter);
    Instrumented.updateTimer(flowCompilationTimer, System.nanoTime() - startTime, TimeUnit.NANOSECONDS);

    return jobExecutionPlanDag;
}
java
public void marshall(StopJobRequest stopJobRequest, ProtocolMarshaller protocolMarshaller) {
    if (stopJobRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(stopJobRequest.getArn(), ARN_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
python
def _ingest_list(self, input_list, schema_list, path_to_root):
    '''a helper method for ingesting items in a list

    :return: valid_list
    '''
    valid_list = []

    # construct max list size
    max_size = None
    rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
    if 'max_size' in self.keyMap[rules_path_to_root].keys():
        if not self.keyMap[rules_path_to_root]['max_size']:
            return valid_list
        else:
            max_size = self.keyMap[rules_path_to_root]['max_size']

    # iterate over items in input list
    if input_list:
        rules_index = self._datatype_classes.index(schema_list[0].__class__)
        rules_type = self._datatype_names[rules_index]
        for i in range(len(input_list)):
            item_path = '%s[%s]' % (path_to_root, i)
            value_match = False
            try:
                item_index = self._datatype_classes.index(input_list[i].__class__)
                item_type = self._datatype_names[item_index]
                if item_type == rules_type:
                    value_match = True
            except:
                value_match = False
            if value_match:
                try:
                    if item_type == 'boolean':
                        valid_list.append(self._validate_boolean(input_list[i], item_path))
                    elif item_type == 'number':
                        valid_list.append(self._validate_number(input_list[i], item_path))
                    elif item_type == 'string':
                        valid_list.append(self._validate_string(input_list[i], item_path))
                    elif item_type == 'map':
                        valid_list.append(self._ingest_dict(input_list[i], schema_list[0], item_path))
                    elif item_type == 'list':
                        valid_list.append(self._ingest_list(input_list[i], schema_list[0], item_path))
                except:
                    pass
            if isinstance(max_size, int):
                if len(valid_list) == max_size:
                    return valid_list

    return valid_list
java
public void writeByte(byte value) throws UnsupportedEncodingException {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(this, tc, "writeByte", Byte.valueOf(value));
    getBodyList().add(Byte.valueOf(value));
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(this, tc, "writeByte");
}
java
@Inline(value = "$3.removeAll($1, $2)", imported = Iterables.class)
public static <E> boolean operator_remove(Collection<E> collection, Collection<? extends E> newElements) {
    return removeAll(collection, newElements);
}
java
static Matrix[] jamaSVD(double[][] inputMatrix, int dimensions) {
    // Use reflection to load the JAMA classes and perform all the
    // operations in order to avoid any compile-time dependencies on the
    // package.
    try {
        SVD_LOGGER.fine("attempting JAMA");
        isJAMAavailable();
        int rows = inputMatrix.length;
        int cols = inputMatrix[0].length; // assume at least one row

        Class<?> clazz = loadJamaMatrixClass();
        Constructor<?> c = clazz.getConstructor(double[][].class);
        Object jamaMatrix = c.newInstance(new Object[] { inputMatrix });
        Method svdMethod = clazz.getMethod("svd", new Class[] {});
        Object svdObject = svdMethod.invoke(jamaMatrix, new Object[] {});

        // convert the JAMA u,s,v matrices to our matrices
        String[] matrixMethods = new String[] { "getU", "getS", "getV" };
        String[] matrixNames = new String[] { "JAMA-U", "JAMA-S", "JAMA-V" };
        Matrix[] usv = new Matrix[3];

        // Loop to avoid repeating reflection code
        for (int i = 0; i < 3; ++i) {
            Method matrixAccessMethod = svdObject.getClass()
                    .getMethod(matrixMethods[i], new Class[] {});
            Object matrixObject = matrixAccessMethod.invoke(svdObject, new Object[] {});
            Method toArrayMethod = matrixObject.getClass()
                    .getMethod("getArray", new Class[] {});
            double[][] matrixArray = (double[][]) (toArrayMethod
                    .invoke(matrixObject, new Object[] {}));

            // JAMA computes the full SVD, so the output matrices need to be
            // truncated to the desired number of dimensions
            resize:
            switch (i) {
            case 0: { // U array
                Matrix u = Matrices.create(rows, dimensions, Type.DENSE_IN_MEMORY);
                // fill the U matrix by copying over the values
                for (int row = 0; row < rows; ++row) {
                    for (int col = 0; col < dimensions; ++col) {
                        u.set(row, col, matrixArray[row][col]);
                    }
                }
                usv[i] = u;
                break resize;
            }
            case 1: { // S array
                // special case for the diagonal matrix
                Matrix s = new DiagonalMatrix(dimensions);
                for (int diag = 0; diag < dimensions; ++diag) {
                    s.set(diag, diag, matrixArray[diag][diag]);
                }
                usv[i] = s;
                break resize;
            }
            case 2: { // V array
                // create it on disk since it's not expected that people
                // will access this matrix
                Matrix v = Matrices.create(dimensions, cols, Type.DENSE_ON_DISK);
                // Fill the V matrix by copying over the values. Note that
                // we manually transpose the matrix because JAMA returns the
                // result transposed from what we specify.
                for (int row = 0; row < dimensions; ++row) {
                    for (int col = 0; col < cols; ++col) {
                        v.set(row, col, matrixArray[col][row]);
                    }
                }
                usv[i] = v;
            }
            }
        }
        return usv;
    } catch (ClassNotFoundException cnfe) {
        SVD_LOGGER.log(Level.SEVERE, "JAMA", cnfe);
    } catch (NoSuchMethodException nsme) {
        SVD_LOGGER.log(Level.SEVERE, "JAMA", nsme);
    } catch (InstantiationException ie) {
        SVD_LOGGER.log(Level.SEVERE, "JAMA", ie);
    } catch (IllegalAccessException iae) {
        SVD_LOGGER.log(Level.SEVERE, "JAMA", iae);
    } catch (InvocationTargetException ite) {
        SVD_LOGGER.log(Level.SEVERE, "JAMA", ite);
    }
    throw new UnsupportedOperationException(
            "JAMA-based SVD is not available on this system");
}
java
@SuppressWarnings("unchecked") public static PendingMessages parse(List<?> xpendingOutput) { LettuceAssert.notNull(xpendingOutput, "XPENDING output must not be null"); LettuceAssert.isTrue(xpendingOutput.size() == 4, "XPENDING output must have exactly four output elements"); Long count = (Long) xpendingOutput.get(0); String from = (String) xpendingOutput.get(1); String to = (String) xpendingOutput.get(2); Range<String> messageIdRange = Range.create(from, to); Collection<Object> consumerMessageCounts = (Collection) xpendingOutput.get(3); Map<String, Long> counts = new LinkedHashMap<>(); for (Object element : consumerMessageCounts) { LettuceAssert.isTrue(element instanceof List, "Consumer message counts must be a List"); List<Object> messageCount = (List) element; counts.put((String) messageCount.get(0), (Long) messageCount.get(1)); } return new PendingMessages(count, messageIdRange, Collections.unmodifiableMap(counts)); }
python
def check_filters(filters):
    """
    Execute range_check for every element of an iterable.

    Parameters
    ----------
    filters : iterable
        The collection of filters to check. Each element must be a
        two-element tuple of floats or ints.

    Returns
    -------
    The input as-is, or None if it evaluates to False.

    Raises
    ------
    ValueError
        Low is greater than or equal to high for any element.
    """
    if not filters:
        return None
    try:
        return [range_check(f[0], f[1]) for f in filters]
    except ValueError as err:
        raise ValueError("Error in --filter: " + py23_str(err))
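A hedged usage sketch, assuming ``range_check`` returns the validated (low, high) pair or raises ValueError when low >= high:
python
check_filters([(0, 10), (2.5, 7.5)])   # -> list of validated pairs
check_filters([])                      # -> None (falsy input)
check_filters([(5, 1)])                # raises ValueError: Error in --filter: ...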
python
def _ParseRecords(self, parser_mediator, evt_file):
    """Parses Windows EventLog (EVT) records.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      evt_file (pyevt.file): Windows EventLog (EVT) file.
    """
    # To handle errors when parsing a Windows EventLog (EVT) file in the most
    # granular way the following code iterates over every event record. The
    # call to evt_file.get_record() and access to members of evt_record should
    # be called within a try-except.
    for record_index in range(evt_file.number_of_records):
        if parser_mediator.abort:
            break

        try:
            evt_record = evt_file.get_record(record_index)
            self._ParseRecord(parser_mediator, record_index, evt_record)
        except IOError as exception:
            parser_mediator.ProduceExtractionWarning(
                'unable to parse event record: {0:d} with error: {1!s}'.format(
                    record_index, exception))

    for record_index in range(evt_file.number_of_recovered_records):
        if parser_mediator.abort:
            break

        try:
            evt_record = evt_file.get_recovered_record(record_index)
            self._ParseRecord(
                parser_mediator, record_index, evt_record, recovered=True)
        except IOError as exception:
            parser_mediator.ProduceExtractionWarning((
                'unable to parse recovered event record: {0:d} with error: '
                '{1!s}').format(record_index, exception))
python
def widgets(self):
    """Gets all (first-level) child widgets."""
    w = []
    for i in range(self.count()):
        w.append(self.widget(i))
    return w
java
@Override
public MutationResult execute() {
    if (pendingMutations == null || pendingMutations.isEmpty()) {
        return new MutationResultImpl(true, 0, null);
    }
    final BatchMutation<K> mutations = pendingMutations.makeCopy();
    pendingMutations = null;
    return new MutationResultImpl(keyspace.doExecuteOperation(new Operation<Void>(OperationType.WRITE) {
        @Override
        public Void execute(Cassandra.Client cassandra) throws Exception {
            cassandra.batch_mutate(mutations.getMutationMap(),
                    ThriftConverter.consistencyLevel(consistencyLevelPolicy.get(operationType)));
            return null;
        }
    }));
}
python
def update_requests_request_id(self, update_request, request_id):
    """UpdateRequestsRequestId.

    [Preview API] Update a symbol request by request identifier.

    :param :class:`<Request> <azure.devops.v5_0.symbol.models.Request>` update_request: The symbol request.
    :param str request_id: The symbol request identifier.
    :rtype: :class:`<Request> <azure.devops.v5_0.symbol.models.Request>`
    """
    route_values = {}
    if request_id is not None:
        route_values['requestId'] = self._serialize.url('request_id', request_id, 'str')
    content = self._serialize.body(update_request, 'Request')
    response = self._send(http_method='PATCH',
                          location_id='ebc09fe3-1b20-4667-abc5-f2b60fe8de52',
                          version='5.0-preview.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('Request', response)
java
public <T extends AmazonWebServiceRequest> T withSdkClientExecutionTimeout(int sdkClientExecutionTimeout) {
    setSdkClientExecutionTimeout(sdkClientExecutionTimeout);
    @SuppressWarnings("unchecked")
    T t = (T) this;
    return t;
}
java
@Override
public void clearCache() {
    entityCache.clearCache(CommerceSubscriptionEntryImpl.class);

    finderCache.clearCache(FINDER_CLASS_NAME_ENTITY);
    finderCache.clearCache(FINDER_CLASS_NAME_LIST_WITH_PAGINATION);
    finderCache.clearCache(FINDER_CLASS_NAME_LIST_WITHOUT_PAGINATION);
}