language: stringclasses (2 values)
func_code_string: stringlengths (63 to 466k)
java
public ProcessExecutor redirectOutput(OutputStream output) { if (output == null) output = NullOutputStream.NULL_OUTPUT_STREAM; PumpStreamHandler pumps = pumps(); // Only set the output stream handler, preserve the same error stream handler return streams(new PumpStreamHandler(output, pumps == null ? null : pumps.getErr(), pumps == null ? null : pumps.getInput())); }
java
@Override public DataSet get(int[] i) { List<DataSet> list = new ArrayList<>(); for(int ex : i){ list.add(get(ex)); } return DataSet.merge(list); }
java
public static String mergeSlashesInUrl(String url) { StringBuilder builder = new StringBuilder(); boolean prevIsColon = false; boolean inMerge = false; for (int i = 0; i < url.length(); i++) { char c = url.charAt(i); if (c == ':') { prevIsColon = true; builder.append(c); } else { if (c == '/') { if (prevIsColon) { builder.append(c); inMerge = false; } else { if (!inMerge) { builder.append(c); } inMerge = true; } } else { inMerge = false; builder.append(c); } prevIsColon = false; } } return builder.toString(); }
python
def _dist_kw_arg(self, k): """ Returns a dictionary of keyword arguments for the k'th distribution. :param int k: Index of the distribution in question. :rtype: ``dict`` """ if self._dist_kw_args is not None: return { key:self._dist_kw_args[key][k,:] for key in self._dist_kw_args.keys() } else: return {}
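A minimal sketch of the numpy slicing this helper relies on; the `scale` parameter array and its shape are illustrative, not from the source:

import numpy as np

# Hypothetical kwargs store: each value is a (n_dists, n_params) array.
dist_kw_args = {'scale': np.arange(6.0).reshape(3, 2)}

k = 1
# Same per-distribution slicing as _dist_kw_arg above: row k of every array.
kw = {key: dist_kw_args[key][k, :] for key in dist_kw_args}
print(kw)  # {'scale': array([2., 3.])}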
java
public void end() { if (TransitionConfig.isPrintDebug()) { getTransitionStateHolder().end(); getTransitionStateHolder().print(); } for (int i = 0, size = mTransitionControls.size(); i < size; i++) { mTransitionControls.get(i).end(); } }
python
def infix_handle(tokens): """Process infix calls.""" func, args = get_infix_items(tokens, callback=infix_handle) return "(" + func + ")(" + ", ".join(args) + ")"
python
def return_markers(self): """Reads the notes of the Ktlx recordings. """ ent_file = self._filename.with_suffix('.ent') if not ent_file.exists(): ent_file = self._filename.with_suffix('.ent.old') try: ent_notes = _read_ent(ent_file) except (FileNotFoundError, PermissionError): markers = [] else: allnote = [] for n in ent_notes: try: n['value'].keys() allnote.append(n['value']) except AttributeError: lg.debug('Note of length {} was not ' 'converted to dict'.format(n['length'])) s_freq = self._hdr['erd']['sample_freq'] pcname = '0CFEBE72-DA20-4b3a-A8AC-CDD41BFE2F0D' note_time = [] note_name = [] note_note = [] for n in allnote: if n['Text'] == 'Analyzed Data Note': continue if not n['Text']: continue if 'User' not in n['Data'].keys(): continue user1 = n['Data']['User'] == 'Persyst' user2 = False # n['Data']['User'] == 'eeg' user3 = n['Data']['User'] == pcname user4 = n['Data']['User'] == 'XLSpike - Intracranial' user5 = n['Data']['User'] == 'XLEvent - Intracranial' if user1 or user2 or user3 or user4 or user5: continue if len(n['Data']['User']) == 0: note_name.append('-unknown-') else: note_name.append(n['Data']['User'].split()[0]) note_time.append(n['Stamp'] / s_freq) note_note.append(n['Text']) markers = [] for time, name, note in zip(note_time, note_name, note_note): m = {'name': note + ' (' + name + ')', 'start': time, 'end': time, 'chan': None, } markers.append(m) return markers
java
@Override public Thread newThread(Runnable runnable) { Runnable wrappedRunnable = new InstrumentedRunnable(runnable); Thread thread = delegate.newThread(wrappedRunnable); created.mark(); return thread; }
java
private void initializeBuiltInImplementors() { builtInImplementors.put(ArrayList.class, new ArrayListImplementor()); builtInImplementors.put(ConcurrentHashMap.class, new ConcurrentHashMapImplementor()); builtInImplementors.put(GregorianCalendar.class, new GregorianCalendarImplementor()); builtInImplementors.put(HashMap.class, new HashMapImplementor()); builtInImplementors.put(HashSet.class, new HashSetImplementor()); builtInImplementors.put(LinkedList.class, new LinkedListImplementor()); builtInImplementors.put(TreeMap.class, new TreeMapImplementor()); allImplementors.putAll(builtInImplementors); }
java
public void writeUTF(String pString) throws IOException { int numchars = pString.length(); int numbytes = 0; for (int i = 0; i < numchars; i++) { int c = pString.charAt(i); if ((c >= 0x0001) && (c <= 0x007F)) { numbytes++; } else if (c > 0x07FF) { numbytes += 3; } else { numbytes += 2; } } if (numbytes > 65535) { throw new UTFDataFormatException(); } file.write((numbytes >>> 8) & 0xFF); file.write(numbytes & 0xFF); for (int i = 0; i < numchars; i++) { int c = pString.charAt(i); if ((c >= 0x0001) && (c <= 0x007F)) { file.write(c); } else if (c > 0x07FF) { file.write(0xE0 | ((c >> 12) & 0x0F)); file.write(0x80 | ((c >> 6) & 0x3F)); file.write(0x80 | (c & 0x3F)); } else { file.write(0xC0 | ((c >> 6) & 0x1F)); file.write(0x80 | (c & 0x3F)); } } }
java
public <T> Class<T> getType(String typeName) { try { return (Class<T>) classLoader.loadClass(typeName); } catch (ClassNotFoundException e) { throw new IllegalArgumentException("Cannot find class " + typeName, e); } }
python
def makeNodeID(Rec, ndType, extras=None):
    """Helper to make a node ID; `extras` names extra fields to carry along."""
    if ndType == 'raw':
        recID = Rec
    else:
        recID = Rec.get(ndType)
    if isinstance(recID, list):
        recID = tuple(recID)
    extraDict = {}
    if extras:
        for tag in extras:
            if tag == "raw":
                extraDict[tag] = Rec
            else:
                extraDict[tag] = Rec.get(tag)
    return recID, extraDict
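A small usage sketch, assuming makeNodeID as defined above; the record fields ('UT', 'TI') are hypothetical:

rec = {'UT': 'WOS:000123', 'TI': 'An example title'}
node_id, extra = makeNodeID(rec, 'UT', extras=['TI'])
print(node_id)  # 'WOS:000123'
print(extra)    # {'TI': 'An example title'}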
java
public static SubsystemSuspensionLevels findBySubsystem(EntityManager em, SubSystem subSystem) { SystemAssert.requireArgument(em != null, "Entity manager can not be null."); SystemAssert.requireArgument(subSystem != null, "Subsystem cannot be null."); TypedQuery<SubsystemSuspensionLevels> query = em.createNamedQuery("SubsystemSuspensionLevels.findBySubsystem", SubsystemSuspensionLevels.class); try { query.setParameter("subSystem", subSystem); return query.getSingleResult(); } catch (NoResultException ex) { Map<Integer, Long> levels = new HashMap<>(); levels.put(1, 60 * 60 * 1000L); levels.put(2, 10 * 60 * 60 * 1000L); levels.put(3, 24 * 60 * 60 * 1000L); levels.put(4, 3 * 24 * 60 * 60 * 1000L); levels.put(5, 10 * 24 * 60 * 60 * 1000L); SubsystemSuspensionLevels suspensionLevels = new SubsystemSuspensionLevels(null, subSystem, levels); return em.merge(suspensionLevels); } }
python
def prune_by_ngram_count_per_work(self, minimum=None, maximum=None, label=None):
    """Removes results rows if the n-gram count for all works bearing that
    n-gram is outside the range specified by `minimum` and `maximum`.

    That is, if a single witness of a single work has an n-gram count
    that falls within the specified range, all result rows for that
    n-gram are kept.

    If `label` is specified, the works checked are restricted to those
    associated with `label`.

    :param minimum: minimum n-gram count
    :type minimum: `int`
    :param maximum: maximum n-gram count
    :type maximum: `int`
    :param label: optional label to restrict requirement to
    :type label: `str`
    """
    self._logger.info('Pruning results by n-gram count per work')
    matches = self._matches
    keep_ngrams = matches[constants.NGRAM_FIELDNAME].unique()
    if label is not None:
        matches = matches[matches[constants.LABEL_FIELDNAME] == label]
    if minimum and maximum:
        keep_ngrams = matches[
            (matches[constants.COUNT_FIELDNAME] >= minimum) &
            (matches[constants.COUNT_FIELDNAME] <= maximum)][
                constants.NGRAM_FIELDNAME].unique()
    elif minimum:
        keep_ngrams = matches[
            matches[constants.COUNT_FIELDNAME] >= minimum][
                constants.NGRAM_FIELDNAME].unique()
    elif maximum:
        # Filter the (possibly label-restricted) matches, consistent with the
        # other branches; the original mistakenly referenced self._matches here.
        keep_ngrams = matches[
            matches[constants.COUNT_FIELDNAME] <= maximum][
                constants.NGRAM_FIELDNAME].unique()
    self._matches = self._matches[self._matches[
        constants.NGRAM_FIELDNAME].isin(keep_ngrams)]
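A standalone sketch of the same keep-if-any-row-in-range pruning on a toy pandas frame; the plain 'ngram'/'count' column names stand in for the constants.* fieldnames used above:

import pandas as pd

matches = pd.DataFrame({
    'ngram': ['ab', 'ab', 'cd', 'ef'],
    'count': [2, 5, 9, 1],
})
minimum, maximum = 2, 8
keep = matches[(matches['count'] >= minimum)
               & (matches['count'] <= maximum)]['ngram'].unique()
pruned = matches[matches['ngram'].isin(keep)]
print(pruned)  # keeps both 'ab' rows; 'cd' (9) and 'ef' (1) fall outside [2, 8]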
java
@Override public String resolve(String code, Object... arguments) { return String.format(code, arguments); }
python
def get_mapping(version=1, exported_at=None, app_name=None):
    """
    Return Heroku Connect mapping for the entire project.

    Args:
        version (int): Version of the Heroku Connect mapping, default: ``1``.
        exported_at (datetime.datetime): Time the export was created, default is ``now()``.
        app_name (str): Name of the Heroku application associated with the Heroku Connect add-on.

    Returns:
        dict: Heroku Connect mapping.

    Note:
        The version does not need to be incremented. Exports from the
        Heroku Connect website will always have the version number ``1``.
    """
    if exported_at is None:
        exported_at = timezone.now()
    app_name = app_name or settings.HEROKU_CONNECT_APP_NAME
    return {
        'version': version,
        'connection': {
            'organization_id': settings.HEROKU_CONNECT_ORGANIZATION_ID,
            'app_name': app_name,
            'exported_at': exported_at.isoformat(),
        },
        'mappings': [
            model.get_heroku_connect_mapping()
            for model in get_heroku_connect_models()
        ]
    }
python
def broadcast(self, command, *args, **kwargs): """ Notifies each user with a specified command. """ criterion = kwargs.pop('criterion', self.BROADCAST_FILTER_ALL) for index, user in items(self.users()): if criterion(user, command, *args, **kwargs): self.notify(user, command, *args, **kwargs)
python
def deactivate_mfa_device(self, user_name, serial_number): """Deactivate and detach MFA Device from user if device exists.""" user = self.get_user(user_name) if serial_number not in user.mfa_devices: raise IAMNotFoundException( "Device {0} not found".format(serial_number) ) user.deactivate_mfa_device(serial_number)
python
def to_dict(self): """Return the resource as a dictionary. :rtype: dict """ result_dict = {} for column in self.__table__.columns.keys(): # pylint: disable=no-member value = result_dict[column] = getattr(self, column, None) if isinstance(value, Decimal): result_dict[column] = float(result_dict[column]) elif isinstance(value, datetime.datetime): result_dict[column] = value.isoformat() return result_dict
python
def visualize_model(onnx_model, open_browser=True, dest="index.html"):
    """
    Creates a graph visualization of an ONNX protobuf model.
    It creates a SVG graph with *d3.js* and stores it into a file.

    :param onnx_model: ONNX model (protobuf object)
    :param open_browser: opens the browser
    :param dest: destination file

    Example:

    ::

        from onnxmltools.utils import visualize_model
        visualize_model(model)
    """
    graph = onnx_model.graph
    model_info = "Model produced by: " + onnx_model.producer_name + \
                 " version(" + onnx_model.producer_version + ")"
    html_str = """
    <!doctype html>
    <meta charset="utf-8">
    <title>ONNX Visualization</title>
    <script src="https://d3js.org/d3.v3.min.js"></script>
    <link rel="stylesheet" href="styles.css">
    <script src="dagre-d3.min.js"></script>
    <h2>[model_info]</h2>
    <svg id="svg-canvas" width=960 height=600></svg>
    <script id="js">
        var g = new dagreD3.graphlib.Graph()
            .setGraph({})
            .setDefaultEdgeLabel(function() { return {}; });

        [nodes_html]

        g.nodes().forEach(function(v) {
            var node = g.node(v);
            // Round the corners of the nodes
            node.rx = node.ry = 5;
        });

        [edges_html]

        // Create the renderer
        var render = new dagreD3.render();

        // Set up an SVG group so that we can translate the final graph.
        var svg = d3.select("svg"),
            svgGroup = svg.append("g");

        // Run the renderer. This is what draws the final graph.
        render(d3.select("svg g"), g);

        // Center the graph
        svgGroup.attr("transform", "translate(20, 20)");
        svg.attr("height", g.graph().height + 40);
        svg.attr("width", g.graph().width + 40);
    </script>
    """
    html_str = html_str.replace("[nodes_html]", "\n".join(
        get_nodes_builder(get_nodes(graph))))
    html_str = html_str.replace("[edges_html]", "\n".join(
        [get_set_edge(edge[0], edge[1]) for edge in get_edges(graph)]))
    html_str = html_str.replace("[model_info]", model_info)
    with open(dest, "w") as html_file:
        html_file.write(html_str)
    pkgdir = sys.modules['onnxmltools'].__path__[0]
    fullpath = os.path.join(pkgdir, "utils", "styles.css")
    shutil.copy(fullpath, os.getcwd())
    fullpath = os.path.join(pkgdir, "utils", "dagre-d3.min.js")
    shutil.copy(fullpath, os.getcwd())
    if open_browser:
        # Honor the open_browser flag and open the actual destination file,
        # rather than a hard-coded "index.html".
        open_new_tab("file://" + os.path.realpath(dest))
python
def _get_render_prepared_object(cls, context, **option_values): """ Returns a fully prepared, request-aware menu object that can be used for rendering. ``context`` could be a ``django.template.Context`` object passed to ``render_from_tag()`` by a menu tag. """ ctx_vals = cls._create_contextualvals_obj_from_context(context) opt_vals = cls._create_optionvals_obj_from_values(**option_values) if issubclass(cls, models.Model): instance = cls.get_from_collected_values(ctx_vals, opt_vals) else: instance = cls.create_from_collected_values(ctx_vals, opt_vals) if not instance: return None instance.prepare_to_render(context['request'], ctx_vals, opt_vals) return instance
python
def within_hull(point, hull): '''Return true if the point is within the convex hull''' h_prev_pt = hull[-1,:] for h_pt in hull: if np.cross(h_pt-h_prev_pt, point - h_pt) >= 0: return False h_prev_pt = h_pt return True
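A quick check of within_hull as defined above; note the sign test assumes the hull vertices are ordered clockwise (x right, y up):

import numpy as np

hull = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]])
print(within_hull(np.array([0.5, 0.5]), hull))  # True
print(within_hull(np.array([2.0, 0.5]), hull))  # False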
java
public <R> JoinOperatorSetsBase<T, R> fullOuterJoin(DataSet<R> other) { return new JoinOperatorSetsBase<>(this, other, JoinHint.OPTIMIZER_CHOOSES, JoinType.FULL_OUTER); }
java
public static boolean isArrayOfUnboundedTypeVariablesOrObjects(Type[] types) { for (Type type : types) { if (Object.class.equals(type)) { continue; } if (type instanceof TypeVariable<?>) { Type[] bounds = ((TypeVariable<?>) type).getBounds(); if (bounds == null || bounds.length == 0 || (bounds.length == 1 && Object.class.equals(bounds[0]))) { continue; } } return false; } return true; }
java
public static Deferred<Tree> fetchTree(final TSDB tsdb, final int tree_id) {
  if (tree_id < 1 || tree_id > 65535) {
    throw new IllegalArgumentException("Invalid Tree ID");
  }

  // fetch the whole row
  final GetRequest get = new GetRequest(tsdb.treeTable(), idToBytes(tree_id));
  get.family(TREE_FAMILY);

  /**
   * Called from the GetRequest with results from storage. Loops through the
   * columns and loads the tree definition and rules
   */
  final class FetchTreeCB implements Callback<Deferred<Tree>, ArrayList<KeyValue>> {
    @Override
    public Deferred<Tree> call(ArrayList<KeyValue> row) throws Exception {
      if (row == null || row.isEmpty()) {
        return Deferred.fromResult(null);
      }

      final Tree tree = new Tree();
      // WARNING: Since the JSON in storage doesn't store the tree ID, we need
      // to load it from the row key.
      tree.setTreeId(bytesToId(row.get(0).key()));

      for (KeyValue column : row) {
        if (Bytes.memcmp(TREE_QUALIFIER, column.qualifier()) == 0) {
          // it's *this* tree. We deserialize to a new object and copy
          // since the columns could be in any order and we may get a rule
          // before the tree object
          final Tree local_tree = JSON.parseToObject(column.value(), Tree.class);
          tree.created = local_tree.created;
          tree.description = local_tree.description;
          tree.name = local_tree.name;
          tree.notes = local_tree.notes;
          tree.strict_match = local_tree.strict_match;
          tree.enabled = local_tree.enabled;
          tree.store_failures = local_tree.store_failures;
        // Tree rule
        } else if (Bytes.memcmp(TreeRule.RULE_PREFIX(), column.qualifier(), 0,
            TreeRule.RULE_PREFIX().length) == 0) {
          final TreeRule rule = TreeRule.parseFromStorage(column);
          tree.addRule(rule);
        }
      }
      return Deferred.fromResult(tree);
    }
  }

  // issue the get request
  return tsdb.getClient().get(get).addCallbackDeferring(new FetchTreeCB());
}
java
protected boolean setFieldIfNecessary(String field, Object value) { if (!isOpen()) throw new ClosedObjectException("The Document object is closed."); if (!compareFieldValue(field, value)) { _doc.setField(field, value); return true; } return false; }
java
@Override public boolean tryToExpand(double splitConfidence, double tieThreshold) { // splitConfidence. Hoeffding Bound test parameter. // tieThreshold. Hoeffding Bound test parameter. SplitCriterion splitCriterion = new SDRSplitCriterionAMRules(); //SplitCriterion splitCriterion = new SDRSplitCriterionAMRulesNode();//JD for assessing only best branch // Using this criterion, find the best split per attribute and rank the results AttributeSplitSuggestion[] bestSplitSuggestions = this.getBestSplitSuggestions(splitCriterion); Arrays.sort(bestSplitSuggestions); // Declare a variable to determine if any of the splits should be performed boolean shouldSplit = false; // If only one split was returned, use it if (bestSplitSuggestions.length < 2) { shouldSplit = ((bestSplitSuggestions.length > 0) && (bestSplitSuggestions[0].merit > 0)); bestSuggestion = bestSplitSuggestions[bestSplitSuggestions.length - 1]; } // Otherwise, consider which of the splits proposed may be worth trying else { // Determine the hoeffding bound value, used to select how many instances should be used to make a test decision // to feel reasonably confident that the test chosen by this sample is the same as what would be chosen using infinite examples double hoeffdingBound = computeHoeffdingBound(1, splitConfidence, getInstancesSeen()); // Determine the top two ranked splitting suggestions bestSuggestion = bestSplitSuggestions[bestSplitSuggestions.length - 1]; AttributeSplitSuggestion secondBestSuggestion = bestSplitSuggestions[bestSplitSuggestions.length - 2]; // If the upper bound of the sample mean for the ratio of SDR(best suggestion) to SDR(second best suggestion), // as determined using the hoeffding bound, is less than 1, then the true mean is also less than 1, and thus at this // particular moment of observation the bestSuggestion is indeed the best split option with confidence 1-delta, and // splitting should occur. // Alternatively, if two or more splits are very similar or identical in terms of their splits, then a threshold limit // (default 0.05) is applied to the hoeffding bound; if the hoeffding bound is smaller than this limit then the two // competing attributes are equally good, and the split will be made on the one with the higher SDR value. if (bestSuggestion.merit > 0) { if ((((secondBestSuggestion.merit / bestSuggestion.merit) + hoeffdingBound) < 1) || (hoeffdingBound < tieThreshold)) { shouldSplit = true; } } } if (shouldSplit) { AttributeSplitSuggestion splitDecision = bestSplitSuggestions[bestSplitSuggestions.length - 1]; double minValue = Double.MAX_VALUE; double[] branchMerits = SDRSplitCriterionAMRules.computeBranchSplitMerits(bestSuggestion.resultingClassDistributions); for (int i = 0; i < bestSuggestion.numSplits(); i++) { double value = branchMerits[i]; if (value < minValue) { minValue = value; splitIndex = i; statisticsNewRuleActiveLearningNode = bestSuggestion.resultingClassDistributionFromSplit(i); } } statisticsBranchSplit = splitDecision.resultingClassDistributionFromSplit(splitIndex); statisticsOtherBranchSplit = bestSuggestion.resultingClassDistributionFromSplit(splitIndex == 0 ? 1 : 0); } return shouldSplit; }
java
@Override public ListOrganizationsResult listOrganizations(ListOrganizationsRequest request) { request = beforeClientExecution(request); return executeListOrganizations(request); }
java
void remoteGoto(String filename, int page, float llx, float lly, float urx, float ury) { addAnnotation(new PdfAnnotation(writer, llx, lly, urx, ury, new PdfAction(filename, page))); }
python
def normalize_jr(jr, url=None):
    """Normalize a JSON reference; also fix implicit reference of JSON pointer.

    input:
    - #/definitions/User
    - http://test.com/swagger.json#/definitions/User
    output:
    - http://test.com/swagger.json#/definitions/User

    input:
    - some_folder/User.json
    output:
    - http://test.com/some_folder/User.json
    """
    if jr is None:
        return jr

    idx = jr.find('#')
    path, jp = (jr[:idx], jr[idx + 1:]) if idx != -1 else (jr, None)

    if len(path) > 0:
        p = six.moves.urllib.parse.urlparse(path)
        if p.scheme == '' and url:
            p = six.moves.urllib.parse.urlparse(url)
            # it's the path of a relative file
            path = six.moves.urllib.parse.urlunparse(
                p[:2] + ('/'.join([os.path.dirname(p.path), path]),) + p[3:])
            path = derelativise_url(path)
    else:
        path = url

    if path:
        return ''.join([path, '#', jp]) if jp else path
    else:
        return '#' + jp
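A traceable usage sketch, assuming normalize_jr as defined above (this case exercises only the fragment-plus-url path, so no helper is needed):

print(normalize_jr('#/definitions/User', url='http://test.com/swagger.json'))
# -> 'http://test.com/swagger.json#/definitions/User'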
python
def prev(self): """Fetch a set of items with IDs greater than current set.""" if self.limit and self.limit == self.num_tweets: raise StopIteration self.index -= 1 if self.index < 0: # There's no way to fetch a set of tweets directly 'above' the # current set raise StopIteration data = self.results[self.index] self.max_id = self.model_results[self.index].max_id self.num_tweets += 1 return data
python
def set_volume(self, pct, channel=None): """ Sets the sound volume to the given percentage [0-100] by calling ``amixer -q set <channel> <pct>%``. If the channel is not specified, it tries to determine the default one by running ``amixer scontrols``. If that fails as well, it uses the ``Playback`` channel, as that is the only channel on the EV3. """ if channel is None: channel = self._get_channel() cmd_line = '/usr/bin/amixer -q set {0} {1:d}%'.format(channel, pct) Popen(shlex.split(cmd_line)).wait()
python
def startRecording(self, url, **options): """ Allows Tropo applications to begin recording the current session. Argument: url is a string Argument: **options is a set of optional keyword arguments. See https://www.tropo.com/docs/webapi/startrecording """ self._steps.append(StartRecording(url, **options).obj)
python
def build_input(data, batch_size, dataset, train):
  """Build CIFAR image and labels.

  Args:
    data: Tuple of (images, labels) arrays for the CIFAR data.
    batch_size: Input batch size.
    dataset: Either "cifar10" or "cifar100".
    train: True if we are training and false if we are testing.

  Returns:
    images: Batches of images of size [batch_size, image_size, image_size, 3].
    labels: Batches of labels of size [batch_size, num_classes].
  """
  image_size = 32
  depth = 3
  num_classes = 10 if dataset == "cifar10" else 100
  images, labels = data
  num_samples = images.shape[0] - images.shape[0] % batch_size
  dataset = tf.contrib.data.Dataset.from_tensor_slices(
      (images[:num_samples], labels[:num_samples]))

  def map_train(image, label):
    image = tf.image.resize_image_with_crop_or_pad(image, image_size + 4,
                                                   image_size + 4)
    image = tf.random_crop(image, [image_size, image_size, 3])
    image = tf.image.random_flip_left_right(image)
    image = tf.image.per_image_standardization(image)
    return (image, label)

  def map_test(image, label):
    image = tf.image.resize_image_with_crop_or_pad(image, image_size,
                                                   image_size)
    image = tf.image.per_image_standardization(image)
    return (image, label)

  dataset = dataset.map(map_train if train else map_test)
  dataset = dataset.batch(batch_size)
  dataset = dataset.repeat()
  if train:
    dataset = dataset.shuffle(buffer_size=16 * batch_size)

  images, labels = dataset.make_one_shot_iterator().get_next()
  images = tf.reshape(images, [batch_size, image_size, image_size, depth])
  labels = tf.reshape(labels, [batch_size, 1])
  indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1])
  labels = tf.sparse_to_dense(
      tf.concat([indices, labels], 1),
      [batch_size, num_classes], 1.0, 0.0)

  assert len(images.get_shape()) == 4
  assert images.get_shape()[0] == batch_size
  assert images.get_shape()[-1] == 3
  assert len(labels.get_shape()) == 2
  assert labels.get_shape()[0] == batch_size
  assert labels.get_shape()[1] == num_classes

  if not train:
    tf.summary.image("images", images)
  return images, labels
java
private int calcHanSum(List<NormalYaku> yakuStock) { int hanSum = 0; if (hands.isOpen()) { for (NormalYaku yaku : yakuStock) { hanSum += yaku.getKuisagari(); } } else { for (NormalYaku yaku : yakuStock) { hanSum += yaku.getHan(); } } return hanSum; }
python
def request(self, method, url, header_auth=False, realm='', **req_kwargs): ''' A loose wrapper around Requests' :class:`~requests.sessions.Session` which injects OAuth 1.0/a parameters. :param method: A string representation of the HTTP method to be used. :type method: str :param url: The resource to be requested. :type url: str :param header_auth: Authentication via header, defaults to `False.` :type header_auth: bool :param realm: The auth header realm, defaults to ``""``. :type realm: str :param \*\*req_kwargs: Keyworded args to be passed down to Requests. :type \*\*req_kwargs: dict ''' req_kwargs.setdefault('headers', {}) req_kwargs['headers'] = CaseInsensitiveDict(req_kwargs['headers']) url = self._set_url(url) entity_method = method.upper() in ENTITY_METHODS if entity_method and not req_kwargs.get('files', None): req_kwargs['headers'].setdefault('Content-Type', FORM_URLENCODED) form_urlencoded = \ req_kwargs['headers'].get('Content-Type') == FORM_URLENCODED # inline string conversion if is_basestring(req_kwargs.get('params')): req_kwargs['params'] = dict(parse_qsl(req_kwargs['params'])) if is_basestring(req_kwargs.get('data')) and form_urlencoded: req_kwargs['data'] = dict(parse_qsl(req_kwargs['data'])) req_kwargs.setdefault('timeout', OAUTH1_DEFAULT_TIMEOUT) oauth_params = self._get_oauth_params(req_kwargs) # ensure we always create new instances of dictionary elements for key, value in req_kwargs.items(): if isinstance(value, dict): req_kwargs[key] = deepcopy(value) # sign the request oauth_params['oauth_signature'] = \ self.signature.sign(self.consumer_secret, self.access_token_secret, method, url, oauth_params, req_kwargs) if header_auth and 'oauth_signature' not in \ req_kwargs['headers'].get('Authorization', ''): req_kwargs['auth'] = OAuth1Auth(oauth_params, realm) elif entity_method and 'oauth_signature' not in \ (req_kwargs.get('data') or {}): req_kwargs['data'] = req_kwargs.get('data') or {} # If we have a urlencoded entity-body we should pass the OAuth # parameters on this body. However, if we do not, then we need to # pass these over the request URI, i.e. on params. # # See: # # http://tools.ietf.org/html/rfc5849#section-3.5.2 # # and: # # http://tools.ietf.org/html/rfc5849#section-3.5.3 if form_urlencoded: req_kwargs['data'].update(oauth_params) else: req_kwargs.setdefault('params', {}) req_kwargs['params'].update(oauth_params) elif 'oauth_signature' not in url: req_kwargs.setdefault('params', {}) req_kwargs['params'].update(oauth_params) return super(OAuth1Session, self).request(method, url, **req_kwargs)
java
private void fireControlRelease(int index, int controllerIndex) { consumed = false; for (int i=0;i<controllerListeners.size();i++) { ControllerListener listener = (ControllerListener) controllerListeners.get(i); if (listener.isAcceptingInput()) { switch (index) { case LEFT: listener.controllerLeftReleased(controllerIndex); break; case RIGHT: listener.controllerRightReleased(controllerIndex); break; case UP: listener.controllerUpReleased(controllerIndex); break; case DOWN: listener.controllerDownReleased(controllerIndex); break; default: // assume button release listener.controllerButtonReleased(controllerIndex, (index - BUTTON1) + 1); break; } if (consumed) { break; } } } }
python
def set_connection_string_by_user_input(self):
    """Prompts the user to input a connection string"""
    user_connection = input(
        bcolors.WARNING + "\nFor any reason connection to " + bcolors.ENDC +
        bcolors.FAIL + "{}".format(self.connection) + bcolors.ENDC +
        bcolors.WARNING + " is not possible.\n\n" + bcolors.ENDC +
        "For more information about SQLAlchemy connection strings go to:\n" +
        "http://docs.sqlalchemy.org/en/latest/core/engines.html\n\n"
        "Please insert a valid connection string:\n" +
        bcolors.UNDERLINE + "Examples:\n\n" + bcolors.ENDC +
        "MySQL (recommended):\n" +
        bcolors.OKGREEN + "\tmysql+pymysql://user:passwd@localhost/database?charset=utf8\n" +
        bcolors.ENDC +
        "PostgreSQL:\n" +
        bcolors.OKGREEN + "\tpostgresql://scott:tiger@localhost/mydatabase\n" +
        bcolors.ENDC +
        "MsSQL (pyodbc has to be installed):\n" +
        bcolors.OKGREEN + "\tmssql+pyodbc://user:passwd@database\n" +
        bcolors.ENDC +
        "SQLite (always works):\n" +
        " - Linux:\n" +
        bcolors.OKGREEN + "\tsqlite:////absolute/path/to/database.db\n" +
        bcolors.ENDC +
        " - Windows:\n" +
        bcolors.OKGREEN + "\tsqlite:///C:\\path\\to\\database.db\n" +
        bcolors.ENDC +
        "Oracle:\n" +
        bcolors.OKGREEN + "\toracle://user:passwd@127.0.0.1:1521/database\n\n" +
        bcolors.ENDC +
        "[RETURN] for standard connection {}:\n".format(
            defaults.sqlalchemy_connection_string_default)
    )
    # Fall back to the default when the input is empty or whitespace only
    # (the original `or` test let whitespace-only input through).
    if not user_connection.strip():
        user_connection = defaults.sqlalchemy_connection_string_default
    set_connection(user_connection.strip())
python
def dispatch(self, receiver): ''' Dispatch handling of this event to a receiver. This method will invoke ``receiver._columns_streamed`` if it exists. ''' super(ColumnsStreamedEvent, self).dispatch(receiver) if hasattr(receiver, '_columns_streamed'): receiver._columns_streamed(self)
java
private final void dcompute() {// Work to do the distribution // Split out the keys into disjointly-homed sets of keys. // Find the split point. First find the range of home-indices. H2O cloud = H2O.CLOUD; int lo=cloud._memary.length, hi=-1; for( Key k : _keys ) { int i = k.home(cloud); if( i<lo ) lo=i; if( i>hi ) hi=i; // lo <= home(keys) <= hi } // Classic fork/join, but on CPUs. // Split into 3 arrays of keys: lo keys, hi keys and self keys final ArrayList<Key> locals = new ArrayList<Key>(); final ArrayList<Key> lokeys = new ArrayList<Key>(); final ArrayList<Key> hikeys = new ArrayList<Key>(); int self_idx = cloud.nidx(H2O.SELF); int mid = (lo+hi)>>>1; // Mid-point for( Key k : _keys ) { int idx = k.home(cloud); if( idx == self_idx ) locals.add(k); else if( idx < mid ) lokeys.add(k); else hikeys.add(k); } // Launch off 2 tasks for the other sets of keys, and get a place-holder // for results to block on. _lo = remote_compute(lokeys); _hi = remote_compute(hikeys); // Setup for local recursion: just use the local keys. if( locals.size() != 0 ) { // Shortcut for no local work _local = clone(); // 'this' is completer for '_local', so awaits _local completion _local._is_local = true; _local._keys = locals.toArray(new Key[locals.size()]); // Keys, including local keys (if any) _local.init(); // One-time top-level init H2O.submitTask(_local); // Begin normal execution on a FJ thread } else { tryComplete(); // No local work, so just immediate tryComplete } }
java
public Set<Profile> getProfiles() { final Set<Profile> ret = new HashSet<Profile>(); if (this.profileNames.isEmpty()) { ret.add(Profile.getDefaultProfile()); } else { for (final String name : this.profileNames) { ret.add(Profile.getProfile(name)); } } return ret; }
java
protected void setConnection(Text line) throws ParseException { connection = new ConnectionField(); connection.strain(line); Collections.sort(this.candidates); }
java
public void tryVibrate() { if (mVibrator != null) { long now = SystemClock.uptimeMillis(); // We want to try to vibrate each individual tick discretely. if (now - mLastVibrate >= 125) { mVibrator.vibrate(5); mLastVibrate = now; } } }
java
@When("^I create the zNode '(.+?)'( with content '(.+?)')? which (IS|IS NOT) ephemeral$")
public void createZNode(String path, String withContentClause, String content, boolean ephemeral) throws Exception {
    // withContentClause captures the whole optional " with content '...'" group;
    // it exists only so the parameter count matches the regex capture groups.
    if (content != null) {
        commonspec.getZookeeperSecClient().zCreate(path, content, ephemeral);
    } else {
        commonspec.getZookeeperSecClient().zCreate(path, ephemeral);
    }
}
java
public static void initUBLBE (@Nonnull final ValidationExecutorSetRegistry aRegistry) { ValueEnforcer.notNull (aRegistry, "Registry"); // For better error messages LocationBeautifierSPI.addMappings (UBL21NamespaceContext.getInstance ()); final IValidationExecutorSet aVESInvoice = aRegistry.getOfID (PeppolValidation370.VID_OPENPEPPOL_T10_V2.getWithVersion (PeppolValidation.VERSION_TO_USE)); final IValidationExecutorSet aVESCreditNote = aRegistry.getOfID (PeppolValidation370.VID_OPENPEPPOL_T14_V2.getWithVersion (PeppolValidation.VERSION_TO_USE)); if (aVESInvoice == null || aVESCreditNote == null) throw new IllegalStateException ("Standard PEPPOL artefacts must be registered before e-FFF artefacts!"); final boolean bNotDeprecated = false; aRegistry.registerValidationExecutorSet (ValidationExecutorSet.createDerived (aVESInvoice, VID_EFFF_INVOICE, "e-FFF Invoice " + VID_EFFF_INVOICE.getVersion (), bNotDeprecated, _createXSLT (BE_EFFF_300))); aRegistry.registerValidationExecutorSet (ValidationExecutorSet.createDerived (aVESCreditNote, VID_EFFF_CREDIT_NOTE, "e-FFF Credit Note " + VID_EFFF_CREDIT_NOTE.getVersion (), bNotDeprecated, _createXSLT (BE_EFFF_300))); // Not derived aRegistry.registerValidationExecutorSet (ValidationExecutorSet.create (VID_UBL_BE_INVOICE, "UBL.BE Invoice " + VID_UBL_BE_INVOICE.getVersion (), bNotDeprecated, ValidationExecutorXSD.create (EUBL21DocumentType.INVOICE), _createSCH (UBL_BE_100))); aRegistry.registerValidationExecutorSet (ValidationExecutorSet.create (VID_UBL_BE_CREDIT_NOTE, "UBL.BE Credit Note " + VID_UBL_BE_CREDIT_NOTE.getVersion (), bNotDeprecated, ValidationExecutorXSD.create (EUBL21DocumentType.CREDIT_NOTE), _createSCH (UBL_BE_100))); }
python
def call_method(self, method_name_or_object, params=None):
    """
    Calls the ``method_name`` method from the given service and returns a
    :py:class:`gemstone.client.structs.Result` instance.

    :param method_name_or_object: The name of the called method or a
        ``MethodCall`` instance
    :param params: A list or dict representing the parameters for the request
    :return: a :py:class:`gemstone.client.structs.Result` instance.
    """
    if isinstance(method_name_or_object, MethodCall):
        req_obj = method_name_or_object
    else:
        req_obj = MethodCall(method_name_or_object, params)

    raw_response = self.handle_single_request(req_obj)
    response_obj = Result(result=raw_response["result"],
                          error=raw_response["error"],
                          id=raw_response["id"],
                          method_call=req_obj)
    return response_obj
java
public boolean addAll(CompactIntSet ints) { int oldSize = size(); bitSet.or(ints.bitSet); return oldSize != size(); }
python
def parseline(line,format): """\ Given a line (a string actually) and a short string telling how to format it, return a list of python objects that result. The format string maps words (as split by line.split()) into python code: x -> Nothing; skip this word s -> Return this word as a string i -> Return this word as an int d -> Return this word as an int f -> Return this word as a float Basic parsing of strings: >>> parseline('Hello, World','ss') ['Hello,', 'World'] You can use 'x' to skip a record; you also don't have to parse every record: >>> parseline('1 2 3 4','xdd') [2, 3] >>> parseline('C1 0.0 0.0 0.0','sfff') ['C1', 0.0, 0.0, 0.0] Should this return an empty list? >>> parseline('This line wont be parsed','xx') """ xlat = {'x':None,'s':str,'f':float,'d':int,'i':int} result = [] words = line.split() for i in range(len(format)): f = format[i] trans = xlat.get(f,None) if trans: result.append(trans(words[i])) if len(result) == 0: return None if len(result) == 1: return result[0] return result
java
@Override public CommandGroup createCommandGroup(String groupId, Object[] members, CommandConfigurer configurer) { return createCommandGroup(groupId, members, false, configurer); }
python
def _get_show_ids(self): """Get the ``dict`` of show ids per series by querying the `shows.php` page. :return: show id per series, lower case and without quotes. :rtype: dict """ # get the show page logger.info('Getting show ids') r = self.session.get(self.server_url + 'shows.php', timeout=10) r.raise_for_status() soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) # populate the show ids show_ids = {} for show in soup.select('td.version > h3 > a[href^="/show/"]'): show_ids[sanitize(show.text)] = int(show['href'][6:]) logger.debug('Found %d show ids', len(show_ids)) return show_ids
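A standalone sketch of the same CSS-selector scrape on canned HTML (the real method above fetches shows.php and also sanitizes the titles); assumes BeautifulSoup is installed:

from bs4 import BeautifulSoup

html = '<td class="version"><h3><a href="/show/42">Some Show</a></h3></td>'
soup = BeautifulSoup(html, 'html.parser')
show_ids = {a.text.lower(): int(a['href'][6:])
            for a in soup.select('td.version > h3 > a[href^="/show/"]')}
print(show_ids)  # {'some show': 42}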
java
private double bend(AtomPair pair, IntStack stack, Point2d[] coords, Map<IBond,AtomPair> firstVisit) { stackBackup.clear(); assert stack.len == 0; final double score = congestion.score(); double min = score; // special case: if we have an even length path where the two // most central bonds are cyclic but the next two aren't we bend away // from each other if (pair.bndAt.length > 4 && (pair.bndAtCode & 0b11111) == 0b00110) { final IBond bndA = pair.bndAt[2]; final IBond bndB = pair.bndAt[3]; if (bfix.contains(bndA) || bfix.contains(bndB)) return Integer.MAX_VALUE; final IAtom pivotA = getCommon(bndA, pair.bndAt[1]); final IAtom pivotB = getCommon(bndB, pair.bndAt[0]); if (pivotA == null || pivotB == null) return Integer.MAX_VALUE; Arrays.fill(visited, false); int split = visit(visited, stack.xs, idxs.get(pivotA), idxs.get(bndA.getOther(pivotA)), 0); stack.len = visit(visited, stack.xs, idxs.get(pivotB), idxs.get(bndB.getOther(pivotB)), split); // perform bend one way backupCoords(backup, stack); bend(stack.xs, 0, split, pivotA, BEND_STEP); bend(stack.xs, split, stack.len, pivotB, -BEND_STEP); congestion.update(stack.xs, stack.len); if (percDiff(score, congestion.score()) >= IMPROVEMENT_PERC_THRESHOLD) { backupCoords(coords, stack); stackBackup.copyFrom(stack); min = congestion.score(); } // now bend the other way restoreCoords(stack, backup); bend(stack.xs, 0, split, pivotA, -BEND_STEP); bend(stack.xs, split, stack.len, pivotB, BEND_STEP); congestion.update(stack.xs, stack.len); if (percDiff(score, congestion.score()) >= IMPROVEMENT_PERC_THRESHOLD && congestion.score() < min) { backupCoords(coords, stack); stackBackup.copyFrom(stack); min = congestion.score(); } // restore original coordinates and reset score restoreCoords(stack, backup); congestion.update(stack.xs, stack.len); congestion.score = score; } // general case: try bending acyclic bonds in the shortest // path from inside out else { // try bending all bonds and accept the best one for (IBond bond : pair.bndAt) { if (bond.isInRing()) continue; if (bfix.contains(bond)) continue; // has this bond already been tested as part of another pair AtomPair first = firstVisit.get(bond); if (first == null) firstVisit.put(bond, first = pair); if (first != pair) continue; final IAtom beg = bond.getBegin(); final IAtom end = bond.getEnd(); final int begPriority = beg.getProperty(AtomPlacer.PRIORITY); final int endPriority = end.getProperty(AtomPlacer.PRIORITY); Arrays.fill(visited, false); if (begPriority < endPriority) stack.len = visit(visited, stack.xs, idxs.get(beg), idxs.get(end), 0); else stack.len = visit(visited, stack.xs, idxs.get(end), idxs.get(beg), 0); backupCoords(backup, stack); // bend one way if (begPriority < endPriority) bend(stack.xs, 0, stack.len, beg, pair.attempt * BEND_STEP); else bend(stack.xs, 0, stack.len, end, pair.attempt * BEND_STEP); congestion.update(visited, stack.xs, stack.len); if (percDiff(score, congestion.score()) >= IMPROVEMENT_PERC_THRESHOLD && congestion.score() < min) { backupCoords(coords, stack); stackBackup.copyFrom(stack); min = congestion.score(); } // bend other way if (begPriority < endPriority) bend(stack.xs, 0, stack.len, beg, pair.attempt * -BEND_STEP); else bend(stack.xs, 0, stack.len, end, pair.attempt * -BEND_STEP); congestion.update(visited, stack.xs, stack.len); if (percDiff(score, congestion.score()) >= IMPROVEMENT_PERC_THRESHOLD && congestion.score() < min) { backupCoords(coords, stack); stackBackup.copyFrom(stack); min = congestion.score(); } restoreCoords(stack, backup); 
congestion.update(visited, stack.xs, stack.len); congestion.score = score; } } stack.copyFrom(stackBackup); return min; }
java
private boolean createPhotoFolder(DropboxAPI<AndroidAuthSession> dropboxApi) { boolean folderCreated = false; if (dropboxApi != null) { try { dropboxApi.createFolder(mContext.getString(R.string.wings_dropbox__photo_folder)); folderCreated = true; } catch (DropboxException e) { // Consider the folder created if the folder already exists. if (e instanceof DropboxServerException) { folderCreated = DropboxServerException._403_FORBIDDEN == ((DropboxServerException) e).error; } } } return folderCreated; }
python
def read_pgroups(in_file): """Read HLAs and the pgroups they fall in. """ out = {} with open(in_file) as in_handle: for line in (l for l in in_handle if not l.startswith("#")): locus, alleles, group = line.strip().split(";") for allele in alleles.split("/"): out["HLA-%s%s" % (locus, allele)] = group return out
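A runnable sketch, assuming read_pgroups as defined above and a hypothetical p-group file in the locus;alleles;group format it expects ('#' lines are comments):

import tempfile

content = "# version\nA;01:01/01:02;A*01:01P\n"
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write(content)
print(read_pgroups(tmp.name))
# -> {'HLA-A01:01': 'A*01:01P', 'HLA-A01:02': 'A*01:01P'}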
java
private void replaceHeaders(final Message from, final Message to) { to.getHeaders().clear(); to.getHeaders().putAll(from.getHeaders()); }
python
def remove_numbers(text_string): ''' Removes any digit value discovered within text_string and returns the new string as type str. Keyword argument: - text_string: string instance Exceptions raised: - InputError: occurs should a non-string argument be passed ''' if text_string is None or text_string == "": return "" elif isinstance(text_string, str): return " ".join(re.sub(r'\b[\d.\/,]+', "", text_string).split()) else: raise InputError("string not passed as argument")
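A usage sketch, assuming remove_numbers (and its module-level InputError) as defined above:

print(remove_numbers('order 66 shipped 12/24, cost 3.50'))
# -> 'order shipped cost'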
java
public String getString(String key, String defaultValue) { return configuration.getString(key, defaultValue); }
python
def _conv_general_shape_tuple(self, lhs_shape, rhs_shape, window_strides, padding, dimension_numbers): """Generalized computation of conv shape.""" lhs_perm, rhs_perm, out_perm = self._conv_general_permutations( dimension_numbers) lhs_trans = onp.take(lhs_shape, lhs_perm) rhs_trans = onp.take(rhs_shape, rhs_perm) out_trans = self._conv_shape_tuple( lhs_trans, rhs_trans, window_strides, padding) return tuple(onp.take(out_trans, onp.argsort(out_perm)))
java
public MoneyFormatterBuilder appendSigned( MoneyFormatter whenPositiveOrZero, MoneyFormatter whenNegative) { return appendSigned(whenPositiveOrZero, whenPositiveOrZero, whenNegative); }
java
public String verifyAndExtract(String signedStr) { int index = signedStr.lastIndexOf(SIGNATURE); if (index == -1) { throw new IllegalArgumentException("Invalid input sign: " + signedStr); } String originalSignature = signedStr.substring(index + SIGNATURE.length()); String rawValue = signedStr.substring(0, index); String currentSignature = getSignature(rawValue); if (LOG.isDebugEnabled()) { LOG.debug("Signature generated for " + rawValue + " inside verify is " + currentSignature); } if (!originalSignature.equals(currentSignature)) { throw new IllegalArgumentException("Invalid sign, original = " + originalSignature + " current = " + currentSignature); } return rawValue; }
python
def directives(): ''' Return list of directives together with expected arguments and places where the directive is valid (``apachectl -L``) CLI Example: .. code-block:: bash salt '*' apache.directives ''' cmd = '{0} -L'.format(_detect_os()) ret = {} out = __salt__['cmd.run'](cmd) out = out.replace('\n\t', '\t') for line in out.splitlines(): if not line: continue comps = line.split('\t') desc = '\n'.join(comps[1:]) ret[comps[0]] = desc return ret
java
public static <T> String getSoftDeleteSQL(T t, Column softDeleteColumn, List<Object> values) { String setSql = getColumnName(softDeleteColumn) + "=" + softDeleteColumn.softDelete()[1]; return getCustomDeleteSQL(t, values, setSql); }
python
def masters(self): """Returns a list of dictionaries containing each master's state.""" fut = self.execute(b'MASTERS', encoding='utf-8') # TODO: process masters: we can adjust internal state return wait_convert(fut, parse_sentinel_masters)
java
@Override public Map<String, Object> getQueryParameters() { HashMap<String, Object> params = new HashMap<String, Object>(); params.put("type", this.type.toString()); return params; }
java
protected void unregisterElementErrListener(MediaElement element, final ListenerSubscription subscription) { if (element == null || subscription == null) { return; } element.removeErrorListener(subscription); }
java
private void printServiceInstance(ServiceInstance instance) { print("\nServiceInstance\n-------------------------"); if (instance == null) { print("null"); return; } print("serviceName: " + instance.getServiceName()); print("status: " + instance.getStatus()); print("uri: " + instance.getUri()); print("address: " + instance.getAddress()); print("monitorEnabled: " + instance.isMonitorEnabled()); print("metadata:"); Map<String, String> meta = instance.getMetadata(); for (Entry<String, String> entry : meta.entrySet()) { print(" " + entry.getKey() + " => " + entry.getValue()); } }
java
@Override public MatchResult match(URI origin, URI destination) { String queryStr = new StringBuffer() .append(generateQueryHeader()) .append(generateMatchWhereClause(origin, destination, false)) .append(generateQueryFooter()) .toString(); log.debug("SPARQL Query generated: \n {}", queryStr); return queryForMatchResult(origin, destination, queryStr); }
python
def set_data(self, value):
    """Sets a new string as response.  The value set must either be a
    unicode or bytestring.  If a unicode string is set it's encoded
    automatically to the charset of the response (utf-8 by default).

    .. versionadded:: 0.9
    """
    # if a unicode string is set, it's encoded directly so that we
    # can set the content length
    if isinstance(value, text_type):
        value = value.encode(self.charset)
    else:
        value = bytes(value)
    self.response = [value]
    if self.automatically_set_content_length:
        self.headers['Content-Length'] = str(len(value))
java
public static <T> JoinerQuery<T, Long> count(EntityPath<T> from) { JoinerQueryBase<T, Long> request = new JoinerQueryBase<>(from, true); request.distinct(false); return request; }
python
def cursor(self): """Analogous to :any:`sqlite3.Connection.cursor`""" if self.single_cursor_mode: if self._cursor is None: raise sqlite3.ProgrammingError("Cannot operate on a closed database.") return self._cursor return Cursor(self)
python
def get_token(self, user_id, password, redirect_uri, scope='/activities/update'): """Get the token. Parameters ---------- :param user_id: string The id of the user used for authentication. :param password: string The user password. :param redirect_uri: string The redirect uri of the institution. :param scope: string The desired scope. For example '/activities/update', '/read-limited', etc. Returns ------- :returns: string The token. """ return super(MemberAPI, self).get_token(user_id, password, redirect_uri, scope)
java
public void onInfoChanged(AVIMClient client, AVIMConversation conversation, JSONObject attr, String operator) { LOGGER.d("Notification --- conversation info changed by member: " + operator + ", changedTo: " + attr.toJSONString()); }
java
public static /*@pure@*/ int maxIndex(int[] ints) { int maximum = 0; int maxIndex = 0; for (int i = 0; i < ints.length; i++) { if ((i == 0) || (ints[i] > maximum)) { maxIndex = i; maximum = ints[i]; } } return maxIndex; }
java
public SDVariable cosineDistance(String name, @NonNull SDVariable label, @NonNull SDVariable predictions, int dimension) { return cosineDistance(name, label, predictions, null, LossReduce.MEAN_BY_NONZERO_WEIGHT_COUNT, dimension); }
python
def remove_section(self, section): """Remove a file section.""" existed = section in self._sections if existed: del self._sections[section] del self._proxies[section] return existed
java
@Deprecated @Override public String render( SoyTemplateInfo templateInfo, @Nullable SoyRecord data, @Nullable SoyMsgBundle msgBundle) { return (new RendererImpl(this, templateInfo.getName())) .setData(data) .setMsgBundle(msgBundle) .render(); }
python
def get(self, key: Any, default: Any = None) -> Any: """ 获取 cookie 中的 value """ if key in self: return self[key].value return default
python
def construct_latent_tower(self, images, time_axis): """Create the latent tower.""" # No latent in the first phase first_phase = tf.less( self.get_iteration_num(), self.hparams.num_iterations_1st_stage) # use all frames by default but this allows more # predicted frames at inference time latent_num_frames = self.hparams.latent_num_frames tf.logging.info("Creating latent tower with %d frames." % latent_num_frames) if latent_num_frames > 0: images = images[:, :latent_num_frames] return common_video.conv_latent_tower( images=images, time_axis=time_axis, latent_channels=self.hparams.latent_channels, min_logvar=self.hparams.latent_std_min, is_training=self.is_training, random_latent=first_phase, tiny_mode=self.hparams.tiny_mode, small_mode=self.hparams.small_mode)
python
def _from_dict(cls, _dict): """Initialize a SourceOptions object from a json dictionary.""" args = {} if 'folders' in _dict: args['folders'] = [ SourceOptionsFolder._from_dict(x) for x in (_dict.get('folders')) ] if 'objects' in _dict: args['objects'] = [ SourceOptionsObject._from_dict(x) for x in (_dict.get('objects')) ] if 'site_collections' in _dict: args['site_collections'] = [ SourceOptionsSiteColl._from_dict(x) for x in (_dict.get('site_collections')) ] if 'urls' in _dict: args['urls'] = [ SourceOptionsWebCrawl._from_dict(x) for x in (_dict.get('urls')) ] if 'buckets' in _dict: args['buckets'] = [ SourceOptionsBuckets._from_dict(x) for x in (_dict.get('buckets')) ] if 'crawl_all_buckets' in _dict: args['crawl_all_buckets'] = _dict.get('crawl_all_buckets') return cls(**args)
java
public AmqpChannel closeChannel(int replyCode, String replyText, int classId, int methodId1) { if (readyState == ReadyState.CLOSED) { return this; } Object[] args = {replyCode, replyText, classId, methodId1}; WrappedByteBuffer bodyArg = null; HashMap<String, Object> headersArg = null; String methodName = "closeChannel"; String methodId = "20" + "40"; AmqpMethod amqpMethod = MethodLookup.LookupMethod(methodId); Object[] arguments = {this, amqpMethod, this.id, args, bodyArg, headersArg}; asyncClient.enqueueAction(methodName, "channelWrite", arguments, null, null); return this; }
python
def identical_functions(self): """ :returns: A list of function matches that appear to be identical """ identical_funcs = [] for (func_a, func_b) in self.function_matches: if self.functions_probably_identical(func_a, func_b): identical_funcs.append((func_a, func_b)) return identical_funcs
python
def _replace_tex_math(node, mml_url, mc_client=None, retry=0): """call mml-api service to replace TeX math in body of node with mathml""" math = node.attrib['data-math'] or node.text if math is None: return None eq = {} if mc_client: math_key = hashlib.md5(math.encode('utf-8')).hexdigest() eq = json.loads(mc_client.get(math_key) or '{}') if not eq: res = requests.post(mml_url, {'math': math.encode('utf-8'), 'mathType': 'TeX', 'mml': 'true'}) if res: # Non-error response from requests eq = res.json() if mc_client: mc_client.set(math_key, res.text) if 'components' in eq and len(eq['components']) > 0: for component in eq['components']: if component['format'] == 'mml': mml = etree.fromstring(component['source']) if node.tag.endswith('span'): mml.set('display', 'inline') elif node.tag.endswith('div'): mml.set('display', 'block') mml.tail = node.tail return mml else: logger.warning('Retrying math TeX conversion: ' '{}'.format(json.dumps(eq, indent=4))) retry += 1 if retry < 2: return _replace_tex_math(node, mml_url, mc_client, retry) return None
python
def subs2seqs(self) -> Dict[str, List[str]]: """A |collections.defaultdict| containing the node-specific information provided by XML `sequences` element. >>> from hydpy.auxs.xmltools import XMLInterface >>> from hydpy import data >>> interface = XMLInterface('single_run.xml', data.get_path('LahnH')) >>> series_io = interface.series_io >>> subs2seqs = series_io.writers[2].subs2seqs >>> for subs, seq in sorted(subs2seqs.items()): ... print(subs, seq) node ['sim', 'obs'] """ subs2seqs = collections.defaultdict(list) nodes = find(self.find('sequences'), 'node') if nodes is not None: for seq in nodes: subs2seqs['node'].append(strip(seq.tag)) return subs2seqs
python
def applyKeyMapping(self, mapping): """ Used as the second half of the key reassignment algorithm. Loops over each row in the table, replacing references to old row keys with the new values from the mapping. """ for coltype, colname in zip(self.columntypes, self.columnnames): if coltype in ligolwtypes.IDTypes and (self.next_id is None or colname != self.next_id.column_name): column = self.getColumnByName(colname) for i, old in enumerate(column): try: column[i] = mapping[old] except KeyError: pass
java
public Observable<ServiceResponse<List<MetricDefinitionInner>>> listMetricDefinitionsWithServiceResponseAsync(String resourceGroupName, String serverName, String databaseName) { if (this.client.subscriptionId() == null) { throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null."); } if (resourceGroupName == null) { throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."); } if (serverName == null) { throw new IllegalArgumentException("Parameter serverName is required and cannot be null."); } if (databaseName == null) { throw new IllegalArgumentException("Parameter databaseName is required and cannot be null."); } if (this.client.apiVersion() == null) { throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null."); } return service.listMetricDefinitions(this.client.subscriptionId(), resourceGroupName, serverName, databaseName, this.client.apiVersion(), this.client.acceptLanguage(), this.client.userAgent()) .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<List<MetricDefinitionInner>>>>() { @Override public Observable<ServiceResponse<List<MetricDefinitionInner>>> call(Response<ResponseBody> response) { try { ServiceResponse<PageImpl<MetricDefinitionInner>> result = listMetricDefinitionsDelegate(response); List<MetricDefinitionInner> items = null; if (result.body() != null) { items = result.body().items(); } ServiceResponse<List<MetricDefinitionInner>> clientResponse = new ServiceResponse<List<MetricDefinitionInner>>(items, result.response()); return Observable.just(clientResponse); } catch (Throwable t) { return Observable.error(t); } } }); }
python
def _help():
    """ Display both SQLAlchemy and Python help statements """
    statement = '%s%s' % (shelp, phelp % ', '.join(cntx_.keys()))
    print(statement.strip())
java
protected String buildActionName(MappingPathResource mappingResource, String pkg, String classPrefix) { final String actionNameSuffix = mappingResource.getActionNameSuffix().orElse(""); // option so basically empty final String actionSuffix = namingConvention.getActionSuffix(); // e.g. 'Action' return (pkg != null ? pkg : "") + classPrefix + actionNameSuffix + actionSuffix; // e.g. sea_seaLandAction, sea_seaLandSpAction }
python
def _break_signals(self): r"""Break N-dimensional signals into N 1D signals.""" for name in list(self.signals.keys()): if self.signals[name].ndim == 2: for i, signal_1d in enumerate(self.signals[name].T): self.signals[name + '_' + str(i)] = signal_1d del self.signals[name]
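A standalone sketch of the same column-splitting idea on a plain dict of numpy arrays: a 2-D signal of shape (n_samples, n_channels) becomes one 1-D signal per channel:

import numpy as np

signals = {'eeg': np.zeros((100, 3))}
for name in list(signals):
    if signals[name].ndim == 2:
        for i, signal_1d in enumerate(signals[name].T):
            signals[name + '_' + str(i)] = signal_1d
        del signals[name]
print(sorted(signals))  # ['eeg_0', 'eeg_1', 'eeg_2']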
python
def typecast(type_, value): """ Tries to smartly typecast the given value with the given type. :param type_: The type to try to use for the given value :param value: The value to try and typecast to the given type :return: The typecasted value if possible, otherwise just the original value """ # NOTE: does not do any special validation of types before casting # will just raise errors on type casting failures if is_builtin_type(type_) or is_collections_type(type_) or is_enum_type(type_): # FIXME: move to Types enum and TYPE_MAPPING entry if is_bytes_type(type_): return decode_bytes(value) return type_(value) elif is_regex_type(type_): return typecast(str, value) elif is_typing_type(type_): try: base_type = type_.__extra__ except AttributeError: # NOTE: when handling typing._GenericAlias __extra__ is actually __origin__ base_type = type_.__origin__ arg_types = type_.__args__ if is_array_type(type_): if len(arg_types) == 1: item_type = arg_types[0] return base_type([typecast(item_type, item) for item in value]) else: return base_type(value) elif is_object_type(type_): if len(arg_types) == 2: (key_type, item_type) = arg_types return base_type( { typecast(key_type, key): typecast(item_type, item) for (key, item) in value.items() } ) else: return base_type(value) else: return base_type(value) else: return value
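A standalone sketch of the typing-introspection branch above, using the modern typing.get_origin/get_args accessors (Python 3.8+) rather than probing __extra__/__origin__ directly; it covers only the list-like and dict cases:

from typing import Dict, List, get_args, get_origin

def cast_generic(type_, value):
    origin, args = get_origin(type_), get_args(type_)
    if origin in (list, set, frozenset) and len(args) == 1:
        # Cast every item to the single declared item type.
        return origin(args[0](item) for item in value)
    if origin is dict and len(args) == 2:
        key_t, val_t = args
        return {key_t(k): val_t(v) for k, v in value.items()}
    return type_(value)

print(cast_generic(List[int], ['1', '2']))         # [1, 2]
print(cast_generic(Dict[str, float], {'a': '3'}))  # {'a': 3.0}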
java
public BigInteger unpackBigInteger() throws IOException { byte b = readByte(); if (Code.isFixInt(b)) { return BigInteger.valueOf((long) b); } switch (b) { case Code.UINT8: // unsigned int 8 byte u8 = readByte(); return BigInteger.valueOf((long) (u8 & 0xff)); case Code.UINT16: // unsigned int 16 short u16 = readShort(); return BigInteger.valueOf((long) (u16 & 0xffff)); case Code.UINT32: // unsigned int 32 int u32 = readInt(); if (u32 < 0) { return BigInteger.valueOf((long) (u32 & 0x7fffffff) + 0x80000000L); } else { return BigInteger.valueOf((long) u32); } case Code.UINT64: // unsigned int 64 long u64 = readLong(); if (u64 < 0L) { BigInteger bi = BigInteger.valueOf(u64 + Long.MAX_VALUE + 1L).setBit(63); return bi; } else { return BigInteger.valueOf(u64); } case Code.INT8: // signed int 8 byte i8 = readByte(); return BigInteger.valueOf((long) i8); case Code.INT16: // signed int 16 short i16 = readShort(); return BigInteger.valueOf((long) i16); case Code.INT32: // signed int 32 int i32 = readInt(); return BigInteger.valueOf((long) i32); case Code.INT64: // signed int 64 long i64 = readLong(); return BigInteger.valueOf(i64); } throw unexpected("Integer", b); }
java
@Override public void terminateMachine( TargetHandlerParameters parameters, String machineId ) throws TargetException { this.logger.fine( "Terminating an in-memory agent." ); Map<String,String> targetProperties = preventNull( parameters.getTargetProperties()); // If we executed real recipes, undeploy everything first. // That's because we do not really terminate the agent's machine, we just kill the agent. // So, it is important to stop and undeploy properly. if( ! simulatePlugins( targetProperties )) { this.logger.fine( "Stopping instances correctly (real recipes are used)." ); Map.Entry<String,String> ctx = parseMachineId( machineId ); ManagedApplication ma = this.manager.applicationMngr().findManagedApplicationByName( ctx.getValue()); // We do not want to undeploy the scoped instances, but its children. try { Instance scopedInstance = InstanceHelpers.findInstanceByPath( ma.getApplication(), ctx.getKey()); for( Instance childrenInstance : scopedInstance.getChildren()) this.manager.instancesMngr().changeInstanceState( ma, childrenInstance, InstanceStatus.NOT_DEPLOYED ); } catch( IOException e ) { throw new TargetException( e ); } } // Destroy the IPojo Factory factory = findIPojoFactory( parameters ); deleteIPojo( factory, machineId ); }
python
def use_refresh_token(self, refresh_token, scope=None):
        # type: (str, Optional[List[str]]) -> Tuple[se_leg_op.access_token.AccessToken, Optional[str]]
        """
        Creates a new access token, and refresh token, based on the supplied refresh token.
        :return: new access token and new refresh token if the old one had an expiration time
        """
        if refresh_token not in self.refresh_tokens:
            raise InvalidRefreshToken('{} unknown'.format(refresh_token))

        refresh_token_info = self.refresh_tokens[refresh_token]
        if 'exp' in refresh_token_info and refresh_token_info['exp'] < int(time.time()):
            raise InvalidRefreshToken('{} has expired'.format(refresh_token))

        authz_info = self.access_tokens[refresh_token_info['access_token']]

        if scope:
            if not requested_scope_is_allowed(scope, authz_info['granted_scope']):
                logger.debug('trying to refresh token with superset scope, requested_scope=%s, granted_scope=%s',
                             scope, authz_info['granted_scope'])
                raise InvalidScope('Requested scope includes non-granted value')
            scope = ' '.join(scope)
            logger.debug('refreshing token with new scope, old_scope=%s -> new_scope=%s', authz_info['scope'], scope)
        else:
            # OAuth 2.0: scope: "[...] if omitted is treated as equal to the scope originally granted by the resource owner"
            scope = authz_info['granted_scope']

        new_access_token = self._create_access_token(authz_info['sub'], authz_info[self.KEY_AUTHORIZATION_REQUEST],
                                                     authz_info['granted_scope'], scope)

        new_refresh_token = None
        if self.refresh_token_threshold \
                and 'exp' in refresh_token_info \
                and refresh_token_info['exp'] - int(time.time()) < self.refresh_token_threshold:
            # refresh token is close to expiry, issue a new one
            new_refresh_token = self.create_refresh_token(new_access_token.value)
        else:
            self.refresh_tokens[refresh_token]['access_token'] = new_access_token.value

        logger.debug('refreshed tokens, new_access_token=%s new_refresh_token=%s old_refresh_token=%s',
                     new_access_token, new_refresh_token, refresh_token)
        return new_access_token, new_refresh_token
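The rotation rule above, condensed into a standalone sketch (hypothetical helper, not part of the token store): a new refresh token is only minted when the old one carries an expiry that falls within the configured threshold.

import time

def should_rotate(refresh_token_info, threshold):
    # Mirrors the condition above: a threshold is configured, the token
    # has an expiry, and that expiry is closer than the threshold.
    exp = refresh_token_info.get('exp')
    return bool(threshold) and exp is not None and exp - int(time.time()) < threshold

assert should_rotate({'exp': int(time.time()) + 30}, threshold=3600)
assert not should_rotate({'exp': int(time.time()) + 7200}, threshold=3600)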
python
def get_derived_metric_by_version(self, id, version, **kwargs): # noqa: E501 """Get a specific historical version of a specific derived metric definition # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_derived_metric_by_version(id, version, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param int version: (required) :return: ResponseContainerDerivedMetricDefinition If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_derived_metric_by_version_with_http_info(id, version, **kwargs) # noqa: E501 else: (data) = self.get_derived_metric_by_version_with_http_info(id, version, **kwargs) # noqa: E501 return data
python
def sort_values(self, ascending=False):
        """ Sorts the values of this series """
        if self.index_type is not None:
            index_expr = grizzly_impl.get_field(self.expr, 0)
            column_expr = grizzly_impl.get_field(self.expr, 1)
            zip_expr = grizzly_impl.zip_columns([index_expr, column_expr])
            result_expr = grizzly_impl.sort(zip_expr, 1, self.weld_type, ascending)
            unzip_expr = grizzly_impl.unzip_columns(
                result_expr,
                [self.index_type, self.weld_type]
            )
            return SeriesWeld(
                unzip_expr,
                self.weld_type,
                self.df,
                self.column_name,
                self.index_type,
                self.index_name
            )
        else:
            result_expr = grizzly_impl.sort(self.expr)
            # The unindexed branch must also wrap and return the sorted
            # expression; otherwise the method silently yields None.
            return SeriesWeld(
                result_expr,
                self.weld_type,
                self.df,
                self.column_name
            )
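The zip/sort/unzip sequence above is a decorate-sort-undecorate pattern; in plain Python (illustrative equivalent, no Weld involved) it amounts to:

index, values, ascending = ['a', 'b', 'c'], [3, 1, 2], True
pairs = sorted(zip(index, values), key=lambda kv: kv[1], reverse=not ascending)
index, values = (list(t) for t in zip(*pairs))
assert index == ['b', 'c', 'a'] and values == [1, 2, 3]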
python
def execute(self): """Run selected module generator.""" if self._cli_arguments['cfn']: generate_sample_cfn_module(self.env_root) elif self._cli_arguments['sls']: generate_sample_sls_module(self.env_root) elif self._cli_arguments['sls-tsc']: generate_sample_sls_tsc_module(self.env_root) elif self._cli_arguments['stacker']: generate_sample_stacker_module(self.env_root) elif self._cli_arguments['tf']: generate_sample_tf_module(self.env_root) elif self._cli_arguments['cdk-tsc']: generate_sample_cdk_tsc_module(self.env_root) elif self._cli_arguments['cdk-py']: generate_sample_cdk_py_module(self.env_root) elif self._cli_arguments['cdk-csharp']: generate_sample_cdk_cs_module(self.env_root)
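The same dispatch could be table-driven; a standalone sketch with stub generators (the real functions are the generate_sample_* helpers above, and the argument names here are hypothetical):

env_root = '/tmp/env'
cli_arguments = {'cfn': False, 'sls': True, 'tf': False}
generators = {
    'cfn': lambda root: print('cfn sample in', root),
    'sls': lambda root: print('sls sample in', root),
    'tf': lambda root: print('tf sample in', root),
}
for name, generate in generators.items():
    if cli_arguments.get(name):
        generate(env_root)
        break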
java
public static String buildUrl(String baseUrl, String path) { if (path.startsWith("http:") || path.startsWith("https:")) { return path; } if (StringUtils.isEmpty(baseUrl)) { return file2url(concatPath(LOCAL_BASE_URL, path)); } else { if (baseUrl.startsWith("http:") || baseUrl.startsWith("https:")) { return concatPath(baseUrl, path); } else { return concatPath(file2url(baseUrl), path); } } }
java
private int renderMBeans(JsonGenerator jg, String[] mBeanNames) throws IOException, MalformedObjectNameException { jg.writeStartObject(); Set<ObjectName> nameQueries = new HashSet<ObjectName>(); Set<ObjectName> queriedObjects = new HashSet<ObjectName>(); // if no mbean names provided, add one null entry to query everything if (mBeanNames == null) { nameQueries.add(null); } else { for (String mBeanName : mBeanNames) { if (mBeanName != null) { nameQueries.add(new ObjectName(mBeanName)); } } } // perform name queries for (ObjectName nameQuery : nameQueries) { queriedObjects.addAll(mBeanServer.queryNames(nameQuery, null)); } // render each query result for (ObjectName objectName : queriedObjects) { renderMBean(jg, objectName); } jg.writeEndObject(); return HttpServletResponse.SC_OK; }
python
def update_role(role, **kwargs):
    """
        Update the role.
        Used to add permissions and users to a role.
    """
    #check_perm(kwargs.get('user_id'), 'edit_role')
    try:
        role_i = db.DBSession.query(Role).filter(Role.id == role.id).one()
        role_i.name = role.name
        role_i.code = role.code
    except NoResultFound:
        raise ResourceNotFoundError("Role (role_id=%s) does not exist" % (role.id))

    for perm in role.permissions:
        _get_perm(perm.id)
        roleperm_i = RolePerm(role_id=role.id,
                              perm_id=perm.id
                              )

        db.DBSession.add(roleperm_i)

    for user in role.users:
        _get_user(user.id)
        # Associate the user with the role itself (role_id, not the perm_id
        # used in the permission loop above).
        roleuser_i = RoleUser(user_id=user.id,
                              role_id=role.id
                              )

        db.DBSession.add(roleuser_i)

    db.DBSession.flush()
    return role_i
python
def gather_explicit_activities(self): """Aggregate all explicit activities and active forms of Agents. This function iterates over self.statements and extracts explicitly stated activity types and active forms for Agents. """ for stmt in self.statements: agents = stmt.agent_list() # Activity types given as ActivityConditions for agent in agents: if agent is not None and agent.activity is not None: agent_base = self._get_base(agent) agent_base.add_activity(agent.activity.activity_type) # Object activities given in RegulateActivity statements if isinstance(stmt, RegulateActivity): if stmt.obj is not None: obj_base = self._get_base(stmt.obj) obj_base.add_activity(stmt.obj_activity) # Activity types given in ActiveForms elif isinstance(stmt, ActiveForm): agent_base = self._get_base(stmt.agent) agent_base.add_activity(stmt.activity) if stmt.is_active: agent_base.add_active_state(stmt.activity, stmt.agent, stmt.evidence) else: agent_base.add_inactive_state(stmt.activity, stmt.agent, stmt.evidence)
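Stripped of the INDRA types, the accumulation pattern above is a per-agent lookup-or-create followed by an append; a standalone sketch (agent names and activity labels are made up for illustration):

from collections import defaultdict

# (agent, activity) pairs stand in for the Agent/Statement traversal above.
observations = [('MAPK1', 'kinase'), ('MAPK1', 'activity'), ('TP53', 'transcription')]
activities = defaultdict(set)
for agent, activity in observations:
    activities[agent].add(activity)
assert activities['MAPK1'] == {'activity', 'kinase'}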
java
public static <K, V> Map<K, V> toMap(Collection<Mappable<K, V>> aMappables) { if (aMappables == null) throw new IllegalArgumentException( "aMappables required in Organizer"); Map<K, V> map = new HashMap<K, V>(aMappables.size()); for (Mappable<K, V> mappable : aMappables) { map.put(mappable.getKey(), mappable.getValue()); } return map; }
python
def _clean_data(cls, *args, **kwargs): """ Convert raw data into a dictionary with plot-type specific methods. The result of the cleaning operation should be a dictionary. If the dictionary contains a 'data' field it will be passed directly (ensuring appropriate formatting). Otherwise, it should be a dictionary of data-type specific array data (e.g. 'points', 'timeseries'), which will be labeled appropriately (see _check_unkeyed_arrays). """ datadict = cls.clean(*args, **kwargs) if 'data' in datadict: data = datadict['data'] data = cls._ensure_dict_or_list(data) else: data = {} for key in datadict: if key == 'images': data[key] = datadict[key] else: d = cls._ensure_dict_or_list(datadict[key]) data[key] = cls._check_unkeyed_arrays(key, d) return data