Columns:
    language: string (2 classes)
    func_code_string: string (lengths 63 to 466k)
java
public void writeEmptyPages(Stack<Integer> emptyPages, RandomAccessFile file) throws IOException {
    if (emptyPages.isEmpty()) {
        this.emptyPagesSize = 0;
        return; // nothing to write
    }
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    ObjectOutputStream oos = new ObjectOutputStream(baos);
    oos.writeObject(emptyPages);
    oos.flush();
    byte[] bytes = baos.toByteArray();
    this.emptyPagesSize = bytes.length;
    oos.close();
    baos.close();
    if (this.emptyPagesSize > 0) {
        file.seek(file.length());
        file.write(bytes);
    }
}
java
public void setFocus(boolean focus) {
    if (focus) {
        if (currentFocus != null) {
            currentFocus.setFocus(false);
        }
        currentFocus = this;
    } else {
        if (currentFocus == this) {
            currentFocus = null;
        }
    }
    this.focus = focus;
}
java
private boolean collapseAssignEqualTo(Node expr, Node exprParent, Node value) {
    Node assign = expr.getFirstChild();
    Node parent = exprParent;
    Node next = expr.getNext();
    while (next != null) {
        switch (next.getToken()) {
            case AND:
            case OR:
            case HOOK:
            case IF:
            case RETURN:
            case EXPR_RESULT:
                // Dive down the left side
                parent = next;
                next = next.getFirstChild();
                break;
            case CONST:
            case LET:
            case VAR:
                if (next.getFirstChild().hasChildren()) {
                    parent = next.getFirstChild();
                    next = parent.getFirstChild();
                    break;
                }
                return false;
            case GETPROP:
            case NAME:
                if (next.isQualifiedName()) {
                    if (value.isQualifiedName() && next.matchesQualifiedName(value)) {
                        // If the previous expression evaluates to value of a
                        // qualified name, and that qualified name is used again
                        // shortly, then we can exploit the assign here.
                        // Verify the assignment doesn't change its own value.
                        if (!isSafeReplacement(next, assign)) {
                            return false;
                        }
                        exprParent.removeChild(expr);
                        expr.removeChild(assign);
                        parent.replaceChild(next, assign);
                        reportChangeToEnclosingScope(parent);
                        return true;
                    }
                }
                return false;
            case ASSIGN:
                // Assigns are really tricky. In lots of cases, we want to inline
                // into the right side of the assign. But the left side of the
                // assign is evaluated first, and it may have convoluted logic:
                //   a = null;
                //   (a = b).c = null;
                // We don't want to exploit the first assign. Similarly:
                //   a.b = null;
                //   a.b.c = null;
                // We don't want to exploit the first assign either.
                //
                // To protect against this, we simply only inline when the left side
                // is guaranteed to evaluate to the same L-value no matter what.
                Node leftSide = next.getFirstChild();
                if (leftSide.isName()
                        || (leftSide.isGetProp() && leftSide.getFirstChild().isThis())) {
                    // Dive down the right side of the assign.
                    parent = next;
                    next = leftSide.getNext();
                    break;
                } else {
                    return false;
                }
            default:
                if (NodeUtil.isImmutableValue(next) && next.isEquivalentTo(value)) {
                    // If the r-value of the expr assign is an immutable value,
                    // and the value is used again shortly, then we can exploit
                    // the assign here.
                    exprParent.removeChild(expr);
                    expr.removeChild(assign);
                    parent.replaceChild(next, assign);
                    reportChangeToEnclosingScope(parent);
                    return true;
                }
                // Return without inlining a thing
                return false;
        }
    }
    return false;
}
java
public static void main(final String[] args) throws Exception {
    if (args.length == 0) {
        System.err.format("Usage: %s <filenames>...%n", SbeTool.class.getName());
        System.exit(-1);
    }
    for (final String fileName : args) {
        final Ir ir;
        if (fileName.endsWith(".xml")) {
            final String xsdFilename = System.getProperty(SbeTool.VALIDATION_XSD);
            if (xsdFilename != null) {
                validateAgainstSchema(fileName, xsdFilename);
            }
            ir = new IrGenerator().generate(parseSchema(fileName), System.getProperty(TARGET_NAMESPACE));
        } else if (fileName.endsWith(".sbeir")) {
            ir = new IrDecoder(fileName).decode();
        } else {
            System.err.println("Input file format not supported: " + fileName);
            System.exit(-1);
            return;
        }
        final String outputDirName = System.getProperty(OUTPUT_DIR, ".");
        if (Boolean.parseBoolean(System.getProperty(GENERATE_STUBS, "true"))) {
            final String targetLanguage = System.getProperty(TARGET_LANGUAGE, "Java");
            generate(ir, outputDirName, targetLanguage);
        }
        if (Boolean.parseBoolean(System.getProperty(GENERATE_IR, "false"))) {
            final File inputFile = new File(fileName);
            final String inputFilename = inputFile.getName();
            final int nameEnd = inputFilename.lastIndexOf('.');
            final String namePart = inputFilename.substring(0, nameEnd);
            final File fullPath = new File(outputDirName, namePart + ".sbeir");
            try (IrEncoder irEncoder = new IrEncoder(fullPath.getAbsolutePath(), ir)) {
                irEncoder.encode();
            }
        }
    }
}
python
def remove_trivial(root):
    '''
    Remove redundant statements.

    The statement `a = 1` will be removed::

        a = 1
        a = 2

    The statement `a = 1` will not be removed because `b` depends on it::

        a = 1
        b = a + 2
        a = 2

    :param root: ast node
    '''
    gen = GatherAssignments()
    gen.visit(root)

    to_remove = []
    for symbol, assignments in gen.assign_id_map.items():
        if len(assignments) < 2:
            continue
        for j in range(len(assignments) - 1):
            i1 = root.body.index(assignments[j].root)
            i2 = root.body.index(assignments[j + 1].root)
            body = root.body[i1 + 1:i2]
            grapher = GraphGen()
            for stmnt in body:
                grapher.visit(stmnt)
            if symbol not in grapher.used:
                to_remove.extend(assignments[j].assignments)

    Pass = lambda node: _ast.Pass(lineno=node.lineno, col_offset=node.col_offset)
    for old in to_remove:
        replace_nodes(root, old, Pass(old))
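A minimal usage sketch, assuming remove_trivial and its helpers (GatherAssignments, GraphGen, replace_nodes) are importable from the same module; the source snippet is illustrative:

import ast

tree = ast.parse("a = 1\na = 2\n")
remove_trivial(tree)
print(ast.dump(tree))  # the first `a = 1` is now an ast.Pass node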
java
public Map<String, Collection<String>> toMap() {
    Map<String, Collection<String>> params = new HashMap<String, Collection<String>>();
    if (null != location) {
        ArrayList<String> valueList = new ArrayList<String>();
        valueList.add(location);
        params.put("location", valueList);
    }
    ArrayList<String> mediaModeValueList = new ArrayList<String>();
    mediaModeValueList.add(mediaMode.toString());
    params.put("p2p.preference", mediaModeValueList);
    ArrayList<String> archiveModeValueList = new ArrayList<String>();
    archiveModeValueList.add(archiveMode.toString());
    params.put("archiveMode", archiveModeValueList);
    return params;
}
java
public CompartmentDefinition addResource(CompartmentDefinitionResourceComponent t) { //3
    if (t == null)
        return this;
    if (this.resource == null)
        this.resource = new ArrayList<CompartmentDefinitionResourceComponent>();
    this.resource.add(t);
    return this;
}
python
def get_elevation(self, latitude, longitude, approximate=None):
    """
    If approximate is True then a basic approximation from nearby points
    will be calculated; otherwise only the exact point from the SRTM grid
    will be used.
    """
    if not (self.latitude - self.resolution <= latitude < self.latitude + 1):
        raise Exception('Invalid latitude %s for file %s' % (latitude, self.file_name))
    if not (self.longitude <= longitude < self.longitude + 1 + self.resolution):
        raise Exception('Invalid longitude %s for file %s' % (longitude, self.file_name))
    row, column = self.get_row_and_column(latitude, longitude)
    if approximate:
        return self.approximation(latitude, longitude)
    else:
        return self.get_elevation_from_row_and_column(int(row), int(column))
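A hypothetical call sketch; `tile` stands in for an instance of the class this method belongs to, and the coordinates are illustrative:

elev_grid = tile.get_elevation(46.5, 13.7)                    # exact grid point
elev_near = tile.get_elevation(46.5, 13.7, approximate=True)  # nearby-point approximation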
java
protected base_resource[] get_nitro_bulk_response(nitro_service service, String response) throws Exception {
    xen_health_monitor_temp_responses result = (xen_health_monitor_temp_responses)
            service.get_payload_formatter().string_to_resource(xen_health_monitor_temp_responses.class, response);
    if (result.errorcode != 0) {
        if (result.errorcode == SESSION_NOT_EXISTS)
            service.clear_session();
        throw new nitro_exception(result.message, result.errorcode,
                (base_response[]) result.xen_health_monitor_temp_response_array);
    }
    xen_health_monitor_temp[] result_xen_health_monitor_temp =
            new xen_health_monitor_temp[result.xen_health_monitor_temp_response_array.length];
    for (int i = 0; i < result.xen_health_monitor_temp_response_array.length; i++) {
        result_xen_health_monitor_temp[i] = result.xen_health_monitor_temp_response_array[i].xen_health_monitor_temp[0];
    }
    return result_xen_health_monitor_temp;
}
python
def load_config(json_path):
    """Load config info from a .json file and return it."""
    with open(json_path, 'r') as json_file:
        config = json.loads(json_file.read())
    # sanity-test the config:
    assert(config['tree'][0]['page'] == 'index')
    return config
python
def GetReportData(self, get_report_args, token):
    """Show how the last active breakdown evolved over time."""
    report = rdf_report_plugins.ApiReportData(
        representation_type=rdf_report_plugins.ApiReportData.RepresentationType
        .LINE_CHART)
    series_with_timestamps = client_report_utils.FetchAllGraphSeries(
        get_report_args.client_label,
        rdf_stats.ClientGraphSeries.ReportType.N_DAY_ACTIVE,
        period=rdfvalue.Duration("180d"))
    categories = {}
    for timestamp, graph_series in sorted(iteritems(series_with_timestamps)):
        self._ProcessGraphSeries(graph_series, timestamp, categories)
    graphs = []
    for k, v in iteritems(categories):
        graph = dict(label=k, data=v)
        graphs.append(graph)
    report.line_chart.data = sorted(
        (rdf_report_plugins.ApiReportDataSeries2D(
            label=label,
            points=(rdf_report_plugins.ApiReportDataPoint2D(x=x, y=y)
                    for x, y in points))
         for label, points in iteritems(categories)),
        key=lambda series: int(series.label.split()[0]),
        reverse=True)
    return report
python
def init_argparser_optional_advice(
        self, argparser, default=[], help=(
            'a comma separated list of packages to retrieve optional '
            'advice from; the provided packages should have registered '
            'the appropriate entry points for setting up the advices for '
            'the toolchain; refer to documentation for the specified '
            'packages for details'
        )):
    """
    For setting up optional advice.
    """
    argparser.add_argument(
        '--optional-advice',
        default=default, required=False,
        dest=ADVICE_PACKAGES, action=StoreRequirementList,
        metavar='<advice>[,<advice>[...]]',
        help=help,
    )
java
private static Class getByClass(String name) {
    try {
        return Thread.currentThread().getContextClassLoader().loadClass(name);
    } catch (Throwable e) {
        // ignore: fall through and return null if the class cannot be loaded
    }
    return null;
}
python
def log(self, msg, level=INFO):
    """Record a line of log in logger

    :param str msg: content of the message
    :param level: logging level
    :return: None
    """
    logger.log(level, '<{}> - '.format(self._name) + msg)
java
public void setValidators(final Iterable<Validator<?>> validators) {
    this.validators.clear();
    for (final Validator<?> validator : validators) {
        // this ensures we map OptionalConfigurationComponent classes to validators of the same class
        this.validators.put(validator.getSupportedClass(), validator);
    }
}
java
protected Map<String, Map<String, String>> createStepItemDataContext(final Framework framework,
                                                                     final String project,
                                                                     final Map<String, Map<String, String>> context,
                                                                     final Map<String, Object> configuration) {
    final Map<String, Map<String, String>> localDataContext = createScriptDataContext(framework, project, context);
    final HashMap<String, String> configMap = new HashMap<String, String>();
    // convert values to string
    for (final Map.Entry<String, Object> entry : configuration.entrySet()) {
        configMap.put(entry.getKey(), entry.getValue().toString());
    }
    localDataContext.put("config", configMap);
    return localDataContext;
}
java
private Map<String, JcrRepository> loadRepositories() {
    Map<String, JcrRepository> list = new HashMap<>();
    Set<String> names = RepositoryManager.getJcrRepositoryNames();
    for (String repositoryId : names) {
        try {
            Repository repository = RepositoryManager.getRepository(repositoryId);
            list.put(repositoryId, new JcrMsRepository(repository, pathManager, typeManager, typeHandlerManager));
            log.debug("--- loaded repository " + repositoryId);
        } catch (NoSuchRepositoryException e) {
            // should never happen
        }
    }
    return list;
}
python
def check_pin_trust(self, environ):
    """Checks if the request passed the pin test.  This returns `True` if the
    request is trusted on a pin/cookie basis and returns `False` if not.
    Additionally if the cookie's stored pin hash is wrong it will return
    `None` so that appropriate action can be taken.
    """
    if self.pin is None:
        return True
    val = parse_cookie(environ).get(self.pin_cookie_name)
    if not val or "|" not in val:
        return False
    ts, pin_hash = val.split("|", 1)
    if not ts.isdigit():
        return False
    if pin_hash != hash_pin(self.pin):
        return None
    return (time.time() - PIN_TIME) < int(ts)
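For reference, a sketch of the cookie value this check expects (a Unix timestamp and a pin hash joined by "|"), built with the same hash_pin helper the method itself calls; `pin` is illustrative:

import time
cookie_value = "{}|{}".format(int(time.time()), hash_pin(pin))
# check_pin_trust() splits on "|", compares the hash, and treats the
# cookie as expired once the timestamp is older than PIN_TIME seconds.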
java
public URI toURI() throws URISyntaxException {
    return new URI(_scheme, null, _host, _port, _path,
            _query == null ? null : UrlEncoded.decodeString(_query), _fragment);
}
java
public Surface draw (Tile tile, float dx, float dy, float dw, float dh,
                     float sx, float sy, float sw, float sh) {
    if (!checkIntersection || intersects(dx, dy, dw, dh)) {
        tile.addToBatch(batch, tint, tx(), dx, dy, dw, dh, sx, sy, sw, sh);
    }
    return this;
}
java
public DERObject toASN1Object() {
    ASN1EncodableVector v = new ASN1EncodableVector();
    v.add(version);
    v.add(holder);
    v.add(issuer);
    v.add(signature);
    v.add(serialNumber);
    v.add(attrCertValidityPeriod);
    v.add(attributes);
    if (issuerUniqueID != null) {
        v.add(issuerUniqueID);
    }
    if (extensions != null) {
        v.add(extensions);
    }
    return new DERSequence(v);
}
java
public static <M extends AbstractModule> M module(M module, String... requires) {
    return module(module, null, requires == null ? EMPTY_STRING_ARRAY : requires);
}
java
public InputStream generateThumbnailInStream(int width, int height, byte[] image,
        GenerateThumbnailInStreamOptionalParameter generateThumbnailInStreamOptionalParameter) {
    return generateThumbnailInStreamWithServiceResponseAsync(width, height, image,
            generateThumbnailInStreamOptionalParameter).toBlocking().single().body();
}
python
def _to_spans(x):
    """Convert a Candidate, Mention, or Span to a list of spans."""
    if isinstance(x, Candidate):
        return [_to_span(m) for m in x]
    elif isinstance(x, Mention):
        return [x.context]
    elif isinstance(x, TemporarySpanMention):
        return [x]
    else:
        raise ValueError(f"{type(x)} is an invalid argument type")
python
def _split(expr, pat=None, n=-1):
    """
    Split each string (a la re.split) in the Series/Index by given pattern,
    propagating NA values. Equivalent to str.split().

    :param expr:
    :param pat: Separator to split on. If None, splits on whitespace
    :param n: not supported right now
    :return: list sequence or scalar
    """
    return _string_op(expr, Split, output_type=types.List(types.string),
                      _pat=pat, _n=n)
python
def pair_visual(*args, **kwargs):
    """Deprecation wrapper"""
    warnings.warn("`pair_visual` has moved to `cleverhans.plot.pyplot_image`. "
                  "cleverhans.utils.pair_visual may be removed on or after "
                  "2019-04-24.")
    from cleverhans.plot.pyplot_image import pair_visual as new_pair_visual
    return new_pair_visual(*args, **kwargs)
python
def suggestions(self, index=None):
    """Get suggestions for correction.

    Yields:
        :class:`Suggestion` elements that encapsulate the suggested
        annotations (if index is ``None``, the default)

    Returns:
        a :class:`Suggestion` element that encapsulates the suggested
        annotations (if index is set)

    Raises:
        :class:`IndexError`
    """
    if index is None:
        return self.select(Suggestion, None, False, False)
    else:
        for i, e in enumerate(self.select(Suggestion, None, False, False)):
            if index == i:
                return e
        raise IndexError
python
async def list_transactions(self, request):
    """Fetches list of txns from validator, optionally filtered by id.

    Request:
        query:
            - head: The id of the block to use as the head of the chain
            - id: Comma separated list of txn ids to include in results

    Response:
        data: JSON array of Transaction objects with expanded headers
        head: The head used for this query (most recent if unspecified)
        link: The link to this exact query, including head block
        paging: Paging info and nav, like total resources and a next link
    """
    paging_controls = self._get_paging_controls(request)
    validator_query = client_transaction_pb2.ClientTransactionListRequest(
        head_id=self._get_head_id(request),
        transaction_ids=self._get_filter_ids(request),
        sorting=self._get_sorting_message(request, "default"),
        paging=self._make_paging_message(paging_controls))
    response = await self._query_validator(
        Message.CLIENT_TRANSACTION_LIST_REQUEST,
        client_transaction_pb2.ClientTransactionListResponse,
        validator_query)
    data = [self._expand_transaction(t) for t in response['transactions']]
    return self._wrap_paginated_response(
        request=request,
        response=response,
        controls=paging_controls,
        data=data)
java
public UNode toDoc() {
    UNode rootNode = UNode.createMapNode(m_taskID, "task");
    for (String name : m_properties.keySet()) {
        String value = m_properties.get(name);
        if (name.endsWith("Time")) {
            rootNode.addValueNode(name, formatTimestamp(value));
        } else {
            rootNode.addValueNode(name, value);
        }
    }
    return rootNode;
}
python
def freefn(self, key, free_fn):
    """
    Set a free function for the specified hash table item. When the item is
    destroyed, the free function, if any, is called on that item.
    Use this when hash items are dynamically allocated, to ensure that
    you don't have memory leaks. You can pass 'free' or NULL as a free_fn.
    Returns the item, or NULL if there is no such item.
    """
    return c_void_p(lib.zhashx_freefn(self._as_parameter_, key, free_fn))
java
public static String determineKerasBackend(Map<String, Object> modelConfig,
                                           KerasModelConfiguration config) {
    String kerasBackend = null;
    if (!modelConfig.containsKey(config.getFieldBackend())) {
        // TODO: H5 files unfortunately do not seem to have this property in keras 1.
        log.warn("Could not read keras backend used (no "
                + config.getFieldBackend() + " field found) \n");
    } else {
        kerasBackend = (String) modelConfig.get(config.getFieldBackend());
    }
    return kerasBackend;
}
python
def monitor(msg, *args, **kwargs):
    """
    Log a message with severity 'MON' on the root logger.
    """
    if len(logging.root.handlers) == 0:
        logging.basicConfig()
    logging.root.monitor(msg, *args, **kwargs)
java
public static void constraintMatrix3x3a( DMatrixRMaj L_3x6 , DMatrixRMaj L_3x3 ) {
    int index = 0;
    for( int i = 0; i < 3; i++ ) {
        L_3x3.data[index++] = L_3x6.get(i,0);
        L_3x3.data[index++] = L_3x6.get(i,1);
        L_3x3.data[index++] = L_3x6.get(i,2);
    }
}
python
def probe_async(self, callback):
    """Asynchronously connect to a device."""
    future = self._loop.launch_coroutine(self._adapter.probe())
    future.add_done_callback(lambda x: self._callback_future(None, x, callback))
python
def tone(self, tone_input, sentences=None, tones=None, content_language=None,
         accept_language=None, content_type=None, **kwargs):
    """
    Analyze general tone.

    Use the general purpose endpoint to analyze the tone of your input
    content. The service analyzes the content for emotional and language
    tones. The method always analyzes the tone of the full document; by
    default, it also analyzes the tone of each individual sentence of the
    content.
    You can submit no more than 128 KB of total input content and no more
    than 1000 individual sentences in JSON, plain text, or HTML format. The
    service analyzes the first 1000 sentences for document-level analysis
    and only the first 100 sentences for sentence-level analysis.
    Per the JSON specification, the default character encoding for JSON
    content is effectively always UTF-8; per the HTTP specification, the
    default encoding for plain text and HTML is ISO-8859-1 (effectively,
    the ASCII character set). When specifying a content type of plain text
    or HTML, include the `charset` parameter to indicate the character
    encoding of the input text; for example:
    `Content-Type: text/plain;charset=utf-8`. For `text/html`, the service
    removes HTML tags and analyzes only the textual content.
    **See also:** [Using the general-purpose
    endpoint](https://cloud.ibm.com/docs/services/tone-analyzer/using-tone.html#using-the-general-purpose-endpoint).

    :param ToneInput tone_input: JSON, plain text, or HTML input that
           contains the content to be analyzed. For JSON input, provide an
           object of type `ToneInput`.
    :param bool sentences: Indicates whether the service is to return an
           analysis of each individual sentence in addition to its analysis
           of the full document. If `true` (the default), the service
           returns results for each sentence.
    :param list[str] tones: **`2017-09-21`:** Deprecated. The service
           continues to accept the parameter for backward-compatibility,
           but the parameter no longer affects the response.
           **`2016-05-19`:** A comma-separated list of tones for which the
           service is to return its analysis of the input; the indicated
           tones apply both to the full document and to individual
           sentences of the document. You can specify one or more of the
           valid values. Omit the parameter to request results for all
           three tones.
    :param str content_language: The language of the input text for the
           request: English or French. Regional variants are treated as
           their parent language; for example, `en-US` is interpreted as
           `en`. The input content must match the specified language. Do
           not submit content that contains both languages. You can use
           different languages for **Content-Language** and
           **Accept-Language**.
           * **`2017-09-21`:** Accepts `en` or `fr`.
           * **`2016-05-19`:** Accepts only `en`.
    :param str accept_language: The desired language of the response. For
           two-character arguments, regional variants are treated as their
           parent language; for example, `en-US` is interpreted as `en`.
           You can use different languages for **Content-Language** and
           **Accept-Language**.
    :param str content_type: The type of the input. A character encoding
           can be specified by including a `charset` parameter. For
           example, 'text/plain;charset=utf-8'.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP
             status code.
    :rtype: DetailedResponse
    """
    if tone_input is None:
        raise ValueError('tone_input must be provided')
    if isinstance(tone_input, ToneInput):
        tone_input = self._convert_model(tone_input, ToneInput)

    headers = {
        'Content-Language': content_language,
        'Accept-Language': accept_language,
        'Content-Type': content_type
    }
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    sdk_headers = get_sdk_headers('tone_analyzer', 'V3', 'tone')
    headers.update(sdk_headers)

    params = {
        'version': self.version,
        'sentences': sentences,
        'tones': self._convert_list(tones)
    }

    if content_type == 'application/json' and isinstance(tone_input, dict):
        data = json.dumps(tone_input)
    else:
        data = tone_input

    url = '/v3/tone'
    response = self.request(
        method='POST', url=url, headers=headers, params=params, data=data,
        accept_json=True)
    return response
python
def parametrize_peaks(self, intervals, max_peakwidth=50, min_peakwidth=25,
                      symmetric_bounds=True):
    """
    Computes and stores the intonation profile of an audio recording.

    :param intervals: these will be the reference set of intervals to which
        peak positions correspond. For each interval, the properties of the
        corresponding peak, if it exists, will be computed and stored as the
        intonation profile.
    :param max_peakwidth: the maximum allowed width of the peak at the base
        for computing parameters of the distribution.
    :param min_peakwidth: the minimum allowed width of the peak at the base
        for computing parameters of the distribution.
    """
    assert isinstance(self.pitch_obj.pitch, np.ndarray)
    valid_pitch = self.pitch_obj.pitch
    valid_pitch = [i for i in valid_pitch if i > -10000]
    valid_pitch = np.array(valid_pitch)

    parameters = {}
    for i in xrange(len(self.histogram.peaks["peaks"][0])):
        peak_pos = self.histogram.peaks["peaks"][0][i]

        # Set left and right bounds of the distribution.
        max_leftbound = peak_pos - max_peakwidth
        max_rightbound = peak_pos + max_peakwidth
        leftbound = max_leftbound
        rightbound = max_rightbound
        nearest_valleyindex = utils.find_nearest_index(
            self.histogram.peaks["valleys"][0], peak_pos)
        if peak_pos > self.histogram.peaks["valleys"][0][nearest_valleyindex]:
            leftbound = self.histogram.peaks["valleys"][0][nearest_valleyindex]
            if len(self.histogram.peaks["valleys"][0][nearest_valleyindex + 1:]) == 0:
                rightbound = peak_pos + max_peakwidth
            else:
                offset = nearest_valleyindex + 1
                nearest_valleyindex = utils.find_nearest_index(
                    self.histogram.peaks["valleys"][0][offset:], peak_pos)
                rightbound = self.histogram.peaks["valleys"][0][offset + nearest_valleyindex]
        else:
            rightbound = self.histogram.peaks["valleys"][0][nearest_valleyindex]
            if len(self.histogram.peaks["valleys"][0][:nearest_valleyindex]) == 0:
                leftbound = peak_pos - max_peakwidth
            else:
                nearest_valleyindex = utils.find_nearest_index(
                    self.histogram.peaks["valleys"][0][:nearest_valleyindex], peak_pos)
                leftbound = self.histogram.peaks["valleys"][0][nearest_valleyindex]

        # In terms of x-axis, leftbound should be at least min_peakwidth
        # less than peak_pos, and at most max_peakwidth less than peak_pos,
        # and vice versa for the rightbound.
        if leftbound < max_leftbound:
            leftbound = max_leftbound
        elif leftbound > peak_pos - min_peakwidth:
            leftbound = peak_pos - min_peakwidth
        if rightbound > max_rightbound:
            rightbound = max_rightbound
        elif rightbound < peak_pos + min_peakwidth:
            rightbound = peak_pos + min_peakwidth

        # If symmetric bounds are asked for, then make the bounds symmetric.
        if symmetric_bounds:
            if peak_pos - leftbound < rightbound - peak_pos:
                imbalance = (rightbound - peak_pos) - (peak_pos - leftbound)
                rightbound -= imbalance
            else:
                imbalance = (peak_pos - leftbound) - (rightbound - peak_pos)
                leftbound += imbalance

        # Extract the distribution and estimate the parameters.
        distribution = valid_pitch[valid_pitch >= leftbound]
        distribution = distribution[distribution <= rightbound]
        #print peak_pos, "\t", len(distribution), "\t", leftbound, "\t", rightbound

        interval_index = utils.find_nearest_index(intervals, peak_pos)
        interval = intervals[interval_index]
        _mean = float(np.mean(distribution))
        _variance = float(variation(distribution))
        _skew = float(skew(distribution))
        _kurtosis = float(kurtosis(distribution))
        pearson_skew = float(3.0 * (_mean - peak_pos) / np.sqrt(abs(_variance)))
        parameters[interval] = {"position": float(peak_pos),
                                "mean": _mean,
                                "amplitude": float(self.histogram.peaks["peaks"][1][i]),
                                "variance": _variance,
                                "skew1": _skew,
                                "skew2": pearson_skew,
                                "kurtosis": _kurtosis}

    self.intonation_profile = parameters
java
public static URI toDirectory(final URI uri, final boolean strict) throws NormalizationException {
    return resolve(uri, getRawDirectory(uri, strict), strict);
}
java
public static Document parseBase64(String base64Data, Element instruction) throws Exception {
    byte[] imageData = Base64.decodeBase64(base64Data);
    ByteArrayInputStream bais = new ByteArrayInputStream(imageData);
    StringWriter swLogger = new StringWriter();
    PrintWriter pwLogger = new PrintWriter(swLogger);
    return parse(bais, instruction, pwLogger);
}
python
def table_groupbyagg(table_name):
    """
    Perform a groupby on a table and return an aggregation on a single column.

    This depends on some request parameters in the URL. "column" and "agg"
    must always be present, and one of "by" or "level" must be present.
    "column" is the table column on which aggregation will be performed,
    "agg" is the aggregation that will be performed, and "by"/"level"
    define how to group the data.

    Supported "agg" parameters are: mean, median, std, sum, and size.
    """
    table = orca.get_table(table_name)

    # column to aggregate
    column = request.args.get('column', None)
    if not column or column not in table.columns:
        abort(400)

    # column or index level to group by
    by = request.args.get('by', None)
    level = request.args.get('level', None)
    if (not by and not level) or (by and level):
        abort(400)

    # aggregation type
    agg = request.args.get('agg', None)
    if not agg or agg not in _GROUPBY_AGG_MAP:
        abort(400)

    column = table.get_column(column)

    # level can either be an integer level number or a string level name.
    # try converting to integer, but if that doesn't work
    # we go ahead with the string.
    if level:
        try:
            level = int(level)
        except ValueError:
            pass
        gby = column.groupby(level=level)
    else:
        by = table.get_column(by)
        gby = column.groupby(by)

    result = _GROUPBY_AGG_MAP[agg](gby)

    return (
        result.to_json(orient='split', date_format='iso'),
        200,
        {'Content-Type': 'application/json'})
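For orientation, an illustrative request against this endpoint (the route shape is assumed; table and column names are made up):

# GET /tables/households/groupbyagg?column=income&agg=mean&by=tenure
# responds with the per-group means serialized via to_json(orient='split').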
python
def get_instrument_title(self):
    """Return the current instrument title
    """
    instrument = self.context.getInstrument()
    if not instrument:
        return ""
    return api.get_title(instrument)
python
def sparse_counts_map(self):
    """Return a counts map with a sparse index scheme."""
    if self.hpx._ipix is None:
        flatarray = self.data.flatten()
    else:
        flatarray = self.expanded_counts_map()
    nz = flatarray.nonzero()[0]
    data_out = flatarray[nz]
    return (nz, data_out)
java
@Override
public void setMessageCopiedWhenSent(boolean copied) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(tc, "setMessageCopiedWhenSent", Boolean.valueOf(copied));
    _copyMessagesWhenSent = copied;
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(tc, "setMessageCopiedWhenSent");
}
java
public static InstalledIdentity load(final File jbossHome, final ProductConfig productConfig,
                                     final File... repoRoots) throws IOException {
    final InstalledImage installedImage = installedImage(jbossHome);
    return load(installedImage, productConfig, Arrays.<File>asList(repoRoots), Collections.<File>emptyList());
}
java
private void writeShortArrayTagPayload(ShortArrayTag tag) throws IOException {
    short[] shorts = tag.getValue();
    os.writeInt(shorts.length);
    for (int i = 0; i < shorts.length; i++) {
        os.writeShort(shorts[i]);
    }
}
python
def FromStream(cls, stream, auto_transfer=True, total_size=None, **kwds):
    """Create a new Download object from a stream."""
    return cls(stream, auto_transfer=auto_transfer, total_size=total_size, **kwds)
java
@SuppressWarnings("unchecked") public <T> DynamicType.Builder<T> makeInterface(Class<T> interfaceType) { return (DynamicType.Builder<T>) makeInterface(Collections.<Type>singletonList(interfaceType)); }
python
def subdevicenames(self) -> Tuple[str, ...]:
    """A |tuple| containing the device names."""
    self: NetCDFVariableBase
    return tuple(self.sequences.keys())
java
public Activations parse(XMLStreamReader reader) throws Exception {
    Activations adapters = null;
    // iterate over tags
    int iterate;
    try {
        iterate = reader.nextTag();
    } catch (XMLStreamException e) {
        // found a non-tag: carry on. Non-tags at the beginning are normally
        // comments or a DTD declaration
        iterate = reader.nextTag();
    }
    switch (iterate) {
        case END_ELEMENT: {
            // should mean we're done, so ignore it.
            break;
        }
        case START_ELEMENT: {
            switch (reader.getLocalName()) {
                case XML.ELEMENT_RESOURCE_ADAPTERS: {
                    adapters = parseResourceAdapters(reader);
                    break;
                }
                default:
                    throw new ParserException(bundle.unexpectedElement(reader.getLocalName()));
            }
            break;
        }
        default:
            throw new IllegalStateException();
    }
    return adapters;
}
python
def jenks(data, num_breaks):
    """
    Calculate Jenks natural breaks.

    Adapted from http://danieljlewis.org/files/2010/06/Jenks.pdf
    Credit: Daniel Lewis

    Arguments:
    data -- Array of values to classify.
    num_breaks -- Number of breaks to perform.
    """
    data = numpy.ma.compressed(data)
    if len(data) > 1000:
        data.sort()
        ls = numpy.linspace(0, len(data) - 1, 1000)
        ls = [int(round(x)) for x in ls]
        data_list = data[ls]
    else:
        data_list = data
    data_list.sort()

    mat1 = []
    for i in range(0, len(data_list) + 1):
        temp = []
        for j in range(0, num_breaks + 1):
            temp.append(0)
        mat1.append(temp)
    mat2 = []
    for i in range(0, len(data_list) + 1):
        temp = []
        for j in range(0, num_breaks + 1):
            temp.append(0)
        mat2.append(temp)

    for i in range(1, num_breaks + 1):
        mat1[1][i] = 1
        mat2[1][i] = 0
        for j in range(2, len(data_list) + 1):
            mat2[j][i] = float('inf')

    v = 0.0
    for l in range(2, len(data_list) + 1):
        s1 = 0.0
        s2 = 0.0
        w = 0.0
        for m in range(1, l + 1):
            i3 = l - m + 1
            val = float(data_list[i3 - 1])
            s2 += val * val
            s1 += val
            w += 1
            v = s2 - (s1 * s1) / w
            i4 = i3 - 1
            if i4 != 0:
                for j in range(2, num_breaks + 1):
                    if mat2[l][j] >= (v + mat2[i4][j - 1]):
                        mat1[l][j] = i3
                        mat2[l][j] = v + mat2[i4][j - 1]
        mat1[l][1] = 1
        mat2[l][1] = v

    k = len(data_list)
    kclass = []
    for i in range(0, num_breaks + 1):
        kclass.append(0)
    kclass[num_breaks] = float(data_list[len(data_list) - 1])
    count_num = num_breaks
    while count_num >= 2:
        id = int((mat1[k][count_num]) - 2)
        kclass[count_num - 1] = data_list[id]
        k = int((mat1[k][count_num] - 1))
        count_num -= 1
    return [float(x) for x in kclass][1:]
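A quick usage sketch with illustrative data (numpy is assumed to be imported, as in the function body):

values = [1, 2, 4, 5, 7, 9, 10, 20, 21, 22]
print(jenks(values, 3))  # upper bounds of the three classes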
python
def tables(self):
    """
    A list containing the tables in this container, in document order.
    Read-only.
    """
    from .table import Table
    return [Table(tbl, self) for tbl in self._element.tbl_lst]
python
def read_can_msg(self, channel, count):
    """
    Reads one or more CAN-messages from the buffer of the specified CAN channel.

    :param int channel:
        CAN channel to read from (:data:`Channel.CHANNEL_CH0`,
        :data:`Channel.CHANNEL_CH1`, :data:`Channel.CHANNEL_ANY`).
    :param int count: The number of CAN messages to be received.
    :return:
        Tuple with list of CAN message/s received and the CAN channel where
        the read CAN messages came from.
    :rtype: tuple(list(CanMsg), int)
    """
    c_channel = BYTE(channel)
    c_can_msg = (CanMsg * count)()
    c_count = DWORD(count)
    UcanReadCanMsgEx(self._handle, byref(c_channel), c_can_msg, byref(c_count))
    return c_can_msg[:c_count.value], c_channel.value
python
def config(p_path=None, p_overrides=None):
    """
    Retrieve the config instance.

    If a path is given, the instance is overwritten by the one that supplies
    an additional filename (for testability). Moreover, no other
    configuration files will be read when a path is given.

    Overrides will discard a setting in any configuration file and use the
    passed value instead. Structure: (section, option) => value
    The previous configuration instance will be discarded.
    """
    if not config.instance or p_path is not None or p_overrides is not None:
        try:
            config.instance = _Config(p_path, p_overrides)
        except configparser.ParsingError as perr:
            raise ConfigError(str(perr)) from perr
    return config.instance
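A usage sketch overriding one option without touching any file; the section and option names are purely illustrative, following the (section, option) => value structure from the docstring:

cfg = config(p_overrides={('section', 'option'): 'value'})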
python
def _clear_weave_cache():
    """Deletes the weave cache specified in os.environ['PYTHONCOMPILED']"""
    cache_dir = os.environ['PYTHONCOMPILED']
    if os.path.exists(cache_dir):
        shutil.rmtree(cache_dir)
    logging.info("Cleared weave cache %s", cache_dir)
java
public static RBACModel createRandomModel(Collection<String> users,
                                          Collection<String> transactions,
                                          Collection<String> roles) {
    Validate.notNull(transactions);
    Validate.notEmpty(transactions);
    Validate.noNullElements(transactions);
    SOABase context = new SOABase("c1");
    context.setSubjects(users);
    context.setActivities(transactions);
    RoleLattice roleLattice = new RoleLattice(roles);
    RBACModel rbac = new RBACModel("rbac1", context, roleLattice);
    // Role membership and permissions
    List<String> transactionList = new ArrayList<>();
    transactionList.addAll(transactions);
    Collections.shuffle(transactionList);
    List<List<String>> rolePartitions = CollectionUtils.exponentialPartition(users, roles.size());
    List<List<String>> activityPartitions = CollectionUtils.exponentialPartition(transactionList, roles.size());
    List<String> roleList = new ArrayList<>();
    roleList.addAll(rbac.getRoles());
    for (int i = 0; i < rolePartitions.size(); i++) {
        try {
            rbac.setRoleMembership(roleList.get(i), rolePartitions.get(i));
            rbac.setActivityPermission(roleList.get(i), new HashSet<>(activityPartitions.get(i)));
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    return rbac;
}
python
def SoS_eval(expr: str, extra_dict: dict = {}) -> Any:
    '''Evaluate an expression with sos dict.'''
    return eval(expr, env.sos_dict.dict(), extra_dict)
python
def get_nameserver_detail_output_show_nameserver_nameserver_xlatedomain(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_nameserver_detail = ET.Element("get_nameserver_detail")
    config = get_nameserver_detail
    output = ET.SubElement(get_nameserver_detail, "output")
    show_nameserver = ET.SubElement(output, "show-nameserver")
    nameserver_portid_key = ET.SubElement(show_nameserver, "nameserver-portid")
    nameserver_portid_key.text = kwargs.pop('nameserver_portid')
    nameserver_xlatedomain = ET.SubElement(show_nameserver, "nameserver-xlatedomain")
    nameserver_xlatedomain.text = kwargs.pop('nameserver_xlatedomain')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
def make_exploded_column(df, colname_new, colname_old):
    """
    Internal helper function used by `explode_columns()`.
    """
    s = df[colname_old].apply(pd.Series).stack()
    s.name = colname_new
    return s
java
public Lock getLock(String lockName, LockType type, String comment) {
    return new SQLLock(lockName, type, comment, getSleepTime(), this);
}
python
def get_as_datetime_with_default(self, index, default_value):
    """
    Converts array element into a Date or returns default value if
    conversion is not possible.

    :param index: an index of element to get.
    :param default_value: the default value
    :return: Date value of the element or default value if conversion is
             not supported.
    """
    value = self[index]
    return DateTimeConverter.to_datetime_with_default(value, default_value)
java
@Override
protected String doForward(final String hierarchical) {
    log.debug("Converting outgoing identifier: {}", hierarchical);
    final List<String> segments = asList(hierarchical.split(separator));
    if (segments.size() <= levels) {
        // must be a root identifier
        return "";
    }
    List<String> firstSegments = emptyList();
    List<String> lastSegment = emptyList();
    if (segments.size() > levels + 1) {
        // we subtract one for the final segment, then levels for the
        // inserted hierarchy segments we want to remove
        firstSegments = segments.subList(0, segments.size() - 1 - levels);
        lastSegment = singletonList(getLast(segments));
    } else {
        // just the trailing non-hierarchical segment
        lastSegment = singletonList(getLast(segments));
    }
    return on(separator).join(concat(firstSegments, lastSegment));
}
python
def read(self, length, timeout=None):
    """Read up to `length` number of bytes from the serial port with an
    optional timeout.

    `timeout` can be positive for a timeout in seconds, 0 for a
    non-blocking read, or negative or None for a blocking read that will
    block until `length` number of bytes are read. Default is a blocking
    read.

    For a non-blocking or timeout-bound read, read() may return data whose
    length is less than or equal to the requested length.

    Args:
        length (int): length in bytes.
        timeout (int, float, None): timeout duration in seconds.

    Returns:
        bytes: data read.

    Raises:
        SerialError: if an I/O or OS error occurs.
    """
    data = b""

    # Read length bytes if timeout is None
    # Read up to length bytes if timeout is not None
    while True:
        if timeout is not None:
            # Select
            (rlist, _, _) = select.select([self._fd], [], [], timeout)
            # If timeout
            if self._fd not in rlist:
                break

        try:
            data += os.read(self._fd, length - len(data))
        except OSError as e:
            raise SerialError(e.errno, "Reading serial port: " + e.strerror)

        if len(data) == length:
            break

    return data
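Usage sketch, with `port` standing in for an instance of this serial wrapper (the name is illustrative):

data = port.read(16, timeout=1.0)  # up to 16 bytes, give up after 1 s
data = port.read(16)               # block until all 16 bytes arrive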
java
private void writeNewException(Exception ex) throws IOException {
    output.writeByte(TC_EXCEPTION);
    resetSeenObjects();
    writeObjectInternal(ex, false, false, false); // No replacements
    resetSeenObjects();
}
python
def _is_data_from_today(self, data_point):
    """
    Takes a DataPoint from SESConnection.get_send_statistics() and returns
    True if it is talking about the current date, False if not.

    :param dict data_point: The data point to consider.
    :rtype: bool
    :returns: True if this data_point is for today, False if not (probably
        yesterday).
    """
    today = datetime.date.today()
    raw_timestr = data_point['Timestamp']
    dtime = datetime.datetime.strptime(raw_timestr, '%Y-%m-%dT%H:%M:%SZ')
    # Compare the full date, not just the day of the month, so the same
    # day in a different month or year doesn't count as today.
    return today == dtime.date()
python
def all(self):
    """
    Synchronize all registered plugins and plugin points to database.
    """
    # Django >= 1.9 changed something with the migration logic causing
    # plugins to be executed before the corresponding database tables
    # exist. This method will only do something if the database
    # tables have already been created.
    # XXX: I don't fully understand the issue and there should be
    # another way but this appears to work fine.
    if django_version >= (1, 9) and (
            not db_table_exists(Plugin._meta.db_table) or
            not db_table_exists(PluginPoint._meta.db_table)):
        return

    self.points()
python
def fit(self, X, y):
    """Fit the model using X as training data and y as target values"""
    self._data = X
    self._classes = np.unique(y)
    self._labels = y
    self._is_fitted = True
java
public void commit(Xid xid, boolean onePhase) throws XAException {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(this, tc, "commit", new Object[] {xid, "" + onePhase});
    internalCommit(xid, onePhase);
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(this, tc, "commit");
}
java
private boolean removeRecoveryRecord(RecoveryAgent recoveryAgent, FailureScope failureScope) {
    if (tc.isEntryEnabled())
        Tr.entry(tc, "removeRecoveryRecord", new Object[] { recoveryAgent, failureScope, this });
    boolean found = false;
    synchronized (_outstandingRecoveryRecords) {
        final HashSet recoveryAgentSet = _outstandingRecoveryRecords.get(failureScope);
        if (recoveryAgentSet != null) {
            found = recoveryAgentSet.remove(recoveryAgent);
        }
    }
    if (tc.isEntryEnabled())
        Tr.exit(tc, "removeRecoveryRecord", found);
    return found;
}
java
@Override
protected Result check() throws Exception {
    final ClusterHealthStatus status = client.admin().cluster().prepareHealth().get().getStatus();
    if (status == ClusterHealthStatus.RED || (failOnYellow && status == ClusterHealthStatus.YELLOW)) {
        return Result.unhealthy("Last status: %s", status.name());
    } else {
        return Result.healthy("Last status: %s", status.name());
    }
}
java
@Override
public ClassLoader getClassLoader() {
    // To follow the same behavior of Class.forName(...) I had to play
    // dirty (Supported by Sun, IBM & BEA JVMs)
    try {
        // Get a reference to this class' class-loader
        ClassLoader cl = this.getClass().getClassLoader();
        // Create a method instance representing the protected
        // getCallerClassLoader method of class ClassLoader
        Method mthd = ClassLoader.class.getDeclaredMethod("getCallerClassLoader", new Class[0]);
        // Make the method accessible.
        AccessibleObject.setAccessible(new AccessibleObject[] {mthd}, true);
        // Try to get the caller's class-loader
        return (ClassLoader) mthd.invoke(cl, new Object[0]);
    } catch (Exception all) {
        // Use this class' class-loader
        return this.getClass().getClassLoader();
    }
}
python
def percent_k(data, period):
    """
    %K.

    Formula:
    %K = (data(t) - low(n)) / (high(n) - low(n))
    """
    catch_errors.check_for_period_error(data, period)
    percent_k = [((data[idx] - np.min(data[idx+1-period:idx+1])) /
                  (np.max(data[idx+1-period:idx+1]) -
                   np.min(data[idx+1-period:idx+1])))
                 for idx in range(period-1, len(data))]
    percent_k = fill_for_noncomputable_vals(data, percent_k)
    return percent_k
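A short usage sketch with made-up prices and a 5-period window (np and the module's helpers are assumed importable, as in the function body):

import numpy as np
prices = np.array([44.0, 44.3, 44.1, 43.6, 44.3, 44.8, 45.1, 45.3])
print(percent_k(prices, 5))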
python
def removc(item, inset):
    """
    Remove an item from a character set.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/removc_c.html

    :param item: Item to be removed.
    :type item: str
    :param inset: Set to be updated.
    :type inset: spiceypy.utils.support_types.SpiceCell
    """
    assert isinstance(inset, stypes.SpiceCell)
    assert inset.dtype == 0
    item = stypes.stringToCharP(item)
    libspice.removc_c(item, ctypes.byref(inset))
java
@Override
public String getFilename(final FinderObject owner) {
    if (owner instanceof Root) {
        return ((Root) owner).getTheFilename();
    }
    return owner.getFilename();
}
python
def send_task(self, request, response):
    """send off a celery task for the current page and recache"""
    # TODO is this too messy?
    from bettercache.tasks import GeneratePage
    try:
        GeneratePage.apply_async((strip_wsgi(request),))
    except Exception:
        logger.error("failed to send celery task")
    self.set_cache(request, response)
python
def _format_grndt(self, data_c):
    """Format monthly ground data collection into string for the EPW header."""
    monthly_str = '{},{},{},{}'.format(
        data_c.header.metadata['soil conductivity'],
        data_c.header.metadata['soil density'],
        data_c.header.metadata['soil specific heat'],
        ','.join(['%.2f' % x for x in data_c.values]))
    return monthly_str
python
def remove_columns(self, column_names, inplace=False):
    """
    Returns an SFrame with one or more columns removed.

    If inplace == False (default) this operation does not modify the
    current SFrame, returning a new SFrame.

    If inplace == True, this operation modifies the current
    SFrame, returning self.

    Parameters
    ----------
    column_names : list or iterable
        A list or iterable of column names.

    inplace : bool, optional. Defaults to False.
        Whether the SFrame is modified in place.

    Returns
    -------
    out : SFrame
        The SFrame with given columns removed.

    Examples
    --------
    >>> sf = turicreate.SFrame({'id': [1, 2, 3],
    ...                         'val1': ['A', 'B', 'C'],
    ...                         'val2': [10, 11, 12]})
    >>> res = sf.remove_columns(['val1', 'val2'])
    >>> res
    +----+
    | id |
    +----+
    | 1  |
    | 2  |
    | 3  |
    +----+
    [3 rows x 1 columns]
    """
    column_names = list(column_names)
    existing_columns = dict((k, i) for i, k in enumerate(self.column_names()))

    for name in column_names:
        if name not in existing_columns:
            raise KeyError('Cannot find column %s' % name)

    # Delete it going backwards so we don't invalidate indices
    deletion_indices = sorted(existing_columns[name] for name in column_names)

    if inplace:
        ret = self
    else:
        ret = self.copy()

    for colid in reversed(deletion_indices):
        with cython_context():
            ret.__proxy__.remove_column(colid)

    ret._cache = None
    return ret
java
public static <T> T newInstance(Constructor<T> constructor, Object... args) {
    try {
        return constructor.newInstance(args);
    } catch (Exception e) {
        throw new ClientException(e);
    }
}
python
def create_combobox(self, text, choices, option, default=NoDefault,
                    tip=None, restart=False):
    """choices: couples (name, key)"""
    label = QLabel(text)
    combobox = QComboBox()
    if tip is not None:
        combobox.setToolTip(tip)
    for name, key in choices:
        if not (name is None and key is None):
            combobox.addItem(name, to_qvariant(key))
    # Insert separators
    count = 0
    for index, item in enumerate(choices):
        name, key = item
        if name is None and key is None:
            combobox.insertSeparator(index + count)
            count += 1
    self.comboboxes[combobox] = (option, default)
    layout = QHBoxLayout()
    layout.addWidget(label)
    layout.addWidget(combobox)
    layout.addStretch(1)
    layout.setContentsMargins(0, 0, 0, 0)
    widget = QWidget(self)
    widget.label = label
    widget.combobox = combobox
    widget.setLayout(layout)
    combobox.restart_required = restart
    combobox.label_text = text
    return widget
python
def get(self, container_id):
    """
    Get a container by name or ID.

    Args:
        container_id (str): Container name or ID.

    Returns:
        A :py:class:`Container` object.

    Raises:
        :py:class:`docker.errors.NotFound`
            If the container does not exist.
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    resp = self.client.api.inspect_container(container_id)
    return self.prepare_model(resp)
java
public void unpublishActor(Actor act) {
    Long integer = publishedActorMappingReverse.get(act.getActorRef());
    if ( integer != null ) {
        Log.Debug(this, "" + act.getClass().getSimpleName() + " unpublished");
        publishedActorMap.remove(integer);
        publishedActorMappingReverse.remove(act.getActorRef());
        act.__removeRemoteConnection(this);
        if ( act instanceof RemotedActor ) {
            String connectionIdentifier = getSocketRef().getConnectionIdentifier();
            ((RemotedActor) act).hasBeenUnpublished(connectionIdentifier);
        }
    }
}
java
void log(Level level, String msg) {
    if (msg != null && !msg.isEmpty())
        logger.log(level, msg);
}
java
public static void registerCacheObject(Object mxbean, String cacheManagerName, String name, boolean stats) {
    synchronized (mBeanServer) {
        // these can change during runtime, so always look it up
        ObjectName registeredObjectName = calculateObjectName(cacheManagerName, name, stats);
        try {
            if (!isRegistered(cacheManagerName, name, stats)) {
                mBeanServer.registerMBean(mxbean, registeredObjectName);
            }
        } catch (Exception e) {
            throw new CacheException(
                    "Error registering cache MXBeans for CacheManager "
                            + registeredObjectName + ". Error was " + e.getMessage(), e);
        }
    }
}
java
public static ApiPlatform fromKeyToApiPlatform(int key) {
    if (keyToPlatform.get() == null) {
        Map<Integer, ApiPlatform> map = new HashMap<>();
        for (ApiPlatform apiPlatform : ApiPlatform.values()) {
            map.put(apiPlatform.getKey(), apiPlatform);
        }
        keyToPlatform.set(Collections.unmodifiableMap(map));
    }
    return keyToPlatform.get().get(key);
}
java
public String removeAfter(String original, String marker) {
    int index = original.indexOf(marker);
    if (index != -1) {
        return original.substring(0, index);
    }
    return original;
}
java
@Override
public void setArray(int parameterIndex, Array x) throws SQLException {
    internalStmt.setArray(parameterIndex, x);
}
java
public TCQueryRequest createQuery(Dialog d, boolean dialogTermitationPermission) {
    if (d == null) {
        throw new NullPointerException("Dialog is null");
    }
    TCQueryRequestImpl tcbr = new TCQueryRequestImpl(dialogTermitationPermission);
    tcbr.setDialog(d);
    tcbr.setDestinationAddress(d.getRemoteAddress());
    tcbr.setOriginatingAddress(d.getLocalAddress());
    return tcbr;
}
java
@Override
public CPOptionValue fetchByCPOptionId_Last(long CPOptionId,
        OrderByComparator<CPOptionValue> orderByComparator) {
    int count = countByCPOptionId(CPOptionId);
    if (count == 0) {
        return null;
    }
    List<CPOptionValue> list = findByCPOptionId(CPOptionId, count - 1, count, orderByComparator);
    if (!list.isEmpty()) {
        return list.get(0);
    }
    return null;
}
python
def _fit_bmr_model(self, X, y):
    """Private function used to fit the BayesMinimumRisk model."""
    self.f_bmr = BayesMinimumRiskClassifier()
    X_bmr = self.predict_proba(X)
    self.f_bmr.fit(y, X_bmr)
    return self
python
def add(self, key, value, key_length=0):
    """Add value to key-value

    Params:
        <str> key
        <int> value
        <int> key_length

    Return:
        <int> key_value
    """
    if key_length < 1:
        key_length = len(key)
    val = self.add_method(self, key, key_length, value)
    if self.k:
        self._update(key, value)
    return val
python
def _doSendRequest(self, data, get_thread_id=False):
    """Sends the data to `SendURL`, and returns the message ID or None on failure"""
    j = self._post(self.req_url.SEND, data, fix_request=True, as_json=True)

    # update JS token if received in response
    fb_dtsg = get_jsmods_require(j, 2)
    if fb_dtsg is not None:
        self._payload_default["fb_dtsg"] = fb_dtsg

    try:
        message_ids = [
            (action["message_id"], action["thread_fbid"])
            for action in j["payload"]["actions"]
            if "message_id" in action
        ]
        if len(message_ids) != 1:
            log.warning("Got multiple message IDs back: {}".format(message_ids))
        if get_thread_id:
            return message_ids[0]
        else:
            return message_ids[0][0]
    except (KeyError, IndexError, TypeError):
        raise FBchatException(
            "Error when sending message: "
            "No message IDs could be found: {}".format(j)
        )
java
private void processParentPage(PageWrapper parentPage, PageModificationContext context) {
    if (context.getDeletedPageKey() != null) {
        // We have a deleted page. Remove its pointer from the parent.
        parentPage.getPage().update(Collections.singletonList(PageEntry.noValue(context.getDeletedPageKey())));
        parentPage.markNeedsFirstKeyUpdate();
    } else {
        // Update parent page's child pointers for modified pages.
        val toUpdate = context.getUpdatedPagePointers().stream()
                .map(pp -> new PageEntry(pp.getKey(), serializePointer(pp)))
                .collect(Collectors.toList());
        parentPage.getPage().update(toUpdate);
    }
}
java
public PutIntegrationResponseRequest withResponseParameters(java.util.Map<String, String> responseParameters) {
    setResponseParameters(responseParameters);
    return this;
}
java
static Connection connectWithURL(String username, String password, String jdbcURL,
                                 String driverName) throws ClassNotFoundException, SQLException {
    String driver = (Objects.isNull(driverName) || driverName.isEmpty())
            ? "com.mysql.cj.jdbc.Driver"
            : driverName;
    return doConnect(driver, jdbcURL, username, password);
}
python
def generate(declaration, headers=None, has_iterators=False):
    """Compile and load the reflection dictionary for a type.

    If the requested dictionary has already been cached, then load that
    instead.

    Parameters
    ----------
    declaration : str
        A type declaration (for example "vector<int>")

    headers : str or list of str
        A header file or list of header files required to compile the
        dictionary for this type.

    has_iterators : bool
        If True, then include iterators in the dictionary generation.
    """
    global NEW_DICTS

    # FIXME: _rootpy_dictionary_already_exists returns false positives
    # if a third-party module provides "incomplete" dictionaries.
    #if compiled._rootpy_dictionary_already_exists(declaration):
    #    log.debug("generate({0}) => already available".format(declaration))
    #    return

    log.debug("requesting dictionary for {0}".format(declaration))
    if headers:
        if isinstance(headers, string_types):
            headers = sorted(headers.split(';'))
        log.debug("using the headers {0}".format(', '.join(headers)))
        unique_name = ';'.join([declaration] + headers)
    else:
        unique_name = declaration
    unique_name = unique_name.replace(' ', '')

    # If the library is already loaded, do nothing
    if unique_name in LOADED_DICTS:
        log.debug("dictionary for {0} is already loaded".format(declaration))
        return

    if sys.version_info[0] < 3:
        libname = hashlib.sha512(unique_name).hexdigest()[:16]
    else:
        libname = hashlib.sha512(unique_name.encode('utf-8')).hexdigest()[:16]
    libnameso = libname + ".so"

    if ROOT.gROOT.GetVersionInt() < 53403:
        # check for this class in the global TClass list and remove it
        # fixes infinite recursion in ROOT < 5.34.03
        # (exact ROOT versions where this is required is unknown)
        cls = ROOT.gROOT.GetClass(declaration)
        if cls and not cls.IsLoaded():
            log.debug("removing {0} from gROOT.GetListOfClasses()".format(
                declaration))
            ROOT.gROOT.GetListOfClasses().Remove(cls)

    # If a .so already exists for this class, use it.
    if exists(pjoin(DICTS_PATH, libnameso)):
        log.debug("loading previously generated dictionary for {0}"
                  .format(declaration))
        if (ROOT.gInterpreter.Load(pjoin(DICTS_PATH, libnameso))
                not in (0, 1)):
            raise RuntimeError(
                "failed to load the library for '{0}' @ {1}".format(
                    declaration, libname))
        LOADED_DICTS[unique_name] = None
        return

    with lock(pjoin(DICTS_PATH, "lock"), poll_interval=5, max_age=60):
        # This dict was not previously generated so we must create it now
        log.info("generating dictionary for {0} ...".format(declaration))
        includes = ''
        if headers is not None:
            for header in headers:
                if re.match('^<.+>$', header):
                    includes += '#include {0}\n'.format(header)
                else:
                    includes += '#include "{0}"\n'.format(header)
        source = LINKDEF % locals()
        sourcepath = os.path.join(DICTS_PATH, '{0}.C'.format(libname))
        log.debug("source path: {0}".format(sourcepath))
        with open(sourcepath, 'w') as sourcefile:
            sourcefile.write(source)
        log.debug("include path: {0}".format(
            ROOT.gSystem.GetIncludePath()))
        if (ROOT.gSystem.CompileMacro(
                sourcepath, 'k-', libname, DICTS_PATH) != 1):
            raise RuntimeError(
                "failed to compile the library for '{0}'".format(
                    sourcepath))

    LOADED_DICTS[unique_name] = None
    NEW_DICTS = True
python
def do_child_matches(self, params):
    """
    \x1b[1mNAME\x1b[0m
        child_matches - Prints paths that have at least 1 child that matches <pattern>

    \x1b[1mSYNOPSIS\x1b[0m
        child_matches <path> <pattern> [inverse]

    \x1b[1mOPTIONS\x1b[0m
        * inverse: display paths which don't match (default: false)

    \x1b[1mEXAMPLES\x1b[0m
        > child_matches /services/registrations member_
        /services/registrations/foo
        /services/registrations/bar
        ...

    """
    seen = set()

    # we don't want to recurse once there's a child matching, hence exclude_recurse=
    for path in self._zk.fast_tree(params.path, exclude_recurse=params.pattern):
        parent, child = split(path)

        if parent in seen:
            continue

        match = params.pattern in child
        if params.inverse:
            if not match:
                self.show_output(parent)
                seen.add(parent)
        else:
            if match:
                self.show_output(parent)
                seen.add(parent)
python
def nsdiffs(x, m, max_D=2, test='ocsb', **kwargs):
    """Estimate the seasonal differencing term, ``D``.

    Perform a test of seasonality for different levels of ``D`` to
    estimate the number of seasonal differences required to make a given
    time series stationary. Will select the maximum value of ``D`` for
    which the time series is judged seasonally stationary by the
    statistical test.

    Parameters
    ----------
    x : array-like, shape=(n_samples, [n_features])
        The array to difference.

    m : int
        The number of seasonal periods (i.e., frequency of the time series)

    max_D : int, optional (default=2)
        Maximum number of seasonal differences allowed. Must be a positive
        integer. The estimated value of ``D`` will not exceed ``max_D``.

    test : str, optional (default='ocsb')
        Type of unit root test of seasonality to use in order to detect
        seasonal periodicity. Valid tests include ("ocsb", "ch"). Note that
        the CHTest is very slow for large data.

    Returns
    -------
    D : int
        The estimated seasonal differencing term. This is the maximum value
        of ``D`` such that ``D <= max_D`` and the time series is judged
        seasonally stationary. If the time series is constant, will return 0.
    """
    if max_D <= 0:
        raise ValueError('max_D must be a positive integer')

    # get the test - this validates m internally
    testfunc = get_callable(test, VALID_STESTS)(m, **kwargs)\
        .estimate_seasonal_differencing_term
    x = column_or_1d(check_array(x, ensure_2d=False, force_all_finite=True,
                                 dtype=DTYPE))

    if is_constant(x):
        return 0

    D = 0
    dodiff = testfunc(x)
    while dodiff == 1 and D < max_D:
        D += 1
        x = diff(x, lag=m)

        if is_constant(x):
            return D
        dodiff = testfunc(x)

    return D
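A usage sketch in the pmdarima style this function appears to come from; the dataset choice is illustrative:

from pmdarima.datasets import load_wineind
D = nsdiffs(load_wineind(), m=12, max_D=2, test='ch')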
python
def get_ring(self, tree_map, parent_map):
    """
    get a ring connection used to recover local data
    """
    assert parent_map[0] == -1
    rlst = self.find_share_ring(tree_map, parent_map, 0)
    assert len(rlst) == len(tree_map)
    ring_map = {}
    nslave = len(tree_map)
    for r in range(nslave):
        rprev = (r + nslave - 1) % nslave
        rnext = (r + 1) % nslave
        ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
    return ring_map
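A worked sketch of the result, assuming find_share_ring returns [0, 1, 2, 3] for a 4-slave topology; each rank maps to its (previous, next) ring neighbours:

# ring_map == {0: (3, 1), 1: (0, 2), 2: (1, 3), 3: (2, 0)}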
python
def convert_flatten(builder, layer, input_names, output_names, keras_layer):
    """Convert a flatten layer from keras to coreml.

    Parameters
    ----------
    keras_layer: layer
        A keras layer object.

    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    input_name, output_name = (input_names[0], output_names[0])

    # blob_order == 0 if the input blob needs not be rearranged
    # blob_order == 1 if the input blob needs to be rearranged
    blob_order = 0

    # keras_layer.input.shape may have a "?" (Dimension[None]) at the front,
    # making a 3D tensor with unknown batch size 4D
    if len(keras_layer.input.shape) == 4:
        blob_order = 1

    builder.add_flatten(name=layer, mode=blob_order, input_name=input_name,
                        output_name=output_name)
python
def edit_ticket_links(self, ticket_id, **kwargs):
    """ Edit ticket links.

    .. warning:: This method is deprecated in favour of the edit_link
       method, because a bug in the RT 3.8 REST API causes created links to
       be mapped to ticket/1. The only drawback is that edit_link cannot
       process multiple links all at once.

    :param ticket_id: ID of ticket to edit
    :keyword kwargs: Other arguments possible to set: DependsOn,
                     DependedOnBy, RefersTo, ReferredToBy, Members,
                     MemberOf. Each value should be either a ticket ID or an
                     external link. Int types are converted. Use an empty
                     string as value to delete an existing link.
    :returns: ``True``
                  Operation was successful
              ``False``
                  Ticket with given ID does not exist or unknown parameter
                  was set (in this case all other valid fields are changed)
    """
    post_data = ''
    for key in kwargs:
        post_data += "{}: {}\n".format(key, str(kwargs[key]))
    msg = self.__request('ticket/{}/links'.format(str(ticket_id)),
                         post_data={'content': post_data})
    state = msg.split('\n')[2]
    return self.RE_PATTERNS['links_updated_pattern'].match(state) is not None
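Usage sketch with illustrative ticket IDs, using only the documented keyword set (an empty string deletes an existing link); `tracker` stands in for an instance of this class:

tracker.edit_ticket_links(1234, DependsOn=4567, RefersTo='')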
java
private static EConv open0(byte[] source, byte[] destination, int ecflags) {
    // final Encoding senc = EncodingDB.getEncodings().get(source).getEncoding();
    // final Encoding denc = EncodingDB.getEncodings().get(destination).getEncoding();
    final int numTrans;
    final Entry[] entries;
    if (source.length == 0 && destination.length == 0) {
        numTrans = 0;
        entries = null;
    } else {
        final ObjPtr<Entry[]> lentries = new ObjPtr<Entry[]>();
        numTrans = searchPath(source, destination, new SearchPathCallback() {
            int additional = 0;

            public void call(byte[] source, byte[] destination, int depth) {
                if (lentries.p == null) lentries.p = new Entry[depth + 1 + additional];
                lentries.p[depth] = getEntry(source, destination);
            }
        });
        entries = lentries.p;
        if (numTrans < 0) {
            return null;
        }
    }
    EConv ec = openByTranscoderEntries(numTrans, entries);
    if (ec == null) return null;
    ec.flags = ecflags;
    ec.source = source;
    ec.destination = destination;
    return ec;
}
python
def add_files(self, *filenames, **kw):
    """
    Include added and/or removed files in the working tree in the next commit.

    :param filenames: The filenames of the files to include in the next
                      commit (zero or more strings). If no arguments are
                      given all untracked files are added.
    :param kw: Keyword arguments are ignored (instead of raising
               :exc:`~exceptions.TypeError`) to enable backwards
               compatibility with older versions of `vcs-repo-mgr` where
               the keyword argument `all` was used.
    """
    # Make sure the local repository exists and supports a working tree.
    self.create()
    self.ensure_working_tree()
    # Include added and/or removed files in the next commit.
    logger.info("Staging changes to be committed in %s ..", format_path(self.local))
    self.context.execute(*self.get_add_files_command(*filenames))