language (stringclasses, 2 values)
func_code_string (stringlengths, 63 to 466k)
python
def row_contributions(self, X):
    """Returns the row contributions towards each principal component."""
    utils.validation.check_is_fitted(self, 's_')

    # Check input
    if self.check_input:
        utils.check_array(X, dtype=[str, np.number])

    # Prepare input
    X = self._prepare_input(X)

    return super().row_contributions(self._build_X_global(X))
java
@Override
public Object eGet(int featureID, boolean resolve, boolean coreType) {
    switch (featureID) {
        case AfplibPackage.GCMRKRG__XPOS:
            return getXPOS();
        case AfplibPackage.GCMRKRG__YPOS:
            return getYPOS();
    }
    return super.eGet(featureID, resolve, coreType);
}
java
public static double det( DMatrix2x2 mat ) {
    return mat.a11*mat.a22 - mat.a12*mat.a21;
}
java
public boolean isJDBCDriver(String jdbcDriverName) throws Exception {
    Address addr = Address.root().add(SUBSYSTEM, SUBSYSTEM_DATASOURCES);
    String haystack = JDBC_DRIVER;
    return null != findNodeInList(addr, haystack, jdbcDriverName);
}
java
public PlacementDescription withAttributes(java.util.Map<String, String> attributes) {
    setAttributes(attributes);
    return this;
}
java
public Word07Writer flush(OutputStream out, boolean isCloseOut) throws IORuntimeException {
    Assert.isFalse(this.isClosed, "WordWriter has been closed!");
    try {
        this.doc.write(out);
        out.flush();
    } catch (IOException e) {
        throw new IORuntimeException(e);
    } finally {
        if (isCloseOut) {
            IoUtil.close(out);
        }
    }
    return this;
}
java
private void revertInsertRow(long id, int row, boolean reuseRow) {
    // INFORM INSIDER
    insider.uninserting(clazz, id);
    idColl.cancelId(id); // UNDO CHANGE #1
    if (reuseRow) {
        deleted.add(row); // UNDO CHANGE #2
    } else {
        rows--; // UNDO CHANGE #3
    }
    idColl.delete(id); // UNDO CHANGE #4
    size--; // UNDO CHANGE #5
    ids.remove(id); // UNDO CHANGE #6
    if (reuseRow) {
        // NO NEED TO UNDO CHANGE #7a
    } else {
        records.remove(records.size() - 1); // UNDO CHANGE #7b
    }
}
java
@Nullable
public Animator createAnimator(@NonNull ViewGroup sceneRoot, @Nullable TransitionValues startValues,
        @Nullable TransitionValues endValues) {
    return null;
}
python
def _set_frr_cspf_group_computation_mode(self, v, load=False): """ Setter method for frr_cspf_group_computation_mode, mapped from YANG variable /mpls_state/rsvp/sessions/psbs/frr_cspf_group_computation_mode (frr-cspf-group-computation-mode) If this variable is read-only (config: false) in the source YANG file, then _set_frr_cspf_group_computation_mode is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_frr_cspf_group_computation_mode() directly. YANG Description: CSPF group computation mode """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'frr-cspf-group-comp-mode-add-penalty': {'value': 1}, u'frr-cspf-group-comp-mode-high-penalty': {'value': 3}, u'frr-cspf-group-comp-mode-exclude-groups': {'value': 2}, u'frr-cspf-group-comp-mode-disabled': {'value': 0}},), is_leaf=True, yang_name="frr-cspf-group-computation-mode", rest_name="frr-cspf-group-computation-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='frr-cspf-group-computation-mode', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """frr_cspf_group_computation_mode must be of a type compatible with frr-cspf-group-computation-mode""", 'defined-type': "brocade-mpls-operational:frr-cspf-group-computation-mode", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'frr-cspf-group-comp-mode-add-penalty': {'value': 1}, u'frr-cspf-group-comp-mode-high-penalty': {'value': 3}, u'frr-cspf-group-comp-mode-exclude-groups': {'value': 2}, u'frr-cspf-group-comp-mode-disabled': {'value': 0}},), is_leaf=True, yang_name="frr-cspf-group-computation-mode", rest_name="frr-cspf-group-computation-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='frr-cspf-group-computation-mode', is_config=False)""", }) self.__frr_cspf_group_computation_mode = t if hasattr(self, '_set'): self._set()
python
def _gpio_callback(self, gpio):
    """
    Gets triggered whenever the gpio state changes

    :param gpio: Number of gpio that changed
    :type gpio: int
    :rtype: None
    """
    self.debug(u"Triggered #{}".format(gpio))

    try:
        index = self.gpios.index(gpio)
    except ValueError:
        self.error(u"{} not present in GPIO list".format(gpio))
        return

    with self._people_lock:
        person = self.people[index]
        read_val = GPIO.input(gpio)

        if read_val == person.sitting:
            # Nothing changed?
            time.sleep(self.gpio_bouncetime_sleep)
            # Really sure?
            read_val = GPIO.input(gpio)

        if person.sitting != read_val:
            person.sitting = read_val
            self.debug(u"Person is now {}sitting".format(
                "" if person.sitting else "not ")
            )
            try:
                self.changer.on_person_update(self.people)
            except:
                self.exception(
                    u"Failed to update people (Person: {})".format(person)
                )
        else:
            self.warning(u"Nothing changed on {}".format(gpio))
java
public void marshall(TypedLinkFacet typedLinkFacet, ProtocolMarshaller protocolMarshaller) {
    if (typedLinkFacet == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }

    try {
        protocolMarshaller.marshall(typedLinkFacet.getName(), NAME_BINDING);
        protocolMarshaller.marshall(typedLinkFacet.getAttributes(), ATTRIBUTES_BINDING);
        protocolMarshaller.marshall(typedLinkFacet.getIdentityAttributeOrder(), IDENTITYATTRIBUTEORDER_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
python
def load_image(file) -> DataAndMetadata.DataAndMetadata:
    """
    Loads the image from the file-like object or string file.
    If file is a string, the file is opened and then read.
    Returns a numpy ndarray of our best guess for the most important image
    in the file.
    """
    if isinstance(file, str):
        with open(file, "rb") as f:
            return load_image(f)
    dmtag = parse_dm3.parse_dm_header(file)
    dmtag = fix_strings(dmtag)
    # display_keys(dmtag)
    img_index = -1
    image_tags = dmtag['ImageList'][img_index]
    data = imagedatadict_to_ndarray(image_tags['ImageData'])
    calibrations = []
    calibration_tags = image_tags['ImageData'].get('Calibrations', dict())
    for dimension in calibration_tags.get('Dimension', list()):
        origin, scale, units = dimension.get('Origin', 0.0), dimension.get('Scale', 1.0), dimension.get('Units', str())
        calibrations.append((-origin * scale, scale, units))
    calibrations = tuple(reversed(calibrations))
    if len(data.shape) == 3 and data.dtype != numpy.uint8:
        if image_tags['ImageTags'].get('Meta Data', dict()).get("Format", str()).lower() in ("spectrum", "spectrum image"):
            if data.shape[1] == 1:
                data = numpy.squeeze(data, 1)
                data = numpy.moveaxis(data, 0, 1)
                data_descriptor = DataAndMetadata.DataDescriptor(False, 1, 1)
                calibrations = (calibrations[2], calibrations[0])
            else:
                data = numpy.moveaxis(data, 0, 2)
                data_descriptor = DataAndMetadata.DataDescriptor(False, 2, 1)
                calibrations = tuple(calibrations[1:]) + (calibrations[0],)
        else:
            data_descriptor = DataAndMetadata.DataDescriptor(False, 1, 2)
    elif len(data.shape) == 4 and data.dtype != numpy.uint8:
        # data = numpy.moveaxis(data, 0, 2)
        data_descriptor = DataAndMetadata.DataDescriptor(False, 2, 2)
    elif data.dtype == numpy.uint8:
        data_descriptor = DataAndMetadata.DataDescriptor(False, 0, len(data.shape[:-1]))
    else:
        data_descriptor = DataAndMetadata.DataDescriptor(False, 0, len(data.shape))
    brightness = calibration_tags.get('Brightness', dict())
    origin, scale, units = brightness.get('Origin', 0.0), brightness.get('Scale', 1.0), brightness.get('Units', str())
    intensity = -origin * scale, scale, units
    timestamp = None
    timezone = None
    timezone_offset = None
    title = image_tags.get('Name')
    properties = dict()
    if 'ImageTags' in image_tags:
        voltage = image_tags['ImageTags'].get('ImageScanned', dict()).get('EHT', dict())
        if voltage:
            properties.setdefault("hardware_source", dict())["autostem"] = {"high_tension_v": float(voltage)}
        dm_metadata_signal = image_tags['ImageTags'].get('Meta Data', dict()).get('Signal')
        if dm_metadata_signal and dm_metadata_signal.lower() == "eels":
            properties.setdefault("hardware_source", dict())["signal_type"] = dm_metadata_signal
        if image_tags['ImageTags'].get('Meta Data', dict()).get("Format", str()).lower() in ("spectrum", "spectrum image"):
            data_descriptor.collection_dimension_count += data_descriptor.datum_dimension_count - 1
            data_descriptor.datum_dimension_count = 1
        if image_tags['ImageTags'].get('Meta Data', dict()).get("IsSequence", False) and data_descriptor.collection_dimension_count > 0:
            data_descriptor.is_sequence = True
            data_descriptor.collection_dimension_count -= 1
        timestamp_str = image_tags['ImageTags'].get("Timestamp")
        if timestamp_str:
            timestamp = get_datetime_from_timestamp_str(timestamp_str)
        timezone = image_tags['ImageTags'].get("Timezone")
        timezone_offset = image_tags['ImageTags'].get("TimezoneOffset")
        # to avoid having duplicate copies in Swift, get rid of these tags
        image_tags['ImageTags'].pop("Timestamp", None)
        image_tags['ImageTags'].pop("Timezone", None)
        image_tags['ImageTags'].pop("TimezoneOffset", None)
        # put the image tags into properties
        properties.update(image_tags['ImageTags'])
    dimensional_calibrations = [Calibration.Calibration(c[0], c[1], c[2]) for c in calibrations]
    while len(dimensional_calibrations) < data_descriptor.expected_dimension_count:
        dimensional_calibrations.append(Calibration.Calibration())
    intensity_calibration = Calibration.Calibration(intensity[0], intensity[1], intensity[2])
    return DataAndMetadata.new_data_and_metadata(data,
                                                 data_descriptor=data_descriptor,
                                                 dimensional_calibrations=dimensional_calibrations,
                                                 intensity_calibration=intensity_calibration,
                                                 metadata=properties,
                                                 timestamp=timestamp,
                                                 timezone=timezone,
                                                 timezone_offset=timezone_offset)
java
public void setDayFormatter(DayFormatter formatter) {
    adapter.setDayFormatter(formatter == null ? DayFormatter.DEFAULT : formatter);
}
java
public void setCharacterStream(final int parameterIndex, final Reader reader) throws SQLException {
    if (reader == null) {
        setNull(parameterIndex, Types.BLOB);
        return;
    }
    try {
        setParameter(parameterIndex, new BufferedReaderParameter(reader));
    } catch (IOException e) {
        throw SQLExceptionMapper.getSQLException("Could not read reader", e);
    }
}
java
private void setXPathCharacteristics(IdentifierImpl identifier) throws InvalidXPathSyntaxException {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        tc.entry(cclass, "setXPathCharacteristics", "identifier: " + identifier);

    // Need to set the domain to XPATH1.0
    identifier.setSelectorDomain(2);

    // Set the locationStep also
    identifier.setStep(locationStep);

    // Call XPath to compile the XPath1.0 expression and store the
    // resultant XPathExpression in the Identifier.
    XPathExpression xpexp = null;
    try {
        // Parse an expression up-front
        Node node = null;
        NodeList ns = null;
        XPath xpath0 = XPathFactory.newInstance().newXPath();

        // If a namespace context has been set then set it into the XPath env
        if (namespaceContext != null)
            xpath0.setNamespaceContext(namespaceContext);

        xpexp = xpath0.compile(identifier.getName());
    } catch (Exception ex) {
        // No FFDC Code Needed.
        // We don't FFDC because we'll catch this exception and then attempt
        // to parse the entire expression. If that fails, then we FFDC.
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            tc.exit(this, cclass, "setXPathCharacteristics", ex);
        throw new InvalidXPathSyntaxException(identifier.getName());
    }

    // Store xpexp in the Identifier
    identifier.setCompiledExpression(xpexp);

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        tc.exit(this, cclass, "setXPathCharacteristics");
}
python
def get(cls, whitelist_id, whitelist_result_id, note_text_whitelist_result_id,
        monetary_account_id=None, custom_headers=None):
    """
    :type api_context: context.ApiContext
    :type user_id: int
    :type monetary_account_id: int
    :type whitelist_id: int
    :type whitelist_result_id: int
    :type note_text_whitelist_result_id: int
    :type custom_headers: dict[str, str]|None

    :rtype: BunqResponseNoteTextWhitelistResult
    """
    if custom_headers is None:
        custom_headers = {}

    api_client = client.ApiClient(cls._get_api_context())
    endpoint_url = cls._ENDPOINT_URL_READ.format(cls._determine_user_id(),
                                                 cls._determine_monetary_account_id(monetary_account_id),
                                                 whitelist_id, whitelist_result_id,
                                                 note_text_whitelist_result_id)
    response_raw = api_client.get(endpoint_url, {}, custom_headers)

    return BunqResponseNoteTextWhitelistResult.cast_from_bunq_response(
        cls._from_json(response_raw, cls._OBJECT_TYPE_GET)
    )
python
def get_model(self):
    '''
    `object` of model as a function approximator,
    which has `lstm_model` whose type is `pydbm.rnn.lstm_model.LSTMModel`.
    '''
    class Model(object):
        def __init__(self, lstm_model):
            self.lstm_model = lstm_model

    return Model(self.__lstm_model)
python
def gnuplot_2d(x, y, filename, title='', x_label='', y_label=''):
    '''
    Function to produce a general 2D plot.

    Args:
        x (list): x points.
        y (list): y points.
        filename (str): Filename of the output image.
        title (str): Title of the plot. Default is '' (no title).
        x_label (str): x-axis label.
        y_label (str): y-axis label.
    '''
    _, ext = os.path.splitext(filename)
    if ext != '.png':
        filename += '.png'

    gnuplot_cmds = \
    '''
    set datafile separator ","
    set term pngcairo size 30cm,25cm
    set out filename
    unset key
    set border lw 1.5
    set grid lt -1 lc rgb "gray80"
    set title title
    set xlabel x_label
    set ylabel y_label
    plot filename_data u 1:2 w lp pt 6 ps 0.5
    '''
    scr = _GnuplotScriptTemp(gnuplot_cmds)
    data = _GnuplotDataTemp(x, y)

    args_dict = {
        'filename': filename,
        'filename_data': data.name,
        'title': title,
        'x_label': x_label,
        'y_label': y_label
    }
    gnuplot(scr.name, args_dict)
python
def validate(self, value):
    """
    Return True if the choice is an integer; False otherwise.

    If the value was cast successfully to an int, set the choice that will
    make its way into the answers dict to the cast int value, not the
    string representation.
    """
    try:
        int_value = int(value)
        self._choice = int_value
        return True
    except ValueError:
        self.error_message = '%s is not a valid integer.' % value
        return False
java
public static Object extractWriteArray(final DeviceAttribute da, final AttrWriteType writeType,
        final AttrDataFormat format) throws DevFailed {
    if (da == null) {
        throw DevFailedUtils.newDevFailed(ERROR_MSG_DA);
    }
    return InsertExtractFactory.getAttributeExtractor(da.getType()).extractWriteArray(da, writeType, format);
}
python
def get_versio_versioning_scheme(full_class_path):
    """Return a class based on its full path"""
    module_path = '.'.join(full_class_path.split('.')[0:-1])
    class_name = full_class_path.split('.')[-1]
    try:
        module = importlib.import_module(module_path)
    except ImportError:
        raise RuntimeError('Invalid specified Versio schema {}'.format(full_class_path))

    try:
        return getattr(module, class_name)
    except AttributeError:
        raise RuntimeError(
            'Could not find Versio schema class {!r} inside {!r} module.'.format(
                class_name, module_path))
python
def html(self, label, *msg):
    """
    Prints html in notebook
    """
    lbl = "[" + label + "] "
    txt = lbl + " " + " ".join(list(msg))
    if self.notebook is True:
        # the label is already part of txt, so render the whole string as HTML
        display(HTML(txt))
    else:
        print(txt)
java
private boolean ignoreMethod(String name) {
    boolean result = false;
    for (String ignoredName : IGNORED_METHODS) {
        if (name.matches(ignoredName)) {
            result = true;
            break;
        }
    }
    return result;
}
python
def update_reimburse(self, card_id, encrypt_code, reimburse_status):
    """
    Update invoice information as the reimbursing party.

    For details see
    https://mp.weixin.qq.com/wiki?id=mp1496561749_f7T6D

    :param card_id: Card ID of the invoice card coupon
    :param encrypt_code: Encrypted code of the invoice card coupon
    :param reimburse_status: Reimbursement status of the invoice
    """
    return self._post(
        'reimburse/updateinvoicestatus',
        data={
            'card_id': card_id,
            'encrypt_code': encrypt_code,
            'reimburse_status': reimburse_status,
        },
    )
java
public static <T, A> Answer<T> toAnswer(final Answer1<T, A> answer) {
    return new Answer<T>() {
        @SuppressWarnings("unchecked")
        public T answer(InvocationOnMock invocation) throws Throwable {
            return answer.answer((A) invocation.getArgument(0));
        }
    };
}
python
def publishApp(self, app_info, map_info=None, fsInfo=None):
    """Publishes apps to AGOL/Portal

    Args:
        app_info (list): A list of JSON configuration apps to publish.
        map_info (list): Defaults to ``None``.
        fsInfo (list): Defaults to ``None``.

    Returns:
        dict: A dictionary of results objects.
    """
    if self.securityhandler is None:
        print("Security handler required")
        return
    appDet = None
    try:
        app_results = []
        if isinstance(app_info, list):
            for appDet in app_info:
                app_results.append(self._publishAppLogic(appDet=appDet, map_info=map_info, fsInfo=fsInfo))
        else:
            app_results.append(self._publishAppLogic(appDet=app_info, map_info=map_info, fsInfo=fsInfo))
        return app_results
    except (common.ArcRestHelperError) as e:
        raise e
    except Exception as e:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "publishApp",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        appDet = None
        del appDet
        gc.collect()
java
public void merge(ExtractionResult extractionResult) {
    for (Result result : extractionResult.results) {
        add(result.getObject(), result.getResultName());
    }
}
java
public static String getMD5Hash(String text) {
    MessageDigest md;
    byte[] md5hash = new byte[32];
    try {
        md = MessageDigest.getInstance("MD5");
        md.update(text.getBytes("iso-8859-1"), 0, text.length());
        md5hash = md.digest();
    } catch (NoSuchAlgorithmException e) {
        e.printStackTrace();
    } catch (UnsupportedEncodingException e) {
        e.printStackTrace();
    }
    return convertToHex(md5hash);
}
python
def get_parameters(self):
    """gets from all wrapped processors"""
    d = {}
    for p in self.processors:
        parameter_names = list(p.PARAMETERS.keys())
        parameter_values = [getattr(p, n) for n in parameter_names]
        d.update(dict(zip(parameter_names, parameter_values)))
    return d
java
@Override
protected void internalExecute() throws MojoExecutionException, MojoFailureException {
    if (skipMain) {
        getLog().info("Not compiling main sources");
        return;
    }

    super.internalExecute();

    if (outputDirectory.isDirectory()) {
        projectArtifact.setFile(outputDirectory);
    }
}
python
def approximate_controls(model, verbose=False, steady_state=None, eigmax=1.0-1e-6, solve_steady_state=False, order=1): """Compute first order approximation of optimal controls Parameters: ----------- model: NumericModel Model to be solved verbose: boolean If True: displays number of contracting eigenvalues steady_state: ndarray Use supplied steady-state value to compute the approximation. The routine doesn't check whether it is really a solution or not. solve_steady_state: boolean Use nonlinear solver to find the steady-state orders: {1} Approximation order. (Currently, only first order is supported). Returns: -------- TaylorExpansion: Decision Rule for the optimal controls around the steady-state. """ if order > 1: raise Exception("Not implemented.") f = model.functions['arbitrage'] g = model.functions['transition'] if steady_state is not None: calib = steady_state else: calib = model.calibration if solve_steady_state: calib = find_deterministic_equilibrium(model) p = calib['parameters'] s = calib['states'] x = calib['controls'] e = calib['shocks'] distrib = model.get_distribution() sigma = distrib.sigma l = g(s, x, e, p, diff=True) [junk, g_s, g_x, g_e] = l[:4] # [el[0,...] for el in l[:4]] l = f(s, x, e, s, x, p, diff=True) [res, f_s, f_x, f_e, f_S, f_X] = l # [el[0,...] for el in l[:6]] n_s = g_s.shape[0] # number of controls n_x = g_x.shape[1] # number of states n_e = g_e.shape[1] n_v = n_s + n_x A = row_stack([ column_stack([eye(n_s), zeros((n_s, n_x))]), column_stack([-f_S , -f_X ]) ]) B = row_stack([ column_stack([g_s, g_x]), column_stack([f_s, f_x]) ]) [S, T, Q, Z, eigval] = qzordered(A, B, 1.0-1e-8) Q = Q.real # is it really necessary ? Z = Z.real diag_S = np.diag(S) diag_T = np.diag(T) tol_geneigvals = 1e-10 try: ok = sum((abs(diag_S) < tol_geneigvals) * (abs(diag_T) < tol_geneigvals)) == 0 assert(ok) except Exception as e: raise GeneralizedEigenvaluesError(diag_S=diag_S, diag_T=diag_T) if max(eigval[:n_s]) >= 1 and min(eigval[n_s:]) < 1: # BK conditions are met pass else: eigval_s = sorted(eigval, reverse=True) ev_a = eigval_s[n_s-1] ev_b = eigval_s[n_s] cutoff = (ev_a - ev_b)/2 if not ev_a > ev_b: raise GeneralizedEigenvaluesSelectionError( A=A, B=B, eigval=eigval, cutoff=cutoff, diag_S=diag_S, diag_T=diag_T, n_states=n_s ) import warnings if cutoff > 1: warnings.warn("Solution is not convergent.") else: warnings.warn("There are multiple convergent solutions. The one with the smaller eigenvalues was selected.") [S, T, Q, Z, eigval] = qzordered(A, B, cutoff) Z11 = Z[:n_s, :n_s] # Z12 = Z[:n_s, n_s:] Z21 = Z[n_s:, :n_s] # Z22 = Z[n_s:, n_s:] # S11 = S[:n_s, :n_s] # T11 = T[:n_s, :n_s] # first order solution # P = (solve(S11.T, Z11.T).T @ solve(Z11.T, T11.T).T) C = solve(Z11.T, Z21.T).T Q = g_e s = s.ravel() x = x.ravel() A = g_s + g_x @ C B = g_e dr = CDR([s, x, C]) dr.A = A dr.B = B dr.sigma = sigma return dr
python
def add_episode(db, aid, episode):
    """Add an episode."""
    values = {
        'aid': aid,
        'type': episode.type,
        'number': episode.number,
        'title': episode.title,
        'length': episode.length,
    }
    upsert(db, 'episode', ['aid', 'type', 'number'], values)
java
public static <T1, T2, T3> TriPredicate<T1, T2, T3> spy1st(TriPredicate<T1, T2, T3> predicate, Box<T1> param1) {
    return spy(predicate, Box.<Boolean>empty(), param1, Box.<T2>empty(), Box.<T3>empty());
}
java
public DateEnd setDateEnd(Date dateEnd, boolean hasTime) {
    DateEnd prop = (dateEnd == null) ? null : new DateEnd(dateEnd, hasTime);
    setDateEnd(prop);
    return prop;
}
java
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
        throws IOException, ServletException {
    HttpServletRequest req = HttpServletRequest.class.cast(request);
    String url = req.getRequestURI();
    LoggerFactory.getLogger(getClass()).debug("Request started: {}", url);
    long start = System.currentTimeMillis();
    try {
        chain.doFilter(request, response);
    } finally {
        long delta = System.currentTimeMillis() - start;
        LoggerFactory.getLogger(getClass()).info("Request completed in {}ms: {}", delta, url);
    }
}
java
@Override
public CPOptionValue findByC_ERC(long companyId, String externalReferenceCode)
    throws NoSuchCPOptionValueException {
    CPOptionValue cpOptionValue = fetchByC_ERC(companyId, externalReferenceCode);

    if (cpOptionValue == null) {
        StringBundler msg = new StringBundler(6);

        msg.append(_NO_SUCH_ENTITY_WITH_KEY);
        msg.append("companyId=");
        msg.append(companyId);
        msg.append(", externalReferenceCode=");
        msg.append(externalReferenceCode);
        msg.append("}");

        if (_log.isDebugEnabled()) {
            _log.debug(msg.toString());
        }

        throw new NoSuchCPOptionValueException(msg.toString());
    }

    return cpOptionValue;
}
python
def compile_resource(resource):
    """ Return compiled regex for resource matching """
    return re.compile("^" +
                      trim_resource(re.sub(r":(\w+)", r"(?P<\1>[\w-]+?)", resource)) +
                      r"(\?(?P<querystring>.*))?$")
java
@Override public void open(Configuration configuration) throws Exception { // determine the offset commit mode this.offsetCommitMode = OffsetCommitModes.fromConfiguration( getIsAutoCommitEnabled(), enableCommitOnCheckpoints, ((StreamingRuntimeContext) getRuntimeContext()).isCheckpointingEnabled()); // create the partition discoverer this.partitionDiscoverer = createPartitionDiscoverer( topicsDescriptor, getRuntimeContext().getIndexOfThisSubtask(), getRuntimeContext().getNumberOfParallelSubtasks()); this.partitionDiscoverer.open(); subscribedPartitionsToStartOffsets = new HashMap<>(); final List<KafkaTopicPartition> allPartitions = partitionDiscoverer.discoverPartitions(); if (restoredState != null) { for (KafkaTopicPartition partition : allPartitions) { if (!restoredState.containsKey(partition)) { restoredState.put(partition, KafkaTopicPartitionStateSentinel.EARLIEST_OFFSET); } } for (Map.Entry<KafkaTopicPartition, Long> restoredStateEntry : restoredState.entrySet()) { if (!restoredFromOldState) { // seed the partition discoverer with the union state while filtering out // restored partitions that should not be subscribed by this subtask if (KafkaTopicPartitionAssigner.assign( restoredStateEntry.getKey(), getRuntimeContext().getNumberOfParallelSubtasks()) == getRuntimeContext().getIndexOfThisSubtask()){ subscribedPartitionsToStartOffsets.put(restoredStateEntry.getKey(), restoredStateEntry.getValue()); } } else { // when restoring from older 1.1 / 1.2 state, the restored state would not be the union state; // in this case, just use the restored state as the subscribed partitions subscribedPartitionsToStartOffsets.put(restoredStateEntry.getKey(), restoredStateEntry.getValue()); } } if (filterRestoredPartitionsWithCurrentTopicsDescriptor) { subscribedPartitionsToStartOffsets.entrySet().removeIf(entry -> { if (!topicsDescriptor.isMatchingTopic(entry.getKey().getTopic())) { LOG.warn( "{} is removed from subscribed partitions since it is no longer associated with topics descriptor of current execution.", entry.getKey()); return true; } return false; }); } LOG.info("Consumer subtask {} will start reading {} partitions with offsets in restored state: {}", getRuntimeContext().getIndexOfThisSubtask(), subscribedPartitionsToStartOffsets.size(), subscribedPartitionsToStartOffsets); } else { // use the partition discoverer to fetch the initial seed partitions, // and set their initial offsets depending on the startup mode. // for SPECIFIC_OFFSETS and TIMESTAMP modes, we set the specific offsets now; // for other modes (EARLIEST, LATEST, and GROUP_OFFSETS), the offset is lazily determined // when the partition is actually read. 
switch (startupMode) { case SPECIFIC_OFFSETS: if (specificStartupOffsets == null) { throw new IllegalStateException( "Startup mode for the consumer set to " + StartupMode.SPECIFIC_OFFSETS + ", but no specific offsets were specified."); } for (KafkaTopicPartition seedPartition : allPartitions) { Long specificOffset = specificStartupOffsets.get(seedPartition); if (specificOffset != null) { // since the specified offsets represent the next record to read, we subtract // it by one so that the initial state of the consumer will be correct subscribedPartitionsToStartOffsets.put(seedPartition, specificOffset - 1); } else { // default to group offset behaviour if the user-provided specific offsets // do not contain a value for this partition subscribedPartitionsToStartOffsets.put(seedPartition, KafkaTopicPartitionStateSentinel.GROUP_OFFSET); } } break; case TIMESTAMP: if (startupOffsetsTimestamp == null) { throw new IllegalStateException( "Startup mode for the consumer set to " + StartupMode.TIMESTAMP + ", but no startup timestamp was specified."); } for (Map.Entry<KafkaTopicPartition, Long> partitionToOffset : fetchOffsetsWithTimestamp(allPartitions, startupOffsetsTimestamp).entrySet()) { subscribedPartitionsToStartOffsets.put( partitionToOffset.getKey(), (partitionToOffset.getValue() == null) // if an offset cannot be retrieved for a partition with the given timestamp, // we default to using the latest offset for the partition ? KafkaTopicPartitionStateSentinel.LATEST_OFFSET // since the specified offsets represent the next record to read, we subtract // it by one so that the initial state of the consumer will be correct : partitionToOffset.getValue() - 1); } break; default: for (KafkaTopicPartition seedPartition : allPartitions) { subscribedPartitionsToStartOffsets.put(seedPartition, startupMode.getStateSentinel()); } } if (!subscribedPartitionsToStartOffsets.isEmpty()) { switch (startupMode) { case EARLIEST: LOG.info("Consumer subtask {} will start reading the following {} partitions from the earliest offsets: {}", getRuntimeContext().getIndexOfThisSubtask(), subscribedPartitionsToStartOffsets.size(), subscribedPartitionsToStartOffsets.keySet()); break; case LATEST: LOG.info("Consumer subtask {} will start reading the following {} partitions from the latest offsets: {}", getRuntimeContext().getIndexOfThisSubtask(), subscribedPartitionsToStartOffsets.size(), subscribedPartitionsToStartOffsets.keySet()); break; case TIMESTAMP: LOG.info("Consumer subtask {} will start reading the following {} partitions from timestamp {}: {}", getRuntimeContext().getIndexOfThisSubtask(), subscribedPartitionsToStartOffsets.size(), startupOffsetsTimestamp, subscribedPartitionsToStartOffsets.keySet()); break; case SPECIFIC_OFFSETS: LOG.info("Consumer subtask {} will start reading the following {} partitions from the specified startup offsets {}: {}", getRuntimeContext().getIndexOfThisSubtask(), subscribedPartitionsToStartOffsets.size(), specificStartupOffsets, subscribedPartitionsToStartOffsets.keySet()); List<KafkaTopicPartition> partitionsDefaultedToGroupOffsets = new ArrayList<>(subscribedPartitionsToStartOffsets.size()); for (Map.Entry<KafkaTopicPartition, Long> subscribedPartition : subscribedPartitionsToStartOffsets.entrySet()) { if (subscribedPartition.getValue() == KafkaTopicPartitionStateSentinel.GROUP_OFFSET) { partitionsDefaultedToGroupOffsets.add(subscribedPartition.getKey()); } } if (partitionsDefaultedToGroupOffsets.size() > 0) { LOG.warn("Consumer subtask {} cannot find offsets for the following {} 
partitions in the specified startup offsets: {}" + "; their startup offsets will be defaulted to their committed group offsets in Kafka.", getRuntimeContext().getIndexOfThisSubtask(), partitionsDefaultedToGroupOffsets.size(), partitionsDefaultedToGroupOffsets); } break; case GROUP_OFFSETS: LOG.info("Consumer subtask {} will start reading the following {} partitions from the committed group offsets in Kafka: {}", getRuntimeContext().getIndexOfThisSubtask(), subscribedPartitionsToStartOffsets.size(), subscribedPartitionsToStartOffsets.keySet()); } } else { LOG.info("Consumer subtask {} initially has no partitions to read from.", getRuntimeContext().getIndexOfThisSubtask()); } } }
python
def simple_repr(obj: Any, attrnames: List[str],
                with_addr: bool = False,
                joiner: str = COMMA_SPACE) -> str:
    """
    Convenience function for :func:`__repr__`.
    Works its way through a list of attribute names, and creates a ``repr()``
    representation assuming that parameters to the constructor have the same
    names.

    Args:
        obj: object to display
        attrnames: names of attributes to include
        with_addr: include the memory address of ``obj``
        joiner: string with which to join the elements

    Returns:
        string: :func:`repr`-style representation
    """
    elements = ["{}={}".format(name, repr(getattr(obj, name)))
                for name in attrnames]
    return repr_result(obj, elements, with_addr=with_addr, joiner=joiner)
python
def argsort2(indexable, key=None, reverse=False):
    """
    Returns the indices that would sort an indexable object.

    This is similar to np.argsort, but it is written in pure python and works
    on both lists and dictionaries.

    Args:
        indexable (list or dict): indexable to sort by

    Returns:
        list: indices: list of indices that sorts the indexable

    Example:
        >>> # DISABLE_DOCTEST
        >>> import utool as ut
        >>> # argsort works on dicts
        >>> dict_ = indexable = {'a': 3, 'b': 2, 'c': 100}
        >>> indices = ut.argsort2(indexable)
        >>> assert list(ut.take(dict_, indices)) == sorted(dict_.values())
        >>> # argsort works on lists
        >>> indexable = [100, 2, 432, 10]
        >>> indices = ut.argsort2(indexable)
        >>> assert list(ut.take(indexable, indices)) == sorted(indexable)
        >>> # argsort works on iterators
        >>> indexable = reversed(range(100))
        >>> indices = ut.argsort2(indexable)
        >>> assert indices[0] == 99
    """
    # Create an iterator of value/key pairs
    if isinstance(indexable, dict):
        vk_iter = ((v, k) for k, v in indexable.items())
    else:
        vk_iter = ((v, k) for k, v in enumerate(indexable))
    # Sort by values and extract the keys
    if key is None:
        indices = [k for v, k in sorted(vk_iter, reverse=reverse)]
    else:
        indices = [k for v, k in sorted(vk_iter,
                                        key=lambda vk: key(vk[0]),
                                        reverse=reverse)]
    return indices
python
def get_model_class(klass, api=None, use_request_api=True):
    """
    Generates the Model Class based on the klass and automatically loads the
    corresponding json schema file from the schemes folder.

    :param klass: json schema filename
    :param use_request_api: if True, auto-initializes the request class if api is None
    :param api: the transportation api; if None, the default settings are taken and instantiated
    """
    if api is None and use_request_api:
        api = APIClient()

    _type = klass
    if isinstance(klass, dict):
        _type = klass['type']

    schema = loaders.load_schema_raw(_type)
    model_cls = model_factory(schema, base_class=RemoteResource)
    model_cls.__api__ = api
    return model_cls
java
public static String canonicalize(String str) {
    if (str == null)
        return null;
    int length = str.length();
    char ch;
    StringBuffer buf = new StringBuffer(length);
    for (int i = 0; i < length; i++) {
        ch = str.charAt(i);
        if (ch == '_')
            continue;
        buf.append(Character.toLowerCase(ch));
    }
    return buf.toString();
}
python
def binary(self):
    """ return encoded representation """
    if isinstance(self.value, int):
        return b_chr(_TAG_ATOM_CACHE_REF) + b_chr(self.value)
    elif isinstance(self.value, TypeUnicode):
        value_encoded = self.value.encode('utf-8')
        length = len(value_encoded)
        if length <= 255:
            return (
                b_chr(_TAG_SMALL_ATOM_UTF8_EXT) +
                b_chr(length) + value_encoded
            )
        elif length <= 65535:
            return (
                b_chr(_TAG_ATOM_UTF8_EXT) +
                struct.pack(b'>H', length) + value_encoded
            )
        else:
            raise OutputException('uint16 overflow')
    elif isinstance(self.value, bytes):
        length = len(self.value)
        if length <= 255:
            return b_chr(_TAG_SMALL_ATOM_EXT) + b_chr(length) + self.value
        elif length <= 65535:
            return (
                b_chr(_TAG_ATOM_EXT) +
                struct.pack(b'>H', length) + self.value
            )
        else:
            raise OutputException('uint16 overflow')
    else:
        raise OutputException('unknown atom type')
python
def precise_diff(d1, d2): """ Calculate a precise difference between two datetimes. :param d1: The first datetime :type d1: datetime.datetime or datetime.date :param d2: The second datetime :type d2: datetime.datetime or datetime.date :rtype: PreciseDiff """ sign = 1 if d1 == d2: return PreciseDiff(0, 0, 0, 0, 0, 0, 0, 0) tzinfo1 = d1.tzinfo if isinstance(d1, datetime.datetime) else None tzinfo2 = d2.tzinfo if isinstance(d2, datetime.datetime) else None if ( tzinfo1 is None and tzinfo2 is not None or tzinfo2 is None and tzinfo1 is not None ): raise ValueError( "Comparison between naive and aware datetimes is not supported" ) if d1 > d2: d1, d2 = d2, d1 sign = -1 d_diff = 0 hour_diff = 0 min_diff = 0 sec_diff = 0 mic_diff = 0 total_days = _day_number(d2.year, d2.month, d2.day) - _day_number( d1.year, d1.month, d1.day ) in_same_tz = False tz1 = None tz2 = None # Trying to figure out the timezone names # If we can't find them, we assume different timezones if tzinfo1 and tzinfo2: if hasattr(tzinfo1, "name"): # Pendulum timezone tz1 = tzinfo1.name elif hasattr(tzinfo1, "zone"): # pytz timezone tz1 = tzinfo1.zone if hasattr(tzinfo2, "name"): tz2 = tzinfo2.name elif hasattr(tzinfo2, "zone"): tz2 = tzinfo2.zone in_same_tz = tz1 == tz2 and tz1 is not None if isinstance(d2, datetime.datetime): if isinstance(d1, datetime.datetime): # If we are not in the same timezone # we need to adjust # # We also need to adjust if we do not # have variable-length units if not in_same_tz or total_days == 0: offset1 = d1.utcoffset() offset2 = d2.utcoffset() if offset1: d1 = d1 - offset1 if offset2: d2 = d2 - offset2 hour_diff = d2.hour - d1.hour min_diff = d2.minute - d1.minute sec_diff = d2.second - d1.second mic_diff = d2.microsecond - d1.microsecond else: hour_diff = d2.hour min_diff = d2.minute sec_diff = d2.second mic_diff = d2.microsecond if mic_diff < 0: mic_diff += 1000000 sec_diff -= 1 if sec_diff < 0: sec_diff += 60 min_diff -= 1 if min_diff < 0: min_diff += 60 hour_diff -= 1 if hour_diff < 0: hour_diff += 24 d_diff -= 1 y_diff = d2.year - d1.year m_diff = d2.month - d1.month d_diff += d2.day - d1.day if d_diff < 0: year = d2.year month = d2.month if month == 1: month = 12 year -= 1 else: month -= 1 leap = int(is_leap(year)) days_in_last_month = DAYS_PER_MONTHS[leap][month] days_in_month = DAYS_PER_MONTHS[int(is_leap(d2.year))][d2.month] if d_diff < days_in_month - days_in_last_month: # We don't have a full month, we calculate days if days_in_last_month < d1.day: d_diff += d1.day else: d_diff += days_in_last_month elif d_diff == days_in_month - days_in_last_month: # We have exactly a full month # We remove the days difference # and add one to the months difference d_diff = 0 m_diff += 1 else: # We have a full month d_diff += days_in_last_month m_diff -= 1 if m_diff < 0: m_diff += 12 y_diff -= 1 return PreciseDiff( sign * y_diff, sign * m_diff, sign * d_diff, sign * hour_diff, sign * min_diff, sign * sec_diff, sign * mic_diff, sign * total_days, )
java
public Regex group(String name) {
    return re("(" +
              (StringUtils.isNotNullOrBlank(name) ? "?<" + name + ">" : StringUtils.EMPTY) +
              pattern + ")");
}
python
def send_short_lpp_packet(self, dest_id, data):
    """
    Send ultra-wide-band LPP packet to dest_id
    """
    pk = CRTPPacket()
    pk.port = CRTPPort.LOCALIZATION
    pk.channel = self.GENERIC_CH
    pk.data = struct.pack('<BB', self.LPS_SHORT_LPP_PACKET, dest_id) + data
    self._cf.send_packet(pk)
python
def join(self):
    """
    Wait for all tasks to finish
    """
    pending = set()
    exceptions = set()
    while len(self._tasks) > 0 or len(pending) > 0:
        while len(self._tasks) > 0 and len(pending) < self._concurrency:
            task, args, kwargs = self._tasks.pop(0)
            pending.add(task(*args, **kwargs))
        (done, pending) = yield from asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
        for task in done:
            if task.exception():
                exceptions.add(task.exception())
    if len(exceptions) > 0:
        raise exceptions.pop()
python
async def get_participant(self, p_id: int, force_update=False) -> Participant:
    """ get a participant by its id

    |methcoro|

    Args:
        p_id: participant id
        force_update (default=False): True to force an update to the Challonge API

    Returns:
        Participant: None if not found

    Raises:
        APIException
    """
    found_p = self._find_participant(p_id)
    if force_update or found_p is None:
        await self.get_participants()
        found_p = self._find_participant(p_id)
    return found_p
java
@Override
public Request<DeleteLaunchTemplateVersionsRequest> getDryRunRequest() {
    Request<DeleteLaunchTemplateVersionsRequest> request = new DeleteLaunchTemplateVersionsRequestMarshaller().marshall(this);
    request.addParameter("DryRun", Boolean.toString(true));
    return request;
}
python
def _url_for_email(endpoint, base_url=None, **kw):
    """
    Create an external url_for by using a custom base_url different from the
    domain we are on.

    :param endpoint:
    :param base_url:
    :param kw:
    :return:
    """
    base_url = base_url or config("MAIL_EXTERNAL_BASE_URL")
    _external = True if not base_url else False
    url = url_for(endpoint, _external=_external, **kw)
    if base_url and not _external:
        url = "%s/%s" % (base_url.strip("/"), url.lstrip("/"))
    return url
python
def fast_sync_inspect_snapshot(snapshot_path):
    """
    Inspect a snapshot and return useful information.

    Return {'status': True, 'signatures': ..., 'payload_size': ...,
            'sig_append_offset': ..., 'hash': ...} on success
    Return {'error': ...} on error
    """
    with open(snapshot_path, 'r') as f:
        info = fast_sync_inspect(f)
        if 'error' in info:
            log.error("Failed to inspect snapshot {}: {}".format(snapshot_path, info['error']))
            return {'error': 'Failed to inspect snapshot'}

        # get the hash of the file
        hash_hex = get_file_hash(f, hashlib.sha256, fd_len=info['payload_size'])
        info['hash'] = hash_hex

    return info
java
public void start() {
    Preconditions.checkState(Thread.currentThread().equals(ourThread), "Not in the correct thread");
    client.addParentWatcher(watcher);
}
python
def _ParseProcessingOptions(self, options):
    """Parses the processing options.

    Args:
      options (argparse.Namespace): command line arguments.

    Raises:
      BadConfigOption: if the options are invalid.
    """
    self._single_process_mode = getattr(options, 'single_process', False)

    argument_helper_names = [
        'process_resources', 'temporary_directory', 'workers', 'zeromq']
    helpers_manager.ArgumentHelperManager.ParseOptions(
        options, self, names=argument_helper_names)
java
public <S extends Storable> void resync(Class<S> type, double desiredSpeed,
        String filter, Object... filterValues) throws RepositoryException {
    resync(type, null, desiredSpeed, filter, filterValues);
}
java
protected List<OUT> executeOnCollections(RuntimeContext ctx, ExecutionConfig executionConfig) throws Exception {
    @SuppressWarnings("unchecked")
    InputFormat<OUT, InputSplit> inputFormat = (InputFormat<OUT, InputSplit>) this.formatWrapper.getUserCodeObject();
    //configure the input format
    inputFormat.configure(this.parameters);

    //open the input format
    if (inputFormat instanceof RichInputFormat) {
        ((RichInputFormat) inputFormat).setRuntimeContext(ctx);
        ((RichInputFormat) inputFormat).openInputFormat();
    }

    List<OUT> result = new ArrayList<OUT>();

    // splits
    InputSplit[] splits = inputFormat.createInputSplits(1);
    TypeSerializer<OUT> serializer = getOperatorInfo().getOutputType().createSerializer(executionConfig);

    for (InputSplit split : splits) {
        inputFormat.open(split);

        while (!inputFormat.reachedEnd()) {
            OUT next = inputFormat.nextRecord(serializer.createInstance());
            if (next != null) {
                result.add(serializer.copy(next));
            }
        }

        inputFormat.close();
    }

    //close the input format
    if (inputFormat instanceof RichInputFormat) {
        ((RichInputFormat) inputFormat).closeInputFormat();
    }

    return result;
}
java
@Override
public CommerceOrderItem fetchByC_S_First(long commerceOrderId, boolean subscription,
        OrderByComparator<CommerceOrderItem> orderByComparator) {
    List<CommerceOrderItem> list = findByC_S(commerceOrderId, subscription, 0, 1, orderByComparator);

    if (!list.isEmpty()) {
        return list.get(0);
    }

    return null;
}
python
def delete(self, id: int):
    """ Delete asset class """
    assert isinstance(id, int)

    self.open_session()
    to_delete = self.get(id)
    self.session.delete(to_delete)
    self.save()
python
def solve(self, b, overwrite_b=False, check_finite=True, p=None):
    """ solve A \ b """
    if p is None:
        assert b.shape[:2] == (len(self.solver), self.dof_any)
        solution = np.empty(b.shape)
        # This is trivially parallelizable:
        for p in range(self.P):
            solution[p] = self.solver[p].solve(b=b[p])
        return solution
    else:
        return self.solver[p].solve(b=b)
python
def check_and_set_unreachability(self, hosts, services):
    """
    Check if all dependencies are down, if yes set this object as unreachable.

    todo: this function does not care about execution_failure_criteria!

    :param hosts: hosts objects, used to get object in act_depend_of
    :type hosts: alignak.objects.host.Hosts
    :param services: services objects, used to get object in act_depend_of
    :type services: alignak.objects.service.Services
    :return: None
    """
    parent_is_down = []
    for (dep_id, _, _, _) in self.act_depend_of:
        if dep_id in hosts:
            dep = hosts[dep_id]
        else:
            dep = services[dep_id]
        if dep.state in ['d', 'DOWN', 'c', 'CRITICAL', 'u', 'UNKNOWN', 'x', 'UNREACHABLE']:
            parent_is_down.append(True)
        else:
            parent_is_down.append(False)
    if False in parent_is_down:
        return
    # all parents down
    self.set_unreachable()
java
public long[] asEpochSecondArray(ZoneOffset offset) {
    long[] output = new long[data.size()];
    for (int i = 0; i < data.size(); i++) {
        LocalDateTime dateTime = PackedLocalDateTime.asLocalDateTime(data.getLong(i));
        if (dateTime == null) {
            output[i] = Long.MIN_VALUE;
        } else {
            output[i] = dateTime.toEpochSecond(offset);
        }
    }
    return output;
}
java
public void copyVendorExtensions(Schema source, Schema target) {
    if (source.getExtensions() != null) {
        Map<String, Object> vendorExtensions = source.getExtensions();
        for (String extName : vendorExtensions.keySet()) {
            ((SchemaImpl) target).addExtension_compat(extName, vendorExtensions.get(extName));
        }
    }
}
java
public String compile(JoinableResourceBundle bundle, String content, String path, GeneratorContext context) {
    JawrLessSource source = new JawrLessSource(bundle, content, path, rsHandler);
    try {
        CompilationResult result = compiler.compile(source, lessConfig);
        addLinkedResources(path, context, source.getLinkedResources());
        return result.getCss();
    } catch (Less4jException e) {
        throw new BundlingProcessException("Unable to generate content for resource path : '" + path + "'", e);
    }
}
python
def SLICE_0(self, instr):
    'obj[:]'
    value = self.ast_stack.pop()

    kw = dict(lineno=instr.lineno, col_offset=0)
    slice = _ast.Slice(lower=None, step=None, upper=None, **kw)
    subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Load(), **kw)

    self.ast_stack.append(subscr)
python
def read32(bytestream):
    """Read 4 bytes from bytestream as an unsigned 32-bit integer."""
    dt = np.dtype(np.uint32).newbyteorder('>')
    return np.frombuffer(bytestream.read(4), dtype=dt)[0]
python
def plot(self, x, y, **kw):
    """plot x, y values (erasing old plot),
    for method options see PlotPanel.plot.
    """
    return self.frame.plot(x, y, **kw)
python
def stop_recording(self):
    """
    Stops writing video to file.
    """
    if not self._recording:
        raise Exception("Cannot stop a video recording when it's not recording!")

    self._cmd_q.put(('stop',))
    self._recording = False
java
@Override
public com.liferay.commerce.user.segment.model.CommerceUserSegmentCriterion deleteCommerceUserSegmentCriterion(
        com.liferay.commerce.user.segment.model.CommerceUserSegmentCriterion commerceUserSegmentCriterion)
    throws com.liferay.portal.kernel.exception.PortalException {
    return _commerceUserSegmentCriterionLocalService.deleteCommerceUserSegmentCriterion(commerceUserSegmentCriterion);
}
python
def pipe_count(context=None, _INPUT=None, conf=None, **kwargs):
    """An operator that counts the number of _INPUT items and yields it
    forever. Not loopable.

    Parameters
    ----------
    context : pipe2py.Context object
    _INPUT : pipe2py.modules pipe like object (iterable of items)
    conf : not used

    Yields
    ------
    _OUTPUT : number of items in the feed

    Examples
    --------
    >>> generator = (x for x in xrange(5))
    >>> count = pipe_count(_INPUT=generator)
    >>> count  #doctest: +ELLIPSIS
    <generator object pipe_count at 0x...>
    >>> count.next()
    5
    """
    count = len(list(_INPUT))

    # todo: check all operators (not placeable in loops)
    while True:
        yield count
python
def remove(self, rel_path, propagate=False):
    '''Delete the file from the cache, and from the upstream'''
    repo_path = os.path.join(self.cache_dir, rel_path)

    if os.path.exists(repo_path):
        os.remove(repo_path)

    if self.upstream and propagate:
        self.upstream.remove(rel_path, propagate)
java
public static <L> Builder<L> builder(Resource<L> template) {
    return new Builder<L>(template);
}
python
def apply_color(self, arr, state):
    """Apply color formula to an array."""
    ops = self.cmd(state)
    for func in parse_operations(ops):
        arr = func(arr)

    return arr
java
public void into(@NonNull RemoteViews remoteViews, @IdRes int viewId, int notificationId,
        @NonNull Notification notification, @Nullable String notificationTag) {
    into(remoteViews, viewId, notificationId, notification, notificationTag, null);
}
python
def parent(self):
    "Get this object's parent"
    if self._parent:
        return self._parent
    # auto-compute parent if needed
    elif getattr(self, '__parent_type__', None):
        return self._get_subfolder('..' if self._url[2].endswith('/') else '.',
                                   self.__parent_type__)
    else:
        raise AttributeError("%r has no parent attribute" % type(self))
python
def bioul_tags_to_spans(tag_sequence: List[str],
                        classes_to_ignore: List[str] = None) -> List[TypedStringSpan]:
    """
    Given a sequence corresponding to BIOUL tags, extracts spans.
    Spans are inclusive and can be of zero length, representing a single word span.
    Ill-formed spans are not allowed and will raise ``InvalidTagSequence``.
    This function works properly when the spans are unlabeled (i.e., your labels are
    simply "B", "I", "O", "U", and "L").

    Parameters
    ----------
    tag_sequence : ``List[str]``, required.
        The tag sequence encoded in BIOUL, e.g. ["B-PER", "L-PER", "O"].
    classes_to_ignore : ``List[str]``, optional (default = None).
        A list of string class labels `excluding` the bio tag
        which should be ignored when extracting spans.

    Returns
    -------
    spans : ``List[TypedStringSpan]``
        The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)).
    """
    spans = []
    classes_to_ignore = classes_to_ignore or []
    index = 0
    while index < len(tag_sequence):
        label = tag_sequence[index]
        if label[0] == 'U':
            spans.append((label.partition('-')[2], (index, index)))
        elif label[0] == 'B':
            start = index
            while label[0] != 'L':
                index += 1
                if index >= len(tag_sequence):
                    raise InvalidTagSequence(tag_sequence)
                label = tag_sequence[index]
                if not (label[0] == 'I' or label[0] == 'L'):
                    raise InvalidTagSequence(tag_sequence)
            spans.append((label.partition('-')[2], (start, index)))
        else:
            if label != 'O':
                raise InvalidTagSequence(tag_sequence)
        index += 1
    return [span for span in spans if span[0] not in classes_to_ignore]
java
private String createURLs(String jsonString) {
    String urlPattern = "(?:(?:https?|file)://)[^\"\\r\\n]+";
    jsonString = jsonString.replaceAll(urlPattern, "<a href=$0>$0</a>");
    return jsonString;
}
python
def loadUnStructuredGrid(filename):  # not tested
    """Load a ``vtkUnstructuredGrid`` object from file and return an ``Actor(vtkActor)`` object."""
    reader = vtk.vtkUnstructuredGridReader()
    reader.SetFileName(filename)
    reader.Update()
    gf = vtk.vtkUnstructuredGridGeometryFilter()
    gf.SetInputConnection(reader.GetOutputPort())
    gf.Update()
    return Actor(gf.GetOutput())
python
def FromManagedObject(self):
    """
    Method creates and returns an object of _GenericMO class using the classId and
    other information from the managed object.
    """
    import os

    if isinstance(self.mo, ManagedObject):
        self.classId = self.mo.classId
        if self.mo.getattr('Dn'):
            self.dn = self.mo.getattr('Dn')
        if self.mo.getattr('Rn'):
            self.rn = self.mo.getattr('Rn')
        elif self.dn:
            self.rn = os.path.basename(self.dn)

        for property in UcsUtils.GetUcsPropertyMetaAttributeList(self.mo.classId):
            self.properties[property] = self.mo.getattr(property)

        if len(self.mo.child):
            for ch in self.mo.child:
                if not ch.getattr('Dn'):
                    _Dn = self.mo.getattr('Dn') + "/" + ch.getattr('Rn')
                    ch.setattr('Dn', _Dn)
                gmo = _GenericMO(mo=ch)
                self.child.append(gmo)
java
@Override
public ListQualificationRequestsResult listQualificationRequests(ListQualificationRequestsRequest request) {
    request = beforeClientExecution(request);
    return executeListQualificationRequests(request);
}
java
public ServiceFuture<ExpressRoutePortInner> beginCreateOrUpdateAsync(String resourceGroupName, String expressRoutePortName,
        ExpressRoutePortInner parameters, final ServiceCallback<ExpressRoutePortInner> serviceCallback) {
    return ServiceFuture.fromResponse(
        beginCreateOrUpdateWithServiceResponseAsync(resourceGroupName, expressRoutePortName, parameters),
        serviceCallback);
}
python
def accept_project_transfers(access_level,queue,org,share_with_org=None): """ Args: access_level: `str`. Permissions level the new member should have on transferred projects. Should be one of ["VIEW","UPLOAD","CONTRIBUTE","ADMINISTER"]. See https://wiki.dnanexus.com/API-Specification-v1.0.0/Project-Permissions-and-Sharing for more details on access levels. queue: `str`. The value of the queue property on a DNAnexus project. Only projects that are pending transfer that have this value for the queue property will be transferred to the specified org. org: `str`. The name of the DNAnexus org under which to accept the project transfers for projects that have their queue property set to the value of the 'queue' argument. share_with_org: `str`. Set this argument if you'd like to share the transferred projects with the org so that all users of the org will have access to the project. The value you supply should be the access level that members of the org will have. Returns: `dict`: The projects that were transferred to the specified billing account. Keys are the project IDs, and values are the project names. """ dx_username = gbsc_dnanexus.utils.get_dx_username() #gbsc_dnanexus.utils.log_into_dnanexus(dx_username) org = gbsc_dnanexus.utils.add_dx_orgprefix(org) pending_transfers = gbsc_dnanexus.utils.pending_transfers(dx_username) #pending_transfers is a list of project IDs transferred = {} for proj_id in pending_transfers: dx_proj = dxpy.DXProject(proj_id) props = dx_proj.describe(input_params={"properties": True})["properties"] try: project_queue = props["queue"] except KeyError: raise DxProjectMissingQueueProperty("DNAnexus project {proj_name} ({proj_id}) is missing the queue property.".format(proj_name=dx_proj.name,proj_id=proj_id)) if queue != project_queue: continue msg = "Accepting project transfer of {proj_name} ({proj_id}) for user {user}, to be billed under the org {org}.".format(proj_name=dx_proj.name,proj_id=proj_id,user=dx_username,org=org) debug_logger.debug(msg) dxpy.DXHTTPRequest("/" + proj_id + "/acceptTransfer", {"billTo": org }) success_logger.info(msg) transferred[proj_id] = dx_proj.name if share_with_org: msg = "Sharing project {proj_id} with {org} with access level {share_with_org}.".format(proj_id=proj_id,org=org,share_with_org=share_with_org) debug_logger.debug(msg) share_with_org(project_ids=[proj_id], org=org, access_level=share_with_org) dxpy.api.project_invite(object_id=proj_id,input_params={"invitee": org,"level": share_with_org,"suppressEmailNotification": True}) success_logger.info(msg) return transferred
java
@Override
protected void logIncrementalStatsByAccount(String account, RunStats stats) {
    DuplicationRunStats dstats = (DuplicationRunStats) stats;
    log.info("Session stats by account (incremental): account={} dups={} deletes={}",
             account, dstats.getDups(), dstats.getDeletes());
}
java
public Response createCharsetResponse(CloseableHttpResponse httpResponse) throws IOException {
    HttpEntity entity = httpResponse.getEntity();
    Charset charset = ContentType.getOrDefault(httpResponse.getEntity()).getCharset();
    charset = (charset == null) ? Charset.defaultCharset() : charset;
    return Response
            .status(httpResponse.getStatusLine().getStatusCode())
            .entity(entity != null ? IOUtils.toString(entity.getContent(), charset) : null)
            .build();
}
java
public void uploadDirectoryWithRetries(final String ftpServer, final String username, final String password,
        final String sourceDirectory, final String targetDirectory,
        final int maxRetryCount) throws MojoExecutionException {
    int retryCount = 0;
    while (retryCount < maxRetryCount) {
        retryCount++;
        log.info(UPLOAD_START + ftpServer);
        if (uploadDirectory(ftpServer, username, password, sourceDirectory, targetDirectory)) {
            log.info(UPLOAD_SUCCESS + ftpServer);
            return;
        } else {
            log.warn(String.format(UPLOAD_FAILURE, retryCount, maxRetryCount));
        }
    }
    // Reaching here means all retries failed.
    throw new MojoExecutionException(String.format(UPLOAD_RETRY_FAILURE, maxRetryCount));
}
python
def main(argv=None):
    """Run a Tensorflow model on the Iris dataset."""
    args = parse_arguments(sys.argv if argv is None else argv)

    tf.logging.set_verbosity(tf.logging.INFO)
    learn_runner.run(
        experiment_fn=get_experiment_fn(args),
        output_dir=args.job_dir)
python
def list_tickets(self, **kwargs):
    """List all tickets, optionally filtered by a view. Specify filters as
    keyword arguments, such as:

    filter_name = one of ['all_tickets', 'new_my_open', 'spam', 'deleted', None]
        (defaults to 'all_tickets'; passing None uses the default)

    Multiple filters are AND'd together.
    """

    filter_name = 'all_tickets'
    if 'filter_name' in kwargs and kwargs['filter_name'] is not None:
        filter_name = kwargs['filter_name']
        del kwargs['filter_name']

    url = 'helpdesk/tickets/filter/%s?format=json' % filter_name
    page = 1
    tickets = []

    # Skip pagination by looping over each page and adding tickets
    while True:
        this_page = self._api._get(url + '&page=%d' % page, kwargs)
        if len(this_page) == 0:
            break
        tickets += this_page
        page += 1

    return [self.get_ticket(t['display_id']) for t in tickets]
python
def lemmatize(self):
    """Return the lemma of each word in this WordList.

    Currently using NLTKPunktTokenizer() for all lemmatization
    tasks. This might cause slightly different tokenization results
    compared to the TextBlob.words property.
    """
    _lemmatizer = PatternParserLemmatizer(tokenizer=NLTKPunktTokenizer())
    # WordList object --> Sentence.string
    # add a period (improves parser accuracy)
    _raw = " ".join(self) + "."
    _lemmas = _lemmatizer.lemmatize(_raw)
    return self.__class__([Word(l, t) for l, t in _lemmas])
python
def reasoning_routine(self, groups, check, priority_flag=3, _top_level=True): """ print routine performed @param list groups: the Result groups @param str check: checker name @param int priority_flag: indicates the weight of the groups @param bool _top_level: indicates the level of the group so as to print out the appropriate header string """ sort_fn = lambda x: x.weight groups_sorted = sorted(groups, key=sort_fn, reverse=True) # create dict of the groups -> {level: [reasons]} result = {key: [v for v in valuesiter if v.value[0] != v.value[1]] for key, valuesiter in itertools.groupby(groups_sorted, key=sort_fn)} priorities = self.checkers[check]._cc_display_headers def process_table(res, check): """Recursively calls reasoning_routine to parse out child reasons from the parent reasons. @param Result res: Result object @param str check: checker name""" issue = res.name if not res.children: reasons = res.msgs else: child_reasons = self.reasoning_routine(res.children, check, _top_level=False) # there shouldn't be messages if there are children # is this a valid assumption? reasons = child_reasons return issue, reasons # iterate in reverse to the min priority requested; # the higher the limit, the more lenient the output proc_strs = "" for level in range(3, priority_flag - 1, -1): level_name = priorities.get(level, level) # print headers proc_strs = [] # skip any levels that aren't in the result if level not in result: continue # skip any empty result levels if len(result[level]) > 0: # only print priority headers at top level, i.e. non-child # datasets if _top_level: width = 2 * self.col_width print("\n") print('{:^{width}}'.format(level_name, width=width)) print("-" * width) data_issues = [process_table(res, check) for res in result[level]] has_printed = False for issue, reasons in data_issues: # if this isn't the first printed issue, add a newline # separating this and the previous level if has_printed: print("") # join alphabetized reasons together reason_str = "\n".join('* {}'.format(r) for r in sorted(reasons, key=lambda x: x[0])) proc_str = "{}\n{}".format(issue, reason_str) print(proc_str) proc_strs.append(proc_str) has_printed = True return "\n".join(proc_strs)
python
def _init_org(self): """ Test and refresh credentials to the org specified. """ self.logger.info( "Verifying and refreshing credentials for the specified org: {}.".format( self.org_config.name ) ) orig_config = self.org_config.config.copy() # attempt to refresh the token, this can throw... self.org_config.refresh_oauth_token(self.project_config.keychain) if self.org_config.config != orig_config: self.logger.info("Org info has changed, updating org in keychain") self.project_config.keychain.set_org(self.org_config)
java
protected String formatHTTPURLParameters(HTTPFaxClientSpi faxClientSpi,FaxJob faxJob) { //get URL parameters String urlParametersTemplate=faxClientSpi.getHTTPURLParameters(); //format URL parameters String urlParameters=SpiUtil.formatTemplate(urlParametersTemplate,faxJob,SpiUtil.URL_ENCODER,false,false); return urlParameters; }
java
public static URI getRelativePath(final URI relativePath) { final StringTokenizer tokenizer = new StringTokenizer(relativePath.toString(), URI_SEPARATOR); final StringBuilder buffer = new StringBuilder(); if (tokenizer.countTokens() == 1) { return null; } else { while(tokenizer.countTokens() > 1) { tokenizer.nextToken(); buffer.append(".."); buffer.append(URI_SEPARATOR); } return toURI(buffer.toString()); } }
python
def _FormatSocketExToken(self, token_data):
  """Formats an extended socket token as a dictionary of values.

  Args:
    token_data (bsm_token_data_socket_ex): AUT_SOCKET_EX token data.

  Returns:
    dict[str, str]: token values.
  """
  # A socket domain of 10 is treated here as an IPv6 address family; any
  # other value is formatted as a packed IPv4 address.
  if token_data.socket_domain == 10:
    local_ip_address = self._FormatPackedIPv6Address(
        token_data.local_ip_address)
    remote_ip_address = self._FormatPackedIPv6Address(
        token_data.remote_ip_address)
  else:
    local_ip_address = self._FormatPackedIPv4Address(
        token_data.local_ip_address)
    remote_ip_address = self._FormatPackedIPv4Address(
        token_data.remote_ip_address)

  return {
      'from': local_ip_address,
      'from_port': token_data.local_port,
      'to': remote_ip_address,
      'to_port': token_data.remote_port}
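A standalone sketch of formatting packed addresses with the standard library; these are not the helpers used above, and the note that AF_INET6 equals 10 is true on Linux but only an assumption about why that constant appears here.

# Stdlib-only illustration; the packed byte strings are invented examples.
import ipaddress
import socket

packed_v4 = bytes([192, 168, 0, 1])
packed_v6 = bytes(15) + bytes([1])  # 16 bytes ending in 0x01, i.e. ::1
print(ipaddress.IPv4Address(packed_v4))  # 192.168.0.1
print(ipaddress.IPv6Address(packed_v6))  # ::1
print(socket.AF_INET6)  # 10 on Linux; other platforms use different values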
python
def validate_request(self, uri, http_method='GET', body=None, headers=None): """Validate a signed OAuth request. :param uri: The full URI of the token request. :param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc. :param body: The request body as a string. :param headers: The request headers as a dict. :returns: A tuple of 2 elements. 1. True if valid, False otherwise. 2. An oauthlib.common.Request object. """ try: request = self._create_request(uri, http_method, body, headers) except errors.OAuth1Error as err: log.info( 'Exception caught while validating request, %s.' % err) return False, None try: self._check_transport_security(request) self._check_mandatory_parameters(request) except errors.OAuth1Error as err: log.info( 'Exception caught while validating request, %s.' % err) return False, request if not self.request_validator.validate_timestamp_and_nonce( request.client_key, request.timestamp, request.nonce, request): log.debug('[Failure] verification failed: timestamp/nonce') return False, request # The server SHOULD return a 401 (Unauthorized) status code when # receiving a request with invalid client credentials. # Note: This is postponed in order to avoid timing attacks, instead # a dummy client is assigned and used to maintain near constant # time request verification. # # Note that early exit would enable client enumeration valid_client = self.request_validator.validate_client_key( request.client_key, request) if not valid_client: request.client_key = self.request_validator.dummy_client valid_signature = self._check_signature(request) # log the results to the validator_log # this lets us handle internal reporting and analysis request.validator_log['client'] = valid_client request.validator_log['signature'] = valid_signature # We delay checking validity until the very end, using dummy values for # calculations and fetching secrets/keys to ensure the flow of every # request remains almost identical regardless of whether valid values # have been supplied. This ensures near constant time execution and # prevents malicious users from guessing sensitive information v = all((valid_client, valid_signature)) if not v: log.info("[Failure] request verification failed.") log.info("Valid client: %s", valid_client) log.info("Valid signature: %s", valid_signature) return v, request
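A hedged sketch of driving this validator from application code; the `SignatureOnlyEndpoint` wrapper and the `MyRequestValidator` subclass are assumptions about the surrounding library, not taken from the code above.

# Illustrative only: the validator subclass and endpoint wiring are assumed.
from oauthlib.oauth1 import SignatureOnlyEndpoint

endpoint = SignatureOnlyEndpoint(MyRequestValidator())
is_valid, request = endpoint.validate_request(
    'https://api.example.com/protected',
    http_method='GET',
    headers={'Authorization': 'OAuth oauth_consumer_key="...", ...'})
if not is_valid:
    pass  # respond with HTTP 401 in the web framework of choice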
java
public static final String encode(String str, Charset charset) { return encode(str, charset.name()); }
python
def list_scores(self, update_keys, session=None, lightweight=None): """ Returns a list of current scores for the given events. :param list update_keys: The filter to select desired markets. All markets that match the criteria in the filter are selected e.g. [{'eventId': '28205674', 'lastUpdateSequenceProcessed': 2}] :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: list[resources.Score] """ params = clean_locals(locals()) method = '%s%s' % (self.URI, 'listScores') (response, elapsed_time) = self.request(method, params, session) return self.process_response(response, resources.Score, elapsed_time, lightweight)
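A hedged usage sketch; the `client` object and the `scores` attribute it hangs off are assumptions about the surrounding library, while the `update_keys` shape follows the docstring above.

# Illustrative only: client construction and attribute names are assumed.
update_keys = [{'eventId': '28205674', 'lastUpdateSequenceProcessed': 2}]
scores = client.scores.list_scores(update_keys=update_keys)
raw_scores = client.scores.list_scores(update_keys=update_keys,
                                       lightweight=True)  # plain dicts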
java
public SmtpProtocol createSmtpProtocolFromString(EDataType eDataType, String initialValue) { SmtpProtocol result = SmtpProtocol.get(initialValue); if (result == null) throw new IllegalArgumentException("The value '" + initialValue + "' is not a valid enumerator of '" + eDataType.getName() + "'"); return result; }
java
public static Class requiredClassAttribute( final XMLStreamReader reader, final String namespace, final String localName) throws XMLStreamException { final String value = reader.getAttributeValue(namespace, localName); if (value != null) { try { return Class.forName(value.toString()); } catch (final ClassNotFoundException e) { throw createXMLStreamException( MessageFormat.format("\"{0}\" is not a valid class name.", value), reader, e); } } throw new XMLStreamException( MessageFormat.format("Attribute {0}:{1} is required", namespace, localName)); }
java
public ServiceCall<Grammar> getGrammar(GetGrammarOptions getGrammarOptions) { Validator.notNull(getGrammarOptions, "getGrammarOptions cannot be null"); String[] pathSegments = { "v1/customizations", "grammars" }; String[] pathParameters = { getGrammarOptions.customizationId(), getGrammarOptions.grammarName() }; RequestBuilder builder = RequestBuilder.get(RequestBuilder.constructHttpUrl(getEndPoint(), pathSegments, pathParameters)); Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("speech_to_text", "v1", "getGrammar"); for (Entry<String, String> header : sdkHeaders.entrySet()) { builder.header(header.getKey(), header.getValue()); } builder.header("Accept", "application/json"); return createServiceCall(builder.build(), ResponseConverterUtils.getObject(Grammar.class)); }
java
public boolean isSpinnerTextSelected(String text) { if(config.commandLogging){ Log.d(config.commandLoggingTag, "isSpinnerTextSelected(\""+text+"\")"); } return checker.isSpinnerTextSelected(text); }
python
def _get_query_argument(args, cell, env):
  """ Get a query argument to a cell magic.

  The query is specified with args['query']. We look that up and if it is a BQ
  query just return it. If it is instead a SqlModule or SqlStatement it may
  have variable references. We resolve those using the arg parser for the
  SqlModule, then override the resulting defaults with either the Python code
  in the cell or the dictionary of overrides. The latter applies when the
  overrides are specified with YAML or JSON; eventually we should eliminate
  code in favor of this.

  Args:
    args: the dictionary of magic arguments.
    cell: the cell contents which can be variable value overrides (if args has
        a 'query' value) or inline SQL otherwise.
    env: a dictionary that is used for looking up variable values.

  Returns:
    A Query object.
  """
  sql_arg = args.get('query', None)
  if sql_arg is None:
    # Assume we have inline SQL in the cell
    if not isinstance(cell, basestring):
      raise Exception('Expected a --query argument or inline SQL')
    return datalab.bigquery.Query(cell, values=env)

  item = datalab.utils.commands.get_notebook_item(sql_arg)
  if isinstance(item, datalab.bigquery.Query):
    # Queries are already expanded.
    return item

  # Create an expanded BQ Query.
  config = datalab.utils.commands.parse_config(cell, env)
  item, env = datalab.data.SqlModule.get_sql_statement_with_environment(item, config)
  if cell:
    env.update(config)  # config is both a fallback and an override.
  return datalab.bigquery.Query(item, values=env)
python
def memory(self):
    """Property to provide reference to `MemoryCollection` instance.

    It is calculated once, the first time it is queried. On refresh,
    this property gets reset.
    """
    return memory.MemoryCollection(
        self._conn, utils.get_subresource_path_by(self, 'Memory'),
        redfish_version=self.redfish_version)
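The docstring promises compute-once semantics with a reset on refresh, which the snippet alone does not show (any caching decorator sits outside it). A minimal standalone sketch of that pattern with invented names:

# Invented example of a compute-once property with an explicit reset hook.
class CachedExample(object):
    def __init__(self):
        self._memory = None

    @property
    def memory(self):
        if self._memory is None:
            self._memory = object()  # stands in for MemoryCollection(...)
        return self._memory

    def refresh(self):
        self._memory = None  # the cached value is dropped on refresh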