language
stringclasses
2 values
func_code_string
stringlengths
63
466k
java
/**
 * Serializes the component/emit info together with a timestamp to JSON and
 * broadcasts it asynchronously to every registered WebSocket session.
 * If JSON serialization fails, the event is logged with a warning and dropped.
 *
 * @param componentInfo component metadata included in the log record
 * @param info          emit payload included in the log record
 */
public void emit(ComponentInfo componentInfo, EmitInfo info) {
    // Nothing to do when no session is connected. (isEmpty() is the
    // idiomatic form of the original size() == 0 check.)
    if (this.sessions.isEmpty()) {
        return;
    }

    // SimpleDateFormat is not thread-safe; a fresh local instance per call is safe.
    Date nowTime = new Date();
    DateFormat format = new SimpleDateFormat(DATE_PATTERN);
    String nowTimeStr = format.format(nowTime);

    Map<String, Object> objectMap = Maps.newHashMap();
    objectMap.put("time", nowTimeStr);
    objectMap.put("component", componentInfo);
    objectMap.put("emitinfo", info);

    String logInfo;
    try {
        logInfo = this.mapper.writeValueAsString(objectMap);
    } catch (JsonProcessingException ex) {
        // Serialization failure: warn and drop this event rather than failing the caller.
        String msgFormat = "Event convert failed. Skip log output. : ComponentInfo={0}, EmitInfo={1}";
        String message = MessageFormat.format(msgFormat, componentInfo,
                ToStringBuilder.reflectionToString(info, ToStringStyle.SHORT_PREFIX_STYLE));
        logger.warn(message, ex);
        return;
    }

    // Fan out asynchronously; completion/failure is reported to this.handler.
    for (Entry<String, Session> entry : this.sessions.entrySet()) {
        entry.getValue().getAsyncRemote().sendText(logInfo, this.handler);
    }
}
python
def _ensure_format(rule, attribute, res_dict):
    """Verifies that attribute in res_dict is properly formatted.

    Since, in the .ini-files, lists are specified as ':' separated text and
    UUID values can be plain integers we need to transform any such values
    into proper format. Empty strings are converted to None if validator
    specifies that None value is accepted.

    :param rule: validator rule name, e.g. 'type:uuid' or 'type:uuid_list'
    :param attribute: key in res_dict whose value is normalized in place
    :param res_dict: resource dictionary (mutated in place)
    """
    if rule == 'type:uuid' or (rule == 'type:uuid_or_none' and
                               res_dict[attribute]):
        res_dict[attribute] = uuidify(res_dict[attribute])
    elif rule == 'type:uuid_list':
        if not res_dict[attribute]:
            res_dict[attribute] = []
        else:
            # BUG FIX: the original assigned to the bound method
            # (`res_dict[attribute].append = uuidify(item)`) instead of
            # calling it, which raised AttributeError and never built the
            # list. Build the converted list directly instead.
            res_dict[attribute] = [uuidify(item)
                                   for item in res_dict[attribute].split(':')]
    elif rule == 'type:string_or_none' and res_dict[attribute] == "":
        res_dict[attribute] = None
java
/**
 * Validates the required identifiers, extracts the optional {@code take}
 * limit, then delegates to the parameter-expanded overload that performs the
 * actual service call for intent suggestions.
 *
 * @param appId application ID (required)
 * @param versionId version ID (required)
 * @param intentId intent classifier ID (required)
 * @param getIntentSuggestionsOptionalParameter optional parameters (may be null)
 * @return an observable emitting the service response
 * @throws IllegalArgumentException if a required parameter is null
 */
public Observable<ServiceResponse<List<IntentsSuggestionExample>>> getIntentSuggestionsWithServiceResponseAsync(UUID appId, String versionId, UUID intentId, GetIntentSuggestionsOptionalParameter getIntentSuggestionsOptionalParameter) {
    if (this.client.endpoint() == null) {
        throw new IllegalArgumentException("Parameter this.client.endpoint() is required and cannot be null.");
    }
    if (appId == null) {
        throw new IllegalArgumentException("Parameter appId is required and cannot be null.");
    }
    if (versionId == null) {
        throw new IllegalArgumentException("Parameter versionId is required and cannot be null.");
    }
    if (intentId == null) {
        throw new IllegalArgumentException("Parameter intentId is required and cannot be null.");
    }
    final Integer take;
    if (getIntentSuggestionsOptionalParameter == null) {
        take = null;
    } else {
        take = getIntentSuggestionsOptionalParameter.take();
    }
    return getIntentSuggestionsWithServiceResponseAsync(appId, versionId, intentId, take);
}
python
def find_section(self, charindex):
    """Return which section of this element owns the given character index.

    :returns: "signature", "body", "docstring", or None when the index
        falls outside every known region.
    """
    # All decorable instances also inherit from CodeElement, so start/end
    # should normally be present; guard with hasattr anyway.
    if hasattr(self, "start") and hasattr(self, "end"):
        # The 8 seems arbitrary, but it is the length of "type::b\n" for a
        # really short type declaration with a one-character name.
        if charindex > self.docend and charindex - self.start < 8:
            return "signature"
        if self.start <= charindex <= self.end:
            return "body"
    # Fall back to the docstring region when nothing else matched.
    if self.docstart <= charindex <= self.docend:
        return "docstring"
    return None
java
/**
 * Synchronous contact search returning the HTTP response metadata along with
 * the parsed body.
 *
 * @param luceneSearchData the search criteria
 * @return the API response wrapping an ApiSuccessResponse
 * @throws ApiException if validation or the HTTP call fails
 */
public ApiResponse<ApiSuccessResponse> searchContactsWithHttpInfo(LuceneSearchData luceneSearchData) throws ApiException {
    final com.squareup.okhttp.Call call = searchContactsValidateBeforeCall(luceneSearchData, null, null);
    final Type responseType = new TypeToken<ApiSuccessResponse>() { }.getType();
    return apiClient.execute(call, responseType);
}
java
/**
 * Returns every value recorded for the named header, or null when the
 * header is absent.
 */
public String[] values(String name) {
    final HttpHeader header = getHeader(name);
    if (header == null) {
        return null;
    }
    return header.values();
}
java
public void mTokens() throws RecognitionException { // InternalXtype.g:1:8: ( T__10 | T__11 | T__12 | T__13 | T__14 | T__15 | T__16 | T__17 | T__18 | T__19 | T__20 | T__21 | T__22 | T__23 | T__24 | T__25 | T__26 | T__27 | RULE_ID | RULE_STRING | RULE_ML_COMMENT | RULE_SL_COMMENT | RULE_WS | RULE_ANY_OTHER ) int alt13=24; alt13 = dfa13.predict(input); switch (alt13) { case 1 : // InternalXtype.g:1:10: T__10 { mT__10(); } break; case 2 : // InternalXtype.g:1:16: T__11 { mT__11(); } break; case 3 : // InternalXtype.g:1:22: T__12 { mT__12(); } break; case 4 : // InternalXtype.g:1:28: T__13 { mT__13(); } break; case 5 : // InternalXtype.g:1:34: T__14 { mT__14(); } break; case 6 : // InternalXtype.g:1:40: T__15 { mT__15(); } break; case 7 : // InternalXtype.g:1:46: T__16 { mT__16(); } break; case 8 : // InternalXtype.g:1:52: T__17 { mT__17(); } break; case 9 : // InternalXtype.g:1:58: T__18 { mT__18(); } break; case 10 : // InternalXtype.g:1:64: T__19 { mT__19(); } break; case 11 : // InternalXtype.g:1:70: T__20 { mT__20(); } break; case 12 : // InternalXtype.g:1:76: T__21 { mT__21(); } break; case 13 : // InternalXtype.g:1:82: T__22 { mT__22(); } break; case 14 : // InternalXtype.g:1:88: T__23 { mT__23(); } break; case 15 : // InternalXtype.g:1:94: T__24 { mT__24(); } break; case 16 : // InternalXtype.g:1:100: T__25 { mT__25(); } break; case 17 : // InternalXtype.g:1:106: T__26 { mT__26(); } break; case 18 : // InternalXtype.g:1:112: T__27 { mT__27(); } break; case 19 : // InternalXtype.g:1:118: RULE_ID { mRULE_ID(); } break; case 20 : // InternalXtype.g:1:126: RULE_STRING { mRULE_STRING(); } break; case 21 : // InternalXtype.g:1:138: RULE_ML_COMMENT { mRULE_ML_COMMENT(); } break; case 22 : // InternalXtype.g:1:154: RULE_SL_COMMENT { mRULE_SL_COMMENT(); } break; case 23 : // InternalXtype.g:1:170: RULE_WS { mRULE_WS(); } break; case 24 : // InternalXtype.g:1:178: RULE_ANY_OTHER { mRULE_ANY_OTHER(); } break; } }
python
def is_secretly_root(lib):
    """
    Detect an edge case where ROOT Minuit2 is detected as standalone because
    $ROOTSYS/lib is in LD_LIBRARY_PATH, and suggest appropriate
    countermeasures.
    """
    from distutils import ccompiler
    compiler = ccompiler.new_compiler()
    libdir = os.path.dirname(lib)
    # If either core ROOT library is missing next to `lib`, this is not ROOT.
    for rootlib in ("Core", "MathCore"):
        if not compiler.find_library_file([libdir], rootlib):
            return False
    # Looks like part of a ROOT install; confirm root-config is actually usable.
    try:
        root_config('libdir')
        return True
    except OSError:
        raise RuntimeError("Found %s, which appears to be part of ROOT, but could not find root-config in PATH! To build against the standalone Minuit2, remove $ROOTSYS/lib from LD_LIBRARY_PATH; to build against the ROOT version, add $ROOTSYS/bin to your PATH" % lib)
java
/**
 * Clones the given cell style within the workbook that owns the cell's
 * sheet, delegating to the workbook-based overload.
 */
public static CellStyle cloneCellStyle(Cell cell, CellStyle cellStyle) {
    return cloneCellStyle(cell.getSheet().getWorkbook(), cellStyle);
}
java
/**
 * Renders a throwable's stack trace into a string.
 *
 * @param throwable the throwable to render; may be null
 * @return the stack trace text, or null when the input is null
 */
public static String getStackTrace(Throwable throwable) {
    if (throwable == null) {
        return null;
    }
    StringWriter buffer = new StringWriter();
    throwable.printStackTrace(new PrintWriter(buffer));
    return buffer.toString();
}
java
/**
 * Marshals a TagListEntry's key and value through the protocol marshaller.
 *
 * @throws SdkClientException if the entry is null or marshalling fails
 */
public void marshall(TagListEntry tagListEntry, ProtocolMarshaller protocolMarshaller) {
    if (tagListEntry == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(tagListEntry.getKey(), KEY_BINDING);
        protocolMarshaller.marshall(tagListEntry.getValue(), VALUE_BINDING);
    } catch (Exception e) {
        // Wrap any marshalling failure, preserving the cause.
        String message = "Unable to marshall request to JSON: " + e.getMessage();
        throw new SdkClientException(message, e);
    }
}
python
def write_to_file(self, filename, filetype=None):
    """Write the relaxation to a file.

    :param filename: The name of the file to write to. The type can be
                     autodetected from the extension: .dat-s for SDPA,
                     .task for mosek, .csv for human readable format, or
                     .txt for a symbolic export
    :type filename: str.
    :param filetype: Optional parameter to define the filetype. It can be
                     "sdpa" for SDPA , "mosek" for Mosek, "csv" for human
                     readable format, or "txt" for a symbolic export.
    :type filetype: str.
    """
    if filetype == "txt" and not filename.endswith(".txt"):
        raise Exception("TXT files must have .txt extension!")
    if filetype is None and filename.endswith(".txt"):
        filetype = "txt"
    if filetype != "txt":
        # Anything other than the symbolic txt export is handled upstream.
        return super(SteeringHierarchy, self).write_to_file(filename,
                                                            filetype=filetype)
    # BUG FIX: the original only reached the code below when the type was
    # autodetected from the extension; an explicit filetype="txt" with a
    # matching .txt filename fell into the else branch and was incorrectly
    # delegated to the parent class.
    #
    # Symbolic .txt export: dump via a temporary SDPA file, then expand the
    # sparse block entries into a dense matrix written row by row.
    tempfile_ = tempfile.NamedTemporaryFile()
    tmp_filename = tempfile_.name
    tempfile_.close()
    tmp_dats_filename = tmp_filename + ".dat-s"
    write_to_sdpa(self, tmp_dats_filename)
    f = open(tmp_dats_filename, 'r')
    # Skip the three SDPA header lines before the block structure line.
    f.readline(); f.readline(); f.readline()
    blocks = ((f.readline().strip().split(" = ")[0])[1:-1]).split(", ")
    block_offset, matrix_size = [0], 0
    for block in blocks:
        matrix_size += abs(int(block))
        block_offset.append(matrix_size)
    f.readline()
    matrix = [[0 for _ in range(matrix_size)] for _ in range(matrix_size)]
    for line in f:
        entry = line.strip().split("\t")
        var, block = int(entry[0]), int(entry[1]) - 1
        row, column = int(entry[2]) - 1, int(entry[3]) - 1
        value = float(entry[4])
        offset = block_offset[block]
        # Symmetric fill: each sparse entry mirrors across the diagonal.
        matrix[offset + row][offset + column] = int(value * var)
        matrix[offset + column][offset + row] = int(value * var)
    f.close()
    f = open(filename, 'w')
    for matrix_line in matrix:
        f.write(str(matrix_line).replace('[', '').replace(']', '') + '\n')
    f.close()
    os.remove(tmp_dats_filename)
python
def send(self, task, result, expire=60):
    """
    Sends the result back to the producer. This should be called if only
    you want to return the result in async manner.

    :arg task: ::class:`~retask.task.Task` object
    :arg result: Result data to be send back. Should be in JSON serializable.
    :arg expire: Time in seconds after the key expires. Default is 60 seconds.
    """
    payload = json.dumps(result)
    # Push the serialized result onto the task's reply queue, then bound
    # its lifetime so unclaimed results do not accumulate in redis.
    self.rdb.lpush(task.urn, payload)
    self.rdb.expire(task.urn, expire)
python
def subdomains_init(blockstack_opts, working_dir, atlas_state):
    """
    Set up subdomain state

    Returns a SubdomainIndex object that has been successfully connected to
    Atlas, or None when subdomain support is disabled in the options.
    """
    # Subdomain support is optional; bail out early when disabled.
    if not is_subdomains_enabled(blockstack_opts):
        return None
    # NOTE(review): working_dir is accepted but not used here — confirm it
    # is intentionally unused (kept for interface symmetry?).
    subdomain_state = SubdomainIndex(blockstack_opts['subdomaindb_path'], blockstack_opts=blockstack_opts)
    # Re-index subdomains whenever Atlas stores a new zonefile.
    atlas_node_add_callback(atlas_state, 'store_zonefile', subdomain_state.enqueue_zonefile)
    return subdomain_state
java
/**
 * Inspects {@code value} and records its wire type and encoded bytes on
 * {@code this.valueEncoder}. Integers and longs are encoded big-endian via
 * ByteBuffer; null stores only the type marker; any other value falls back
 * to Java serialization.
 *
 * @param value the value to encode; may be null
 * @throws IllegalStateException if Java serialization of the value fails
 */
private void determineValueType(Object value) {
    if (value instanceof Integer) {
        this.valueEncoder.valueType = INTEGER;
        this.valueEncoder.valueBytes = ByteBuffer.allocate(4).putInt((Integer) value).array();
    } else if (value instanceof Long) {
        this.valueEncoder.valueType = LONG;
        this.valueEncoder.valueBytes = ByteBuffer.allocate(8).putLong((Long) value).array();
    } else if (value == null) {
        this.valueEncoder.valueType = NULL;
    } else {
        // BUG FIX: the original called bos.toByteArray() before the
        // ObjectOutputStream was flushed/closed, which can return an
        // incomplete byte stream (ObjectOutputStream buffers internally).
        // try-with-resources closes (and thus flushes) the stream before
        // the bytes are read, and also replaces the manual finally/close.
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
            oos.writeObject(value);
        } catch (Exception e) {
            throw new IllegalStateException("Failed to serialize value: " + value, e);
        }
        this.valueEncoder.valueType = OBJECT;
        this.valueEncoder.valueBytes = bos.toByteArray();
    }
}
python
def _new_sensor_reading(self, sensor_value):
    """
    Call this method to signal a new sensor reading.
    This method handles DB storage and triggers different events.

    :param sensor_value: New value to be stored in the system.
    """
    # NOTE(review): readings are dropped only when the sensor is BOTH
    # inactive and disabled; confirm this should not be `or` (drop when
    # either flag is off).
    if not self._active and not self._enabled:
        return
    if self._dimensions > 1:
        # Multi-dimensional reading: fan each component out to the
        # corresponding sub-sensor, which stores it via this same method.
        for dimension in range(0, self._dimensions):
            value = sensor_value[dimension]
            self._sub_sensors[dimension]._new_sensor_reading(value)
    else:
        self._sensor_value.value = sensor_value
java
/** Builds a name-to-module lookup map over all registered modules. */
Map<String, JSModule> getModulesByName() {
    final Map<String, JSModule> byName = new HashMap<>();
    for (JSModule module : modules) {
        byName.put(module.getName(), module);
    }
    return byName;
}
java
/**
 * Appends the given invitations, lazily creating the backing list on first
 * use.
 *
 * @param invitations the invitations to add
 * @return this object, for method chaining
 */
public ListInvitationsResult withInvitations(Invitation... invitations) {
    if (this.invitations == null) {
        // Presize to the incoming count; the setter installs the list.
        setInvitations(new java.util.ArrayList<Invitation>(invitations.length));
    }
    java.util.Collections.addAll(this.invitations, invitations);
    return this;
}
python
def initialize_bitarray(self):
    """Initialize both bitarrays.

    This BF contains two bit arrays instead of a single one like a plain
    BF. ``bitarray`` is the main bit array where all the historical items
    are stored; it is the one used for membership queries. The second one,
    ``current_day_bitarray``, is the one used for creating the daily
    snapshot.
    """
    self.bitarray = bitarray.bitarray(self.nbr_bits)
    self.current_day_bitarray = bitarray.bitarray(self.nbr_bits)
    # bitarray does not guarantee zeroed bits on construction; clear both.
    self.bitarray.setall(False)
    self.current_day_bitarray.setall(False)
python
def call_list(self):
    """For a call object that represents multiple calls, `call_list`
    returns a list of all the intermediate calls as well as the
    final call."""
    chain = []
    node = self
    # Walk up the parent chain, keeping only nodes that originate from a
    # real call, then reverse so the result runs oldest-to-newest.
    while node is not None:
        if node.from_kall:
            chain.append(node)
        node = node.parent
    return _CallList(reversed(chain))
java
/**
 * Runs a search for all matches of the given searchable, guaranteeing that
 * the thread-local matcher holder is cleared afterwards even on failure.
 */
public <E> Iterable<E> searchForAll(final Searchable<E> searchable) {
    try {
        return searchForAll(configureMatcher(searchable).asList());
    } finally {
        // Always release the thread-local matcher state.
        MatcherHolder.unset();
    }
}
python
def get_real_time_locate(host_ipaddress, auth, url):
    """
    function takes the ipAddress of a specific host and issues a RESTFUL call to get the device
    and interface that the target host is currently connected to. Note: Although intended to
    return a single location, Multiple locations may be returned for a single host due to a
    partially discovered network or misconfigured environment.

    :param host_ipaddress: str value valid IPv4 IP address

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class

    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass

    :return: list of dictionaries where each element of the list represents the location of the
             target host

    :rtype: list

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.termaccess import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> found_device = get_real_time_locate('10.101.0.51', auth.creds, auth.url)
    >>> assert type(found_device) is list
    >>> assert 'deviceId' in found_device[0]
    >>> no_device = get_real_time_locate('192.168.254.254', auth.creds, auth.url)
    >>> assert type(no_device) is dict
    >>> assert len(no_device) == 0
    """
    f_url = url + "/imcrs/res/access/realtimeLocate?type=2&value=" + str(host_ipaddress) + \
            "&total=false"
    response = requests.get(f_url, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 200:
            response = json.loads(response.text)
            if 'realtimeLocation' in response:
                real_time_locate = response['realtimeLocation']
                # A single location comes back as a dict; normalize to a list.
                if isinstance(real_time_locate, dict):
                    real_time_locate = [real_time_locate]
                return real_time_locate
            else:
                # NOTE(review): `response` is already a parsed dict here, so
                # json.loads(response) would raise TypeError; this branch
                # appears unreachable/buggy — verify intended behavior.
                return json.loads(response)['realtimeLocation']
        else:
            # NOTE(review): returns the int 403 here but a list on success
            # and a str on request errors; callers must handle mixed types.
            print("Host not found")
            return 403
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_real_time_locate: An Error has occured"
java
/**
 * Computes the squared distance between the segment
 * (sx1,sy1,sz1)-(sx2,sy2,sz2) and the point (px,py,pz), clamping the
 * projection factor to the segment's endpoints.
 */
@Pure
public static double distanceSquaredSegmentPoint(
        double sx1, double sy1, double sz1,
        double sx2, double sy2, double sz2,
        double px, double py, double pz) {
    final double ratio = getPointProjectionFactorOnSegmentLine(px, py, pz, sx1, sy1, sz1, sx2, sy2, sz2);
    if (ratio <= 0.) {
        // Projection falls before the first endpoint.
        return FunctionalPoint3D.distanceSquaredPointPoint(px, py, pz, sx1, sy1, sz1);
    }
    if (ratio >= 1.) {
        // Projection falls past the second endpoint.
        return FunctionalPoint3D.distanceSquaredPointPoint(px, py, pz, sx2, sy2, sz2);
    }
    // Interior projection: measure to the interpolated point on the segment.
    final double inv = 1. - ratio;
    return FunctionalPoint3D.distanceSquaredPointPoint(
            px, py, pz,
            inv * sx1 + ratio * sx2,
            inv * sy1 + ratio * sy2,
            inv * sz1 + ratio * sz2);
}
java
/**
 * Checks whether the given column is declared nullable in the database
 * metadata.
 *
 * @return true if the column's IS_NULLABLE metadata equals "YES"
 * @throws SQLException on metadata access failure
 */
public static boolean isColumnNullable (Connection conn, String table, String column)
    throws SQLException
{
    // try-with-resources replaces the manual try/finally close and also
    // closes the result set if getString itself throws.
    try (ResultSet rs = getColumnMetaData(conn, table, column)) {
        // Constant-first equals avoids an NPE when the driver reports null.
        return "YES".equals(rs.getString("IS_NULLABLE"));
    }
}
python
def nb_to_html(nb_path):
    """Convert a notebook file to an embeddable HTML fragment.

    Exports the notebook with nbconvert's full HTML template, then rewrites
    the head/body so the result can be inlined into a sphinx page without
    style conflicts.
    """
    exporter = html.HTMLExporter(template_file='full')
    output, resources = exporter.from_filename(nb_path)
    # Pull the raw <head> and <body> contents out of the exported page.
    header = output.split('<head>', 1)[1].split('</head>',1)[0]
    body = output.split('<body>', 1)[1].split('</body>',1)[0]

    # http://imgur.com/eR9bMRH
    # Scope the notebook styles so they do not leak into the host page.
    header = header.replace('<style', '<style scoped="scoped"')
    header = header.replace('body {\n overflow: visible;\n padding: 8px;\n}\n', '')

    # Filter out styles that conflict with the sphinx theme.
    filter_strings = [
        'navbar',
        'body{',
        'alert{',
        'uneditable-input{',
        'collapse{',
    ]
    filter_strings.extend(['h%s{' % (i+1) for i in range(6)])
    header_lines = filter(
        lambda x: not any([s in x for s in filter_strings]), header.split('\n'))
    header = '\n'.join(header_lines)

    # concatenate raw html lines
    lines = ['<div class="ipynotebook">']
    lines.append(header)
    lines.append(body)
    lines.append('</div>')
    return '\n'.join(lines)
python
def pow2_quantize(x, sign=True, with_zero=True, n=8, m=1, quantize=True,
                  ste_fine_grained=True, outputs=None):
    r"""Pow2 Quantize.

    Quantizes each element of `x` to a signed or unsigned power-of-two
    value with `n`-bit precision and dynamic range bounded by
    :math:`\pm 2^m`; the quantization threshold between two representable
    powers of two is their geometric mean, and when `with_zero` is true a
    pruning threshold of :math:`min \times 2^{-1/2}` rounds small inputs to
    zero. See :func:`nnabla.function_bases.pow2_quantize` for the full
    forward/backward definitions, including the straight-through-estimator
    behavior controlled by `ste_fine_grained`.

    Args:
        x (Variable): An input variable.
        sign (bool): Use the signed representation (consumes one bit).
            Default is True.
        with_zero (bool): Allow zero as a quantized value (consumes one
            bit). Default is True.
        n (int): Total bit width used. Default is 8.
        m (int): :math:`2^m` is the upper bound of the dynamic range and
            :math:`-2^m` the lower bound. Default is 1.
        quantize (bool): If True, quantize the input; otherwise return it
            unchanged.
        ste_fine_grained (bool): If True, the straight-through estimator
            zeroes gradients outside the representable range instead of
            passing them through as 1.
        outputs: Optional output variable(s), forwarded to the base
            function.

    Returns:
        ~nnabla.Variable: N-D array.

    See Also:
        ``nnabla.function_bases.pow2_quantize``.
    """
    from .function_bases import pow2_quantize as pow2_quantize_base
    # Identity shortcut when quantization is disabled.
    if not quantize:
        return x
    return pow2_quantize_base(x, sign, with_zero, n, m, ste_fine_grained,
                              outputs=outputs)
python
def value(self):
    """
    Returns the current value (adjusted for the time decay)

    :rtype: float
    """
    with self.lock:
        now = time.time()
        elapsed = now - self.t0
        self.t0 = now
        # Apply exponential decay for the interval since the last read.
        self.p *= math.pow(self.e, self.r * elapsed)
        return self.p
python
def _wikipedia_known_port_ranges():
    """
    Returns used port ranges according to Wikipedia page.
    This page contains unofficial well-known ports.

    :return: generator of (low, high) int port-range tuples; a single port
        becomes the range (p, p).
    """
    # Some servers reject requests without a browser-like User-Agent.
    req = urllib2.Request(WIKIPEDIA_PAGE, headers={'User-Agent' : "Magic Browser"})
    page = urllib2.urlopen(req).read().decode('utf8')
    # just find all numbers in table cells
    # Matches "<td>N</td>" or "<td>N<sep>M</td>"; group index 1 is the low
    # port and group index 3 the optional high port.
    ports = re.findall('<td>((\d+)(\W(\d+))?)</td>', page, re.U)
    return ((int(p[1]), int(p[3] if p[3] else p[1])) for p in ports)
python
def configure(self, options, config):
    """Configures the xunit plugin.

    Sets up the Jinja template environment, report counters, and the output
    file when the plugin is enabled.
    """
    Plugin.configure(self, options, config)
    self.config = config
    if self.enabled:
        # Jinja environment rendering the HTML report templates shipped
        # next to this module.
        self.jinja = Environment(
            loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
            trim_blocks=True,
            lstrip_blocks=True
        )
        # Aggregate counters and per-group test data for the final report.
        self.stats = {'errors': 0, 'failures': 0, 'passes': 0, 'skipped': 0}
        self.report_data = defaultdict(Group)
        # 'replace' avoids crashes on undecodable characters when writing.
        self.report_file = codecs.open(options.html_file, 'w', self.encoding, 'replace')
python
def parse(self):
    """Check input and return a :class:`Migration` instance."""
    migration = self.parsed.get('migration')
    if not migration:
        raise ParseError(u"'migration' key is missing", YAML_EXAMPLE)
    # Reject unexpected keys under 'migration' before parsing versions.
    self.check_dict_expected_keys(
        {'options', 'versions'}, migration, 'migration',
    )
    return self._parse_migrations()
python
def cmd_notice(self, connection, sender, target, payload):
    """
    Sends a message

    :param connection: IRC connection (unused here)
    :param sender: command sender
    :param target: command target (unused here)
    :param payload: raw command payload, split by parse_payload into
        the message target, topic and content
    """
    msg_target, topic, content = self.parse_payload(payload)

    def callback(sender, payload):
        # Fired when the remote peer acknowledges the notice.
        logging.info("NOTICE ACK from %s: %s", sender, payload)

    self.__herald.notice(msg_target, topic, content, callback)
python
def update_object(self, ref, payload, return_fields=None):
    """Update an Infoblox object

    Args:
        ref           (str): Infoblox object reference
        payload      (dict): Payload with data to send
        return_fields (list): Optional fields to request in the reply

    Returns:
        The object reference of the updated object

    Raises:
        InfobloxException
    """
    query_params = self._build_query_params(return_fields=return_fields)
    opts = self._get_request_options(data=payload)
    url = self._construct_url(ref, query_params)
    self._log_request('put', url, opts)
    r = self.session.put(url, **opts)
    # Raise immediately on auth failures before interpreting the status.
    self._validate_authorized(r)
    if r.status_code != requests.codes.ok:
        # Distinguish service outages from genuine update failures.
        self._check_service_availability('update', r, ref)
        raise ib_ex.InfobloxCannotUpdateObject(
            response=jsonutils.loads(r.content),
            ref=ref,
            content=r.content,
            code=r.status_code)
    return self._parse_reply(r)
java
/**
 * Returns the ReloadableType for the given class only if one exists and it
 * has actually been reloaded; otherwise returns null.
 */
private static ReloadableType getReloadableTypeIfHasBeenReloaded(Class<?> clazz) {
    // Fast path: skip the registry lookup when nothing was ever reloaded.
    if (TypeRegistry.nothingReloaded) {
        return null;
    }
    final ReloadableType rtype = getRType(clazz);
    return (rtype != null && rtype.hasBeenReloaded()) ? rtype : null;
}
python
def add_to_postmortem_exclusion_list(cls, pathname, bits = None):
    """
    Adds the given filename to the exclusion list for postmortem debugging.

    @warning: This method requires administrative rights.

    @see: L{get_postmortem_exclusion_list}

    @type  pathname: str
    @param pathname:
        Application pathname to exclude from postmortem debugging.

    @type  bits: int
    @param bits: Set to C{32} for the 32 bits debugger, or C{64} for the
        64 bits debugger. Set to {None} for the default (L{System.bits}).

    @raise WindowsError:
        Raises an exception on error.
    """
    if bits is None:
        bits = cls.bits
    elif bits not in (32, 64):
        raise NotImplementedError("Unknown architecture (%r bits)" % bits)
    # On a 64-bit OS the 32-bit debugger settings live under Wow6432Node.
    if bits == 32 and cls.bits == 64:
        keyname = 'HKLM\\SOFTWARE\\Wow6432Node\\Microsoft\\Windows NT\\CurrentVersion\\AeDebug\\AutoExclusionList'
    else:
        keyname = 'HKLM\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\AeDebug\\AutoExclusionList'
    # Create the registry key on first use.
    try:
        key = cls.registry[keyname]
    except KeyError:
        key = cls.registry.create(keyname)
    # Any nonzero DWORD value enables the exclusion for this pathname.
    key[pathname] = 1
python
def recipients(messenger, addresses):
    """Structures recipients data.

    :param str|unicode, MessageBase messenger: MessengerBase heir, or the
        string alias of a registered messenger
    :param list[str|unicode]|str|unicode addresses: recipients addresses or
        Django User model heir instances (NOTE: if supported by a messenger)
    :return: list of Recipient
    :rtype: list[Recipient]
    """
    # The messenger may be passed by its registered alias; resolve it first.
    if isinstance(messenger, six.string_types):
        messenger = get_registered_messenger_object(messenger)
    return messenger._structure_recipients_data(addresses)
java
/**
 * Orders log positions by file name first, then by byte position within the
 * file.
 */
public int compareTo(LogPosition o) {
    final int val = fileName.compareTo(o.fileName);
    if (val == 0) {
        // BUG FIX: the original cast (position - o.position) to int, which
        // can overflow and invert the ordering when offsets differ by more
        // than Integer.MAX_VALUE; Long.compare is overflow-safe.
        return Long.compare(position, o.position);
    }
    return val;
}
python
def fromInputs(self, inputs):
    """
    Extract the inputs associated with the child forms of this parameter
    from the given dictionary and coerce them using C{self.coercer}.

    @type inputs: C{dict} mapping C{unicode} to C{list} of C{unicode}
    @param inputs: The contents of a form post, in the conventional
        structure.

    @rtype: L{Deferred}
    @return: The structured data associated with this parameter represented
        by the post data.
    """
    # A missing key means the form post did not include this parameter.
    if self.name not in inputs:
        raise ConfigurationError(
            "Missing value for input: " + self.name)
    return self.coerceMany(inputs[self.name])
java
/**
 * Loads the alias table from the server and fills the view. When another
 * user holds the alias table lock, an alert dialog is shown instead and no
 * data is loaded.
 *
 * @param afterLoad optional callback run after the data has been displayed
 */
public void load(final Runnable afterLoad) {
    CmsRpcAction<CmsAliasInitialFetchResult> action = new CmsRpcAction<CmsAliasInitialFetchResult>() {

        /**
         * @see org.opencms.gwt.client.rpc.CmsRpcAction#execute()
         */
        @Override
        public void execute() {
            // Kick off the async fetch; start(0, true) shows the wait indicator.
            getService().getAliasTable(this);
            start(0, true);
        }

        /**
         * @see org.opencms.gwt.client.rpc.CmsRpcAction#onResponse(java.lang.Object)
         */
        @Override
        public void onResponse(CmsAliasInitialFetchResult aliasTable) {
            stop(false);
            String lockOwner = aliasTable.getAliasTableLockOwner();
            if (lockOwner != null) {
                // Someone else is editing the alias table: inform the user
                // and abort without loading any data.
                String errorMessage = CmsAliasMessages.messageAliasTableLocked(lockOwner);
                String title = CmsAliasMessages.messageAliasTableLockedTitle();
                CmsAlertDialog alert = new CmsAlertDialog(title, errorMessage);
                alert.center();
            } else {
                m_downloadUrl = aliasTable.getDownloadUrl();
                m_initialData = aliasTable.getRows();
                m_siteRoot = CmsCoreProvider.get().getSiteRoot();
                // Work on a copy so edits can later be diffed against m_initialData.
                List<CmsAliasTableRow> copiedData = copyData(m_initialData);
                List<CmsRewriteAliasTableRow> rewriteData = aliasTable.getRewriteAliases();
                m_view.setData(copiedData, rewriteData);
                if (afterLoad != null) {
                    afterLoad.run();
                }
            }
        }
    };
    action.execute();
}
java
/**
 * Starts this service. Requires that the OLAPService has already been
 * initialized, then blocks until it is fully available.
 */
@Override
public void startService() {
    Utils.require(
        OLAPService.instance().getState().isInitialized(),
        "OLAPMonoService requires the OLAPService");
    // Block until the OLAP service is completely up before continuing.
    OLAPService.instance().waitForFullService();
}
java
/**
 * Writes minOccurs/maxOccurs attributes, omitting the XML Schema default
 * value (1) and mapping -1 to "unbounded".
 */
public void occurs(int minOccurs, int maxOccurs) throws SAXException {
    if (minOccurs != 1) {
        xml.addAttribute("minOccurs", String.valueOf(minOccurs));
    }
    if (maxOccurs != 1) {
        String value = maxOccurs == -1 ? "unbounded" : String.valueOf(maxOccurs);
        xml.addAttribute("maxOccurs", value);
    }
}
python
def get_index_html (urls):
    """
    Construct artificial index.html from given URLs.

    @param urls: URL strings
    @type urls: iterator of string
    """
    anchors = []
    for entry in urls:
        name = cgi.escape(entry)
        try:
            url = cgi.escape(urllib.quote(entry))
        except KeyError:
            # Some unicode entries raise KeyError from quote(); fall back
            # to the escaped name as the href.
            url = name
        anchors.append('<a href="%s">%s</a>' % (url, name))
    page = ["<html>", "<body>"] + anchors + ["</body>", "</html>"]
    return os.linesep.join(page)
python
def catch_timeout(f):
    """
    A decorator to handle read timeouts from Twitter.

    Wraps a method so that on a read timeout the connection is
    re-established and the call is retried exactly once.
    """
    def new_f(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except (requests.exceptions.ReadTimeout,
                requests.packages.urllib3.exceptions.ReadTimeoutError) as e:
            # Log, reconnect, and retry the wrapped call a single time;
            # a second timeout propagates to the caller.
            log.warning("caught read timeout: %s", e)
            self.connect()
            return f(self, *args, **kwargs)
    return new_f
java
/**
 * Returns the last commerce tax method in the ordered set where groupId
 * matches, or null if none is found. Delegates to the persistence layer.
 */
public static CommerceTaxMethod fetchByGroupId_Last(long groupId,
        OrderByComparator<CommerceTaxMethod> orderByComparator) {
    return getPersistence().fetchByGroupId_Last(groupId, orderByComparator);
}
python
def offline(path):
    '''
    Mark a cache storage device as offline. The storage is identified by a
    path which must match exactly a path specified in storage.config. This
    removes the storage from the cache and redirects requests that would
    have used this storage to other storage. This has exactly the same
    effect as a disk failure for that storage. This does not persist across
    restarts of the traffic_server process.

    .. code-block:: bash

        salt '*' trafficserver.offline /path/to/cache
    '''
    # Prefer the modern traffic_ctl binary when available; fall back to the
    # legacy traffic_line interface otherwise.
    if _TRAFFICCTL:
        cmd = _traffic_ctl('storage', 'offline', path)
    else:
        cmd = _traffic_line('--offline', path)
    return _subprocess(cmd)
python
def _from_safe_path_param_name(safe_parameter): """Takes a safe regex group name and converts it back to the original value. Only alphanumeric characters and underscore are allowed in variable name tokens, and numeric are not allowed as the first character. The safe_parameter is a base32 representation of the actual value. Args: safe_parameter: A string that was generated by _to_safe_path_param_name. Returns: A string, the parameter matched from the URL template. """ assert safe_parameter.startswith('_') safe_parameter_as_base32 = safe_parameter[1:] padding_length = - len(safe_parameter_as_base32) % 8 padding = '=' * padding_length return base64.b32decode(safe_parameter_as_base32 + padding)
java
/**
 * Shuts this service down: flags the shutdown, stops the thrift server if
 * it was ever started, and marks the service as no longer started.
 */
@Override
public void shutdown() {
    LOG.info("Shutting down ...");
    shouldShutdown = true;
    // The server may never have been created; guard before stopping it.
    if (tserver != null) {
        tserver.stop();
    }
    started = false;
}
python
def alarm_set(self, time, wake_with_radio=False): """ set the alarm clock :param str time: time of the alarm (format: %H:%M:%S) :param bool wake_with_radio: if True, radio will be used for the alarm instead of beep sound """ # TODO: check for correct time format log.debug("alarm => set...") params = { "enabled": True, "time": time, "wake_with_radio": wake_with_radio } self._app_exec("com.lametric.clock", "clock.alarm", params=params)
java
/**
 * Computes the top-most moved folders, i.e. moved folders that are not
 * themselves contained in another moved folder.
 *
 * @param cms the CMS context
 * @return the list of top-level moved folders
 * @throws CmsException if reading the moved folders fails
 */
public List<CmsResource> getTopMovedFolders(CmsObject cms) throws CmsException {
    return getTopFolders(getMovedFolders(cms));
}
java
/**
 * Formats and writes the given arguments; implemented in terms of
 * {@link #format}, returning this writer for call chaining.
 */
@Override
public IndentedPrintWriter printf(String format, Object... args) {
    super.format(format, args);
    return this;
}
python
def sg_summary_audio(tensor, sample_rate=16000, prefix=None, name=None): r"""Register `tensor` to summary report as audio Args: tensor: A `Tensor` to log as audio sample_rate : An int. Sample rate to report. Default is 16000. prefix: A `string`. A prefix to display in the tensor board web UI. name: A `string`. A name to display in the tensor board web UI. Returns: None """ # defaults prefix = '' if prefix is None else prefix + '/' # summary name name = prefix + _pretty_name(tensor) if name is None else prefix + name # summary statistics if not tf.get_variable_scope().reuse: tf.summary.audio(name + '-au', tensor, sample_rate)
java
/**
 * Sets the yUnits attribute (EMF generated setter).
 *
 * @param newYUnits the new value; may be null
 */
public void setYUnits(Integer newYUnits) {
    Integer oldYUnits = yUnits;
    yUnits = newYUnits;
    // Emit a SET notification so registered EMF adapters observe the change.
    if (eNotificationRequired())
        eNotify(new ENotificationImpl(this, Notification.SET, AfplibPackage.IID__YUNITS, oldYUnits, yUnits));
}
java
/**
 * JAXB factory method (generated): wraps a CmisAccessControlListType as the
 * "removeACEs" element scoped to CheckIn messages.
 */
@XmlElementDecl(namespace = "http://docs.oasis-open.org/ns/cmis/messaging/200908/", name = "removeACEs", scope = CheckIn.class)
public JAXBElement<CmisAccessControlListType> createCheckInRemoveACEs(
        CmisAccessControlListType value) {
    // Reuses the createDocument removeACEs QName constant; the element name
    // is identical across operations, only the scope differs.
    return new JAXBElement<CmisAccessControlListType>(
            _CreateDocumentRemoveACEs_QNAME, CmisAccessControlListType.class, CheckIn.class, value);
}
python
def _stop_trial(self, trial, error=False, error_msg=None, stop_logger=True):
    """Stops this trial.

    Stops this trial, releasing all allocating resources. If stopping the
    trial fails, the run will be marked as terminated in error, but no
    exception will be thrown.

    Args:
        trial: The trial to stop.
        error (bool): Whether to mark this trial as terminated in error.
        error_msg (str): Optional error message.
        stop_logger (bool): Whether to shut down the trial logger.
    """
    if stop_logger:
        trial.close_logger()
    # Record the final status before tearing down the runner.
    if error:
        self.set_status(trial, Trial.ERROR)
    else:
        self.set_status(trial, Trial.TERMINATED)
    try:
        trial.write_error_log(error_msg)
        if hasattr(trial, "runner") and trial.runner:
            if (not error and self._reuse_actors
                    and self._cached_actor is None):
                # Keep the actor around so the next trial can reuse it
                # instead of paying actor startup costs again.
                logger.debug("Reusing actor for {}".format(trial.runner))
                self._cached_actor = trial.runner
            else:
                logger.info(
                    "Destroying actor for trial {}. If your trainable is "
                    "slow to initialize, consider setting "
                    "reuse_actors=True to reduce actor creation "
                    "overheads.".format(trial))
                trial.runner.stop.remote()
                trial.runner.__ray_terminate__.remote()
    except Exception:
        # Teardown is best-effort: record the error but never raise.
        logger.exception("Error stopping runner for Trial %s", str(trial))
        self.set_status(trial, Trial.ERROR)
    finally:
        trial.runner = None
python
def powernode_data(self, name:str) -> Powernode:
    """Build and return the Powernode record describing powernode *name*."""
    self.assert_powernode(name)
    # Direct member nodes are needed twice: for the size and the field itself.
    direct_nodes = frozenset(self.nodes_in(name))
    return Powernode(
        size=len(direct_nodes),
        contained=frozenset(self.all_in(name)),
        contained_pnodes=frozenset(self.powernodes_in(name)),
        contained_nodes=direct_nodes,
    )
java
/**
 * Creates a {@link BoundGoro} bound to the given context, notifying the
 * supplied handler on unexpected service disconnection.
 *
 * @param context non-null Android context to bind with
 * @param handler non-null callback invoked on unexpected disconnection
 * @return a new bound Goro instance
 * @throws IllegalArgumentException if either argument is {@code null}
 */
public static BoundGoro bindWith(final Context context, final BoundGoro.OnUnexpectedDisconnection handler) {
    // Fail fast on missing collaborators before constructing anything.
    if (context == null || handler == null) {
        throw new IllegalArgumentException(
                context == null ? "Context cannot be null" : "Disconnection handler cannot be null");
    }
    return new BoundGoroImpl(context, handler);
}
python
def _get_logging_id(self):
    """Get logging identifier ("<module>.<viewset class>/<method>")."""
    request = self._request
    viewset = request.viewset_class
    return "{}.{}/{}".format(
        viewset.__module__, viewset.__name__, request.viewset_method)
python
def _process_keystroke_commands(self, inp):
    """Process keystrokes that issue commands (side effects).

    '1'/'2' switch between 1- and 2-cell-wide rendering; '-'/'_' shrink
    and '+'/'=' grow the displayed name length by 2, clamped to the
    terminal width. Each effective change triggers a resize/redraw.
    """
    if inp in (u'1', u'2'):
        # choose 1 or 2-character wide rendering
        if int(inp) != self.screen.wide:
            self.screen.wide = int(inp)
            self.on_resize(None, None)
    elif inp in (u'_', u'-'):
        # adjust name length -2, never below 1
        nlen = max(1, self.screen.style.name_len - 2)
        if nlen != self.screen.style.name_len:
            self.screen.style.name_len = nlen
            self.on_resize(None, None)
    elif inp in (u'+', u'='):
        # adjust name length +2, clamped to terminal width minus margin
        nlen = min(self.term.width - 8, self.screen.style.name_len + 2)
        if nlen != self.screen.style.name_len:
            self.screen.style.name_len = nlen
            self.on_resize(None, None)
    # NOTE(review): the original ended with ``elif inp == u'2' and
    # self.screen.wide != 2`` — that branch was unreachable because the
    # first branch already consumes u'2'; it has been removed.
java
/**
 * Sets "contains" matching mode. Enabling it clears the mutually exclusive
 * "exact" and "missing" modifiers.
 *
 * @param theContains whether contains-matching should be enabled
 * @return this parameter, for chaining
 */
public StringParam setContains(boolean theContains) {
    myContains = theContains;
    if (!myContains) {
        return this;
    }
    // contains, exact and missing are mutually exclusive
    setExact(false);
    setMissing(null);
    return this;
}
java
/**
 * Returns the scans stored at the greatest retention time strictly below
 * {@code rt}, or {@code null} when no such entry exists.
 *
 * @param rt retention time upper bound (exclusive)
 * @return scans at the nearest lower RT, or {@code null}
 */
@Override
public List<IScan> getScansByRtLower(double rt) {
    Map.Entry<Double, List<IScan>> lowerEntry = getRt2scan().lowerEntry(rt);
    // The original re-checked the value for null and returned null in both
    // branches — returning the value directly is equivalent (dead code removed).
    return lowerEntry == null ? null : lowerEntry.getValue();
}
java
/**
 * Computes the skip offset of the last page for a paged listing.
 *
 * @param total total number of items
 * @param skip  the caller's current skip offset
 * @param limit page size
 * @return the skip offset pointing at the start of the last page, or the
 *         caller's own skip when it already lies within the final page
 */
private int calcLastPageSkip(int total, int skip, int limit) {
    // Already at (or past) the final page: keep the caller's skip untouched.
    if (skip > total - limit) {
        return skip;
    }
    // Otherwise jump to the start of the last (possibly partial) page.
    final int remainder = total % limit;
    return remainder > 0 ? total - remainder : total - limit;
}
java
/**
 * Executes the given HTTP call, retrying up to the configured retry count
 * (with an optional delay between attempts) whenever a
 * {@link PackageManagerHttpActionException} is thrown. The last failure is
 * rethrown once the retry budget is exhausted.
 *
 * @param call the HTTP call to execute
 * @param runCount number of attempts already made (0 on first call)
 * @return the call's result
 */
private <T> T executeHttpCallWithRetry(HttpCall<T> call, int runCount) {
    try {
        return call.execute();
    }
    catch (PackageManagerHttpActionException ex) {
        // retry again if configured so...
        if (runCount < props.getRetryCount()) {
            log.info("ERROR: " + ex.getMessage());
            log.debug("HTTP call failed.", ex);
            log.info("---------------");
            StringBuilder msg = new StringBuilder();
            msg.append("HTTP call failed, try again (" + (runCount + 1) + "/" + props.getRetryCount() + ")");
            if (props.getRetryDelaySec() > 0) {
                msg.append(" after " + props.getRetryDelaySec() + " second(s)");
            }
            msg.append("...");
            log.info(msg);
            if (props.getRetryDelaySec() > 0) {
                try {
                    Thread.sleep(props.getRetryDelaySec() * DateUtils.MILLIS_PER_SECOND);
                }
                catch (InterruptedException ex1) {
                    // ignore
                    // NOTE(review): swallowing InterruptedException loses the
                    // interrupt status; consider Thread.currentThread().interrupt().
                }
            }
            // Recurse with the attempt counter incremented; recursion depth is
            // bounded by the configured retry count.
            return executeHttpCallWithRetry(call, runCount + 1);
        }
        else {
            throw ex;
        }
    }
}
java
/**
 * Extracts the distinct named parameters from {@code query} in order of
 * first appearance, optionally lower-casing them.
 *
 * @param query the query text to scan with {@code PARAM_PATTERN}
 * @param changeToLower whether parameter names should be lower-cased
 * @return distinct parameter names in first-seen order (possibly empty)
 */
public static List<String> findParamsInQuery(String query, boolean changeToLower) {
    // LinkedHashSet keeps first-seen order while giving O(1) de-duplication,
    // replacing the original O(n) List.contains() scan per match.
    // (Fully qualified to avoid relying on imports not visible here.)
    java.util.Set<String> params = new java.util.LinkedHashSet<>();
    Matcher matcher = PARAM_PATTERN.matcher(query);
    while (matcher.find()) {
        String param = matcher.group("PARAM");
        if (param != null) {
            params.add(changeToLower ? param.toLowerCase() : param);
        }
    }
    return new ArrayList<>(params);
}
python
def sys_pipes_forever(encoding=_default_encoding):
    """Redirect all C-level output to sys.stdout/err permanently.

    This is not a context manager; once called, C-forwarding stays on
    for the life of the process. Repeated calls are no-ops.
    """
    global _mighty_wurlitzer
    if _mighty_wurlitzer is not None:
        # already redirecting — nothing to do
        return
    _mighty_wurlitzer = sys_pipes(encoding)
    _mighty_wurlitzer.__enter__()
java
/**
 * Adds a crossed feature of {@code x1} and {@code x2} combined via {@code f}.
 *
 * @param x1 name of the first feature
 * @param x2 name of the second feature
 * @param f  combiner applied to the two feature values
 * @return a new spec including the crossed feature
 */
public JFeatureSpec<T> cross(final String x1, final String x2, final BiFunction<Double, Double, Double> f) {
    // Adapt the Java BiFunction into a Scala Function2 before delegating.
    final Function2<Object, Object, Object> crossFn = JavaOps.crossFn(f);
    return wrap(self.cross(Tuple2.apply(x1, x2), crossFn));
}
python
def _add_cpu_percent(self, cur_read):
    """Compute cpu percent basing on the provided utilisation.

    Mutates each executor's ``statistics`` dict in place, adding a
    ``cpus_percent`` key when both utilisation and limit are usable.
    """
    for executor_id, cur_data in cur_read.items():
        stats = cur_data['statistics']
        cpus_limit = stats.get('cpus_limit')
        cpus_utilisation = stats.get('cpus_utilisation')
        # Guard against a missing (None) limit as well as a zero limit:
        # the original only checked ``cpus_limit != 0`` and would raise
        # TypeError when the key was absent (None / int division).
        if cpus_utilisation and cpus_limit:
            stats['cpus_percent'] = cpus_utilisation / cpus_limit
java
/**
 * Scans the given base packages for classes annotated with
 * {@code @Component} and registers each as a singleton bean rule.
 * A no-op when no packages are supplied.
 *
 * @param basePackages packages to scan recursively (".**" is appended)
 * @throws BeanRuleException if saving a discovered bean rule fails
 */
public void scanConfigurableBeans(String... basePackages) throws BeanRuleException {
    if (basePackages == null || basePackages.length == 0) {
        return;
    }
    log.info("Auto component scanning on packages [" + StringUtils.joinCommaDelimitedList(basePackages) + "]");
    for (String basePackage : basePackages) {
        BeanClassScanner scanner = new BeanClassScanner(classLoader);
        List<BeanRule> beanRules = new ArrayList<>();
        // Collect first, save afterwards, so a save failure does not leave
        // the scanner callback half-way through a package.
        scanner.scan(basePackage + ".**", (resourceName, targetClass) -> {
            if (targetClass.isAnnotationPresent(Component.class)) {
                BeanRule beanRule = new BeanRule();
                beanRule.setBeanClass(targetClass);
                beanRule.setScopeType(ScopeType.SINGLETON);
                beanRules.add(beanRule);
            }
        });
        for (BeanRule beanRule : beanRules) {
            saveConfigurableBeanRule(beanRule);
        }
    }
}
python
def buy_open(id_or_ins, amount, price=None, style=None):
    """Open a long position (buy to open).

    :param id_or_ins: instrument(s) to order
    :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`]
    :param int amount: number of lots to order
    :param float price: order price; ``None`` (default) means a
        :class:`~MarketOrder`. This parameter mainly exists to simplify
        the `style` argument.
    :param style: order style, market order by default. Supported styles
        are :class:`~LimitOrder` and :class:`~MarketOrder`.
    :type style: `OrderStyle` object
    :return: :class:`~Order` object | None

    :example:

    .. code-block:: python

        # Buy-open 2 lots of SHFE AG1607 with a limit order at 3500:
        buy_open('AG1607', amount=2, price=3500)
    """
    order_style = cal_style(price, style)
    return order(id_or_ins, amount, SIDE.BUY, POSITION_EFFECT.OPEN, order_style)
java
/**
 * Removes the given object from the list if present.
 *
 * @param info the object to remove
 * @return {@code true} if it was found and removed, {@code false} otherwise
 */
public boolean remove (ObjectInfo info) {
    // A negative index means the object is not in the list.
    final int idx = indexOf(info);
    if (idx < 0) {
        return false;
    }
    remove(idx);
    return true;
}
java
public static int expectHCOLON(final Buffer buffer) throws SipParseException { final int consumed = expectHCOLONStreamFriendly(buffer); if (consumed == -1) { // -1 means we ran out of bytes in the stream but in those // cases where we are not really dealing with a stream we // would expect an IndexOutOfBoundsException so let's make // sure to still honor that... throw new IndexOutOfBoundsException(); } return consumed; }
python
def generate_json_docs(module, pretty_print=False, user=None):
    """Serialize a Pale module's documentation to a JSON string.

    Shorthand for ``generate_doc_dict`` followed by ``json.dumps``; the
    result can be printed, written to a file, or piped elsewhere.

    The optional *user* is expected to be an object with an ``is_admin``
    boolean attribute; endpoints protected with ``@requires_permission``
    only appear when ``user.is_admin`` is True.
    """
    if pretty_print:
        indent, separators = 4, (',', ': ')
    else:
        indent, separators = None, (',', ':')
    return json.dumps(generate_doc_dict(module, user),
                      indent=indent,
                      separators=separators)
python
def export(self, class_name, method_name, export_data=False,
           export_dir='.', export_filename='data.json',
           export_append_checksum=False, **kwargs):
    """
    Port a trained estimator to the syntax of a chosen programming language.

    Parameters
    ----------
    :param class_name : string
        The name of the class in the returned result.
    :param method_name : string
        The name of the method in the returned result.
    :param export_data : bool, default: False
        Whether the model data should be saved or not.
    :param export_dir : string, default: '.' (current directory)
        The directory where the model data should be saved.
    :param export_filename : string, default: 'data.json'
        The filename of the exported model data.
    :param export_append_checksum : bool, default: False
        Whether to append the checksum to the filename or not.

    Returns
    -------
    :return : string
        The transpiled algorithm with the defined placeholders.
    """
    # Arguments:
    self.class_name = class_name
    self.method_name = method_name

    # Estimator:
    est = self.estimator

    # Model dimensions derived from the fitted GaussianNB parameters.
    self.n_features = len(est.sigma_[0])
    self.n_classes = len(est.classes_)

    # Language-specific templates for literals and (nested) array syntax.
    temp_type = self.temp('type')
    temp_arr = self.temp('arr')
    temp_arr_ = self.temp('arr[]')
    temp_arr__ = self.temp('arr[][]')

    # Create class prior probabilities:
    priors = [temp_type.format(self.repr(c)) for c in est.class_prior_]
    priors = ', '.join(priors)
    self.priors = temp_arr_.format(type='double', name='priors',
                                   values=priors)

    # Create sigmas (per-class feature variances):
    sigmas = []
    for sigma in est.sigma_:
        tmp = [temp_type.format(self.repr(s)) for s in sigma]
        tmp = temp_arr.format(', '.join(tmp))
        sigmas.append(tmp)
    sigmas = ', '.join(sigmas)
    self.sigmas = temp_arr__.format(type='double', name='sigmas',
                                    values=sigmas)

    # Create thetas (per-class feature means):
    thetas = []
    for theta in est.theta_:
        tmp = [temp_type.format(self.repr(t)) for t in theta]
        tmp = temp_arr.format(', '.join(tmp))
        thetas.append(tmp)
    thetas = ', '.join(thetas)
    self.thetas = temp_arr__.format(type='double', name='thetas',
                                    values=thetas)

    if self.target_method == 'predict':
        # Exported: model data written to disk, code loads it at runtime.
        if export_data and os.path.isdir(export_dir):
            self.export_data(export_dir, export_filename,
                             export_append_checksum)
            return self.predict('exported')
        # Separated: model data embedded directly in the generated code.
        return self.predict('separated')
java
/**
 * Resolves the {@code BugCollection} and {@code BugInstance} behind an
 * Eclipse problem marker. Returns {@code null} (with a logged reason)
 * when the marker is not a usable SpotBugs marker or the bug cannot be
 * found any more.
 *
 * @param marker the marker selected in the UI
 * @return the collection/instance pair, or {@code null}
 */
public static @CheckForNull BugCollectionAndInstance findBugCollectionAndInstanceForMarker(IMarker marker) {
    IResource resource = marker.getResource();
    IProject project = resource.getProject();
    if (project == null) {
        // Also shouldn't happen.
        FindbugsPlugin.getDefault().logError("No project for warning marker");
        return null;
    }
    if (!isFindBugsMarker(marker)) {
        // log disabled because otherwise each selection in problems view
        // generates
        // 6 new errors (we need refactor all bug views to get rid of this).
        // FindbugsPlugin.getDefault().logError("Selected marker is not a FindBugs marker");
        // FindbugsPlugin.getDefault().logError(marker.getType());
        // FindbugsPlugin.getDefault().logError(FindBugsMarker.NAME);
        return null;
    }
    // We have a FindBugs marker. Get the corresponding BugInstance.
    String bugId = marker.getAttribute(FindBugsMarker.UNIQUE_ID, null);
    if (bugId == null) {
        FindbugsPlugin.getDefault().logError("Marker does not contain unique id for warning");
        return null;
    }
    try {
        BugCollection bugCollection = FindbugsPlugin.getBugCollection(project, null);
        if (bugCollection == null) {
            FindbugsPlugin.getDefault().logError("Could not get BugCollection for SpotBugs marker");
            return null;
        }
        String bugType = (String) marker.getAttribute(FindBugsMarker.BUG_TYPE);
        Integer primaryLineNumber = (Integer) marker.getAttribute(FindBugsMarker.PRIMARY_LINE);
        // compatibility: older markers did not store the primary line
        if (primaryLineNumber == null) {
            primaryLineNumber = Integer.valueOf(getEditorLine(marker));
        }
        if (bugType == null) {
            FindbugsPlugin.getDefault().logError(
                    "Could not get find attributes for marker " + marker + ": (" + bugId + ", " + primaryLineNumber + ")");
            return null;
        }
        // Look the bug up by its (id, type, line) triple.
        BugInstance bug = bugCollection.findBug(bugId, bugType, primaryLineNumber.intValue());
        if(bug == null) {
            FindbugsPlugin.getDefault().logError(
                    "Could not get find bug for marker on " + resource + ": (" + bugId + ", " + primaryLineNumber + ")");
            return null;
        }
        return new BugCollectionAndInstance(bugCollection, bug);
    } catch (CoreException e) {
        FindbugsPlugin.getDefault().logException(e, "Could not get BugInstance for SpotBugs marker");
        return null;
    }
}
java
/**
 * Streams the quarters from this quarter (inclusive) up to
 * {@code endExclusive} (exclusive), in ascending order.
 *
 * @param endExclusive the end quarter, not before this one
 * @return an ordered stream of quarters
 * @throws IllegalArgumentException if {@code endExclusive} is before this
 */
public Stream<YearQuarter> quartersUntil(YearQuarter endExclusive) {
    if (endExclusive.isBefore(this)) {
        throw new IllegalArgumentException(endExclusive + " < " + this);
    }
    // Stream offsets 0..length-1 and shift this quarter by each offset.
    final long quarters = until(endExclusive, QUARTER_YEARS);
    return LongStream.range(0, quarters).mapToObj(this::plusQuarters);
}
python
def _make_from_epo(cls, trg_comp, qr_comp, trg_chrom_sizes, qr_chrom_sizes):
    """Create a chain of collinear rings from the given components.

    The target of the chain will always be on the forward strand. This
    is done to avoid confusion when mapping psl files. So, if
    trg_comp.strand=-, qr_comp.strand=- (resp. +) the chain header will
    have tStrand=+, qStrand=+ (resp. -). No strand changes on the other
    cases.

    :param trg_comp: target (i.e, the first) component
    :type trg_comp: L{EPOitem}
    :param qr_comp: query (i.e, the second) component
    :type qr_comp: L{EPOitem}
    :param trg_chrom_sizes: chromosome sizes of the target
    :type trg_chrom_sizes: dictionary of the type (chrom) --> size
    :param qr_chrom_sizes: chromosome sizes of the query
    :type qr_chrom_sizes: dictionary of the type (chrom) --> size
    :return: A L{Chain} instance"""
    # size, target, query arrays
    S, T, Q = [], [], []

    #the target strand of the chain must be on the forward strand
    trg_intervals = trg_comp.intervals(reverse = trg_comp.strand == '-')
    qr_intervals = qr_comp.intervals(reverse = trg_comp.strand == '-')
    if len(trg_intervals) == 0 or len(qr_intervals) == 0:
        log.warning("deletion/insertion only intervals")
        return None
    A, B = rem_dash(trg_intervals, qr_intervals)
    # correct for when cigar starts/ends with dashes (in number of bases)
    tr_start_correction = max(B[0][0] - A[0][0], 0)
    tr_end_correction = max(A[-1][1] - B[-1][1], 0)
    qr_start_correction = max(A[0][0] - B[0][0], 0)
    qr_end_correction = max(B[-1][1] - A[-1][1], 0)

    a, b = A.pop(0), B.pop(0)

    # intervals are 0-based, half-open => lengths = coordinate difference
    while A or B:
        if a[1] < b[1]:
            # target interval ends first: emit block, gap on the query side
            T.append(0)
            Q.append( A[0][0] - a[1] )
            S.append( min(a[1], b[1]) - max(a[0], b[0]) )
            a = A.pop(0)
        elif b[1] < a[1]:
            # query interval ends first: emit block, gap on the target side
            Q.append(0)
            T.append( B[0][0] - b[1] )
            S.append( min(a[1], b[1]) - max(a[0], b[0]) )
            b = B.pop(0)
        elif A and B:
            assert 1 > 2, "there are dash columns"
        else:
            break
    S.append( min(a[1], b[1]) - max(a[0], b[0]) )
    assert len(T) == len(Q) == len(S) - 1, "(S, T, Q) = (%d, %d, %d)" % tuple(map(len, (S, T, Q)))

    tSize = trg_chrom_sizes[trg_comp.chrom]
    qSize = qr_chrom_sizes[qr_comp.chrom]
    ## UCSC coordinates are 0-based, half-open and e! coordinates are 1-base, closed
    ## chain_start = epo_start - 1 and chain_end = epo_end
    if qr_comp.strand == '+':
        chain = Chain(0, trg_comp.chrom, tSize, "+",
                (trg_comp.start - 1) + tr_start_correction, trg_comp.end - tr_end_correction,
                qr_comp.chrom, qSize, (qr_comp.strand == trg_comp.strand and '+' or '-'),
                (qr_comp.start - 1) + qr_start_correction, qr_comp.end - qr_end_correction,
                qr_comp.gabid)
    else:
        # NOTE(review): on the reverse query strand the start/end
        # corrections are deliberately swapped relative to the '+' case.
        chain = Chain(0, trg_comp.chrom, tSize, "+",
                (trg_comp.start - 1) + tr_start_correction, trg_comp.end - tr_end_correction,
                qr_comp.chrom, qSize, (qr_comp.strand == trg_comp.strand and '+' or '-'),
                (qr_comp.start - 1) + qr_end_correction, qr_comp.end - qr_start_correction,
                qr_comp.gabid)

    # strand correction. in UCSC coordinates this is: size - coord
    if chain.qStrand == '-':
        chain = chain._replace(qEnd = chain.qSize - chain.qStart,
                qStart = chain.qSize - chain.qEnd)

    # sanity: span length on each side must equal blocks + that side's gaps
    assert chain.tEnd - chain.tStart == sum(S) + sum(T), "[%s] %d != %d" % (
            str(chain), chain.tEnd - chain.tStart, sum(S) + sum(T))
    assert chain.qEnd - chain.qStart == sum(S) + sum(Q), "[%s] %d != %d" % (
            str(chain), chain.qEnd - chain.qStart, sum(S) + sum(Q))
    return chain, S, T, Q
python
def min(self):
    """
    Return the minimum element (or element-based computation).
    """
    # NOTE(review): for dict-wrapped values this returns a wrapped empty
    # list rather than a minimum — presumably intentional underscore-style
    # behavior; confirm before changing.
    if(self._clean.isDict()):
        return self._wrap(list())
    return self._wrap(min(self.obj))
python
def asarray_ndim(a, *ndims, **kwargs):
    """Coerce `a` to a numpy array with an allowed number of dimensions.

    Parameters
    ----------
    a : array_like
    *ndims : int, optional
        Allowed values for number of dimensions.
    **kwargs
        Passed through to :func:`numpy.array`. Also accepts the extra
        keyword ``allow_none`` (default False), which lets `a` be None.

    Returns
    -------
    a : numpy.ndarray

    Raises
    ------
    TypeError
        If the resulting array's ndim is not one of `ndims`.
    """
    allow_none = kwargs.pop('allow_none', False)
    kwargs.setdefault('copy', False)
    if a is None and allow_none:
        return None
    arr = np.array(a, **kwargs)
    if arr.ndim in ndims:
        return arr
    # Build the expectation text for the error message.
    if len(ndims) > 1:
        expect_str = 'one of %s' % str(ndims)
    else:
        # noinspection PyUnresolvedReferences
        expect_str = '%s' % ndims[0]
    raise TypeError('bad number of dimensions: expected %s; found %s' %
                    (expect_str, arr.ndim))
java
/**
 * Accepts the candidate state: swaps the {@code x}/{@code x_next} buffers
 * (so the candidate becomes current without allocating), records its
 * function value and switches back to derivative computation.
 *
 * @param fx_candidate function value at the accepted state
 */
private void acceptNewState( double fx_candidate ) {
    final DMatrixRMaj swap = x;
    x = x_next;
    x_next = swap;

    fx = fx_candidate;
    mode = Mode.COMPUTE_DERIVATIVES;
}
python
def _crop_data(self):
    """
    Crop ``data`` and ``mask`` to a whole number of background boxes.

    The top and/or right edges are trimmed so both dimensions are an
    integer multiple of ``box_size`` (the best option for the "zoom"
    interpolator).

    Returns
    -------
    result : `~numpy.ma.MaskedArray`
        The cropped data and mask as a masked array.
    """
    # Cropped extent: whole boxes only, measured from the origin.
    crop_slc = index_exp[0:self.nyboxes * self.box_size[1],
                         0:self.nxboxes * self.box_size[0]]
    mask = False if self.mask is None else self.mask[crop_slc]
    return np.ma.masked_array(self.data[crop_slc], mask=mask)
java
/**
 * Generic signed right shift ({@code >>}) over boxed char/int/long operands.
 * The result type is widened from the left operand only: an int-or-smaller
 * left operand yields an {@code Integer}, a long left operand a {@code Long}.
 *
 * @param arg1 left operand (char/byte/short/int/long box)
 * @param arg2 shift amount (char/byte/short/int/long box)
 * @return the boxed shifted value
 * @throws NoSuchMethodException if either operand is not a supported type
 */
public static Object shiftSignedRight(Object arg1, Object arg2) throws NoSuchMethodException {
    int code1 = typeCode(arg1);
    int code2 = typeCode(arg2);
    if (code1 <= INT) {
        int val1 = unboxCharOrInt(arg1, code1);
        if (code2 <= INT) {
            int val2 = unboxCharOrInt(arg2, code2);
            return boxToInteger(val1 >> val2);
        }
        if (code2 <= LONG) {
            // long shift amount is legal for an int left operand; only the
            // low bits of the count are used, result stays an int
            long val2 = unboxCharOrLong(arg2, code2);
            return boxToInteger(val1 >> val2);
        }
    }
    if (code1 <= LONG) {
        long val1 = unboxCharOrLong(arg1, code1);
        if (code2 <= INT) {
            int val2 = unboxCharOrInt(arg2, code2);
            return boxToLong(val1 >> val2);
        }
        if (code2 <= LONG) {
            long val2 = unboxCharOrLong(arg2, code2);
            return boxToLong(val1 >> val2);
        }
    }
    // unsupported operand type(s)
    throw new NoSuchMethodException();
}
python
def unique(self, *args):
    """
    Returns all unique values as a DataFrame. This is executing:
        SELECT DISTINCT
            <name_of_the_column_1>
            , <name_of_the_column_2>
            , <name_of_the_column_3>
            ...
        FROM
            <name_of_the_table>

    Parameters
    ----------
    *args: columns as strings

    Examples
    --------
    >>> from db import DemoDB
    >>> db = DemoDB()
    >>> db.tables.Track.unique("GenreId")
        GenreId
    0         1
    1         2
    2         3
    3         4
    4         5
    5         6
    6         7
    7         8
    8         9
    9        10
    10       11
    11       12
    12       13
    13       14
    14       15
    15       16
    16       17
    17       18
    18       19
    19       20
    20       21
    21       22
    22       23
    23       24
    24       25
    >>> len(db.tables.Track.unique("GenreId", "MediaTypeId"))
    38
    """
    # Fill the engine-specific SELECT DISTINCT template, then let pandas
    # execute it over the shared connection.
    q = self._query_templates['table']['unique'].format(columns=self._format_columns(args),
                                                        schema=self.schema, table=self.name)
    return pd.read_sql(q, self._con)
python
def get_headerReference(self, type_):
    """Return headerReference element of *type_* or None if not present."""
    xpath_expr = (
        "./w:headerReference[@w:type='%s']" % WD_HEADER_FOOTER.to_xml(type_)
    )
    matches = self.xpath(xpath_expr)
    return matches[0] if matches else None
python
def _load_neighbors(self) -> None:
    """
    Loads all neighbors of the node from the local database and from the
    external data source if needed.
    """
    if not self.are_neighbors_cached:
        # Fetch from the external source first, then persist the
        # "cached" flag so future loads can skip the external call.
        self._load_neighbors_from_external_source()
        db: GraphDatabaseInterface = self._graph.database
        db_node: DBNode = db.Node.find_by_name(self.name)
        db_node.are_neighbors_cached = True
        db.session.commit()
        self.are_neighbors_cached = True
    if not self._are_neighbors_loaded:
        # Populate the in-memory neighbor set from the local database.
        self._load_neighbors_from_database()
java
/**
 * JavaScript-exposed accessor: returns the first request header with the
 * given name wrapped as a script object, or {@code null} if absent.
 *
 * @param name the header name to look up
 * @return a JS header wrapper, or {@code null}
 */
public @Nullable Scriptable jsFunction_firstHeader(String name) {
    final Header header = req.getFirstHeader(name);
    if (header == null) {
        return null;
    }
    return makeJsHeader(Context.getCurrentContext(), header);
}
java
/**
 * Removes a node and all edges touching it from the graph. A no-op when
 * the node is not present.
 *
 * @param node the node to remove
 */
public void removeNode(N node) {
    Pair<HashSet<N>, HashSet<N>> p = nodes.remove(node);
    if(p == null)
        return;
    // Outgoing edges we can ignore: removing the node drops them. We must,
    // however, clean up dangling incoming edges pointing at the removed node.
    HashSet<N> incomingNodes = p.getIncoming();
    for(N incomingNode : incomingNodes)
        nodes.get(incomingNode).getOutgoing().remove(node);
}
java
/**
 * Resolves an FXML path to a URL: first relative to the model's class,
 * then via the thread context class loader.
 *
 * @param model the model whose class is used as the first lookup base (may be null)
 * @param fxmlPath the FXML resource path, e.g. {@code org/jrebirth/core/ui/Test.fxml}
 * @return the resolved URL, or {@code null} when the resource is not found
 */
private static <M extends Model> URL convertFxmlUrl(final M model, final String fxmlPath) {
    URL fxmlUrl = null;
    // NOTE(review): the original comment claimed '.' separators are replaced
    // with '/' here — no replacement happens; the path is used verbatim.
    if (model != null) {
        // Try to load the resource from the same path as the model class
        fxmlUrl = model.getClass().getResource(fxmlPath);
    }
    if (fxmlUrl == null) {
        // Try to load the resource from the full path org/jrebirth/core/ui/Test.fxml
        fxmlUrl = Thread.currentThread().getContextClassLoader().getResource(fxmlPath);
    }
    return fxmlUrl;
}
java
/**
 * Locates {@code key} (or, when {@code pluralFormatKey} is given, a plural
 * rule match) within {@code str} starting at {@code startingAt}.
 *
 * @return a two-element array: {match start (or -1), match length (or 0)}
 */
private int[] findText(String str, String key, PluralFormat pluralFormatKey, int startingAt) {
    RbnfLenientScanner scanner = formatter.getLenientScanner();
    if (pluralFormatKey != null) {
        FieldPosition position = new FieldPosition(NumberFormat.INTEGER_FIELD);
        position.setBeginIndex(startingAt);
        pluralFormatKey.parseType(str, scanner, position);
        int start = position.getBeginIndex();
        if (start >= 0) {
            // The rule text embeds the plural rule as "...$(...)$...".
            // Extract the literal prefix/suffix surrounding it and require
            // both to surround the plural match in the input string.
            int pluralRuleStart = ruleText.indexOf("$(");
            int pluralRuleSuffix = ruleText.indexOf(")$", pluralRuleStart) + 2;
            int matchLen = position.getEndIndex() - start;
            String prefix = ruleText.substring(0, pluralRuleStart);
            String suffix = ruleText.substring(pluralRuleSuffix);
            if (str.regionMatches(start - prefix.length(), prefix, 0, prefix.length())
                    && str.regionMatches(start + matchLen, suffix, 0, suffix.length())) {
                return new int[]{start - prefix.length(), matchLen + prefix.length() + suffix.length()};
            }
        }
        return new int[]{-1, 0};
    }
    if (scanner != null) {
        // if lenient parsing is turned ON, we've got some work
        // ahead of us
        return scanner.findText(str, key, startingAt);
    }
    // if lenient parsing is turned off, this is easy. Just call
    // String.indexOf() and we're done
    return new int[]{str.indexOf(key, startingAt), key.length()};
}
java
/**
 * Looks up the cached class names for the given (class, package) pair.
 *
 * @param cls the class whose cache entry is requested
 * @param pkgname the package name the cache was built for
 * @return the cached names, or {@code null} when not cached yet
 */
protected List<String> getNameCache(Class cls, String pkgname) {
    // Cache key combines the class name and the package name.
    final String key = cls.getName() + "-" + pkgname;
    return m_CacheNames.get(key);
}
python
def destroy_work_item(self, id, project=None):
    """DestroyWorkItem.
    Destroys the specified work item permanently from the Recycle Bin. This action can not be undone.
    :param int id: ID of the work item to be destroyed permanently
    :param str project: Project ID or project name
    """
    # Build the URL route values; the template tolerates missing segments.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if id is not None:
        route_values['id'] = self._serialize.url('id', id, 'int')
    # Fire the DELETE; no response body is expected from this endpoint.
    self._send(http_method='DELETE',
               location_id='b70d8d39-926c-465e-b927-b1bf0e5ca0e0',
               version='5.0',
               route_values=route_values)
python
def member_status():
    '''
    Get cluster member status

    .. versionchanged:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' riak.member_status
    '''
    ret = {'membership': {},
           'summary': {'Valid': 0,
                       'Leaving': 0,
                       'Exiting': 0,
                       'Joining': 0,
                       'Down': 0,
                       }}

    out = __execute_cmd('riak-admin', 'member-status')['stdout'].splitlines()

    for line in out:
        # Skip separator rows and the column header.
        if line.startswith(('=', '-', 'Status')):
            continue
        if '/' in line:
            # We're in the summary line, e.g. "Valid:5 / Leaving:0 / ..."
            for item in line.split('/'):
                key, val = item.split(':')
                ret['summary'][key.strip()] = val.strip()
        if len(line.split()) == 4:
            # We're on a node status line: "<status> <ring> <pending> <node>"
            (status, ring, pending, node) = line.split()
            ret['membership'][node] = {
                'Status': status,
                'Ring': ring,
                'Pending': pending
            }
    return ret
java
/**
 * Maps a world coordinate into the index grid cell containing it.
 *
 * @param coordinate the point to locate
 * @return the (x, y) cell indices within the index region
 */
private Vector2i getCellOf(Point3D coordinate) {
    // Translate into index-region-local coordinates, then bucket by cell size.
    final int cellX = (int) ((coordinate.x - indexRegion.minX()) / x_size);
    final int cellY = (int) ((coordinate.y - indexRegion.minY()) / y_size);
    return new Vector2i(cellX, cellY);
}
python
def get_estimates_without_scope_in_month(self, customer):
    """
    It is expected that valid row for each month contains at least one price
    estimate for customer, service setting, service, service project link,
    project and resource. Otherwise all price estimates in the row should be
    deleted.

    Returns the list of price estimates belonging to incomplete
    (year, month) rows.
    """
    estimates = self.get_price_estimates_for_customer(customer)
    if not estimates:
        return []
    # One bucket per estimated model, keyed by (year, month).
    tables = {model: collections.defaultdict(list) for model in self.get_estimated_models()}
    dates = set()
    for estimate in estimates:
        date = (estimate.year, estimate.month)
        dates.add(date)
        cls = estimate.content_type.model_class()
        # File the estimate under the first model class it is an instance of.
        for model, table in tables.items():
            if issubclass(cls, model):
                table[date].append(estimate)
                break
    invalid_estimates = []
    for date in dates:
        # A month is invalid when any required model has no estimate for it;
        # in that case every estimate of that month is flagged.
        if any(map(lambda table: len(table[date]) == 0, tables.values())):
            for table in tables.values():
                invalid_estimates.extend(table[date])
    # NOTE(review): removed a stray debug ``print(invalid_estimates)`` that
    # leaked diagnostic output to stdout.
    return invalid_estimates
java
/**
 * One periodic directory-scan cycle: bails out early when shutdown or a
 * distributed upgrade is in progress, otherwise records block deltas and
 * reconciles on-disk state with the in-memory view. Exceptions are logged
 * and swallowed so the executor can run the next cycle; Errors terminate
 * the scanner permanently.
 */
@Override
public void run() {
    try {
        InjectionHandler.processEvent(InjectionEvent.DIRECTORY_SCANNER_NOT_STARTED);
        if (!shouldRun) {
            //shutdown has been activated
            LOG.warn("this cycle terminating immediately because 'shouldRun' has been deactivated");
            return;
        }
        Integer[] namespaceIds = datanode.getAllNamespaces();
        for(Integer nsid : namespaceIds) {
            UpgradeManagerDatanode um = datanode.getUpgradeManager(nsid);
            if (um != null && !um.isUpgradeCompleted()) {
                //If distributed upgrades underway, exit and wait for next cycle.
                LOG.warn("this cycle terminating immediately because Distributed Upgrade is in process");
                return;
            }
        }
        //We're are okay to run - do it
        delta.resetDelta();
        delta.startRecordingDelta();
        checkDifferenceAndReconcile();
    } catch (Exception e) {
        //Log and continue - allows Executor to run again next cycle
        LOG.error("Exception during DirectoryScanner execution - will continue next cycle", e);
    } catch (Error er) {
        //Non-recoverable error - re-throw after logging the problem
        LOG.error("System Error during DirectoryScanner execution - permanently terminating periodic scanner", er);
        throw er;
    } finally {
        // Always stop delta recording, even on early return or failure.
        delta.stopRecordingDelta();
        InjectionHandler.processEvent(InjectionEvent.DIRECTORY_SCANNER_FINISHED);
    }
}
java
public static String expiresAtAsRFC1123(long expiresAt) { Calendar c = Calendar.getInstance(); c.setTimeInMillis(expiresAt); SimpleDateFormat dateFormat = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss z", Locale.UK); dateFormat.setTimeZone(TimeZone.getTimeZone("GMT")); String result = dateFormat.format(c.getTime()); // System.err.println( ">> expires (RFC): " + result ); // long delta = expiresAt - System.currentTimeMillis(); // System.err.println( ">> expires in " + (delta/1000) + "s" ); return result; }
python
def _free_sequence(tmp1, tmp2=False):
    ''' Outputs a FREEMEM sequence for 1 or 2 ops '''
    # Nothing temporary on the stack: no cleanup needed.
    if not tmp1 and not tmp2:
        return []

    output = []
    if tmp1 and tmp2:
        # Two temporaries: free the one on the stack, then the one in HL.
        output.append('pop de')
        output.append('ex (sp), hl')
        output.append('push de')
        output.append('call __MEM_FREE')
        output.append('pop hl')
        output.append('call __MEM_FREE')
    else:
        # Exactly one temporary.
        # NOTE(review): when only ``tmp2`` is set this takes the same path
        # as tmp1-only — presumably the single value to free sits in the
        # same place either way; confirm against the callers.
        output.append('ex (sp), hl')
        output.append('call __MEM_FREE')
        output.append('pop hl')

    REQUIRES.add('alloc.asm')
    return output
java
/**
 * Adds a child component, optionally recording layout constraints for it.
 *
 * @param component the component to add
 * @param constraints optional layout constraints to associate with it
 */
public void add(final WComponent component, final Serializable... constraints) {
    add(component);
    // Only touch (and lazily allocate) the constraint map when there is
    // actually something to record.
    if (constraints == null || constraints.length == 0) {
        return;
    }
    final PanelModel model = getOrCreateComponentModel();
    if (model.layoutConstraints == null) {
        model.layoutConstraints = new HashMap<>();
    }
    model.layoutConstraints.put(component, constraints);
}
python
def fit(
    self,
    durations,
    event_observed=None,
    timeline=None,
    entry=None,
    label="KM_estimate",
    left_censorship=False,
    alpha=None,
    ci_labels=None,
    weights=None,
):  # pylint: disable=too-many-arguments,too-many-locals
    """
    Fit the model to a right-censored dataset

    Parameters
    ----------
    durations: an array, list, pd.DataFrame or pd.Series
      length n -- duration subject was observed for
    event_observed: an array, list, pd.DataFrame, or pd.Series, optional
      True if the the death was observed, False if the event was lost (right-censored). Defaults all True if event_observed==None
    timeline: an array, list, pd.DataFrame, or pd.Series, optional
      return the best estimate at the values in timelines (postively increasing)
    entry: an array, list, pd.DataFrame, or pd.Series, optional
      relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population entered study when they were "born".
    label: string, optional
      a string to name the column of the estimate.
    alpha: float, optional
      the alpha value in the confidence intervals. Overrides the initializing alpha for this call to fit only.
    left_censorship: bool, optional (default=False)
      Deprecated, use ``fit_left_censoring``
    ci_labels: tuple, optional
      add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<1-alpha/2>
    weights: an array, list, pd.DataFrame, or pd.Series, optional
      if providing a weighted dataset. For example, instead of providing every subject as a single element of `durations` and `event_observed`, one could weigh subject differently.

    Returns
    -------
    self: KaplanMeierFitter
      self with new properties like ``survival_function_``, ``plot()``, ``median``
    """
    # The deprecated flag only warns; fitting proceeds as right-censored.
    if left_censorship:
        warnings.warn(
            "kwarg left_censorship is deprecated and will be removed in a future release. Please use ``.fit_left_censoring`` instead.",
            DeprecationWarning,
        )
    self._censoring_type = CensoringType.RIGHT
    return self._fit(durations, event_observed, timeline, entry, label, alpha, ci_labels, weights)
java
/**
 * Registers a validation constraint for the given type's field via the API.
 * Returns an empty map (without calling out) when any argument is missing.
 *
 * @param type the content type to constrain
 * @param field the field the constraint applies to
 * @param c the constraint to add
 * @return the updated constraints map, or an empty map on bad input
 */
public Map<String, Map<String, Map<String, Map<String, ?>>>> addValidationConstraint(String type, String field, Constraint c) {
    // Reject blank identifiers or a missing constraint up front.
    if (StringUtils.isBlank(type) || StringUtils.isBlank(field) || c == null) {
        return Collections.emptyMap();
    }
    final String path = Utils.formatMessage("_constraints/{0}/{1}/{2}", type, field, c.getName());
    return getEntity(invokePut(path, Entity.json(c.getPayload())), Map.class);
}
java
/**
 * Reports whether the given provenance format can be used as input.
 * Unknown formats (no entry in the type map) are reported as not-input.
 *
 * @param format the format to check
 * @return {@code true} if the format supports input (INPUT or INPUTOUTPUT)
 */
public Boolean isInputFormat(ProvFormat format) {
    ProvFormatType t = provTypeMap.get(format);
    // Enum constants are singletons, so ``==`` is the idiomatic comparison
    // and — unlike the original ``t.equals(...)`` — it is also null-safe:
    // an unmapped format now yields false instead of a NullPointerException.
    return t == ProvFormatType.INPUT || t == ProvFormatType.INPUTOUTPUT;
}
python
async def rename_mailbox(self, before_name: str, after_name: str,
                         selected: SelectedMailbox = None) \
        -> Optional[SelectedMailbox]:
    """Renames the mailbox owned by the user.

    See Also:
        `RFC 3501 6.3.5. <https://tools.ietf.org/html/rfc3501#section-6.3.5>`_

    Args:
        before_name: The name of the mailbox before the rename.
        after_name: The name of the mailbox after the rename.
        selected: If applicable, the currently selected mailbox name.

    Returns:
        The updated selected-mailbox state, when a mailbox was selected.

    Raises:
        :class:`~pymap.exceptions.MailboxNotFound`
        :class:`~pymap.exceptions.MailboxConflict`
    """
    # Abstract interface method: concrete backends provide the behavior.
    ...
python
def _get_foundation(self, i):
    """Return a :class:`Foundation` for deck ``i``, creating it (and any
    missing placeholder slots before it) if needed.
    """
    if i >= len(self._foundations) or self._foundations[i] is None:
        # Work on a copy and reassign at the end so observers of the
        # ``_foundations`` property see a single atomic update.
        oldfound = list(self._foundations)
        # Grow the list with None placeholders so index ``i`` exists.
        extend = i - len(oldfound) + 1
        if extend > 0:
            oldfound += [None] * extend
        width = self.card_size_hint_x * self.width
        height = self.card_size_hint_y * self.height
        found = Foundation(
            pos=self._get_foundation_pos(i),
            size=(width, height),
            deck=i
        )
        # Keep the foundation's position in sync with every layout-affecting
        # property of this widget...
        self.bind(
            pos=found.upd_pos,
            card_size_hint=found.upd_pos,
            deck_hint_step=found.upd_pos,
            size=found.upd_pos,
            deck_x_hint_offsets=found.upd_pos,
            deck_y_hint_offsets=found.upd_pos
        )
        # ...and its size as well.
        self.bind(
            size=found.upd_size,
            card_size_hint=found.upd_size
        )
        oldfound[i] = found
        self._foundations = oldfound
    return self._foundations[i]