language: stringclasses (2 values)
func_code_string: stringlengths (63 to 466k)
python
def lengths_offsets(value):
    """Split the given comma separated value to multiple integer values."""
    values = []
    for item in value.split(','):
        item = int(item)
        values.append(item)
    return values
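A quick usage sketch (the inputs are invented, and it assumes the function above is in scope): the helper maps a comma-separated string to a list of ints, so a non-numeric item would raise ValueError.

# Usage sketch: assumes lengths_offsets from the sample above is importable.
print(lengths_offsets('8,16,32'))   # -> [8, 16, 32]
print(lengths_offsets(' 7 , 9 '))   # -> [7, 9]; int() tolerates surrounding whitespace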
python
def _read_para_echo_request_signed(self, code, cbit, clen, *, desc, length, version): """Read HIP ECHO_REQUEST_SIGNED parameter. Structure of HIP ECHO_REQUEST_SIGNED parameter [RFC 7401]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Opaque data (variable length) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 echo_request_signed.type Parameter Type 1 15 echo_request_signed.critical Critical Bit 2 16 echo_request_signed.length Length of Contents 4 32 echo_request_signed.data Opaque Data """ _data = self._read_fileng(clen) echo_request_signed = dict( type=desc, critical=cbit, length=clen, data=_data, ) _plen = length - clen if _plen: self._read_fileng(_plen) return echo_request_signed
python
def consume(self, callback, queue): """ Register a new consumer. This consumer will be configured for every protocol this factory produces so it will be reconfigured on network failures. If a connection is already active, the consumer will be added to it. Args: callback (callable): The callback to invoke when a message arrives. queue (str): The name of the queue to consume from. """ self.consumers[queue] = callback if self._client_ready.called: return self.client.consume(callback, queue)
python
def hmac_md5(s, salt):
    """
    Compute the salted HMAC-MD5 digest of a string.

    :param:
        * s: (string) the string to hash
        * salt: (string) a random salt string
    :return:
        * result: (string) the 32-character lowercase MD5 hex digest
    """
    hmac_md5 = hmac.new(salt.encode('utf-8'), s.encode('utf-8'),
                        digestmod=hashlib.md5).hexdigest()
    return hmac_md5
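A small self-contained sketch of the same construction using only the standard library (the sample message and salts are invented): the same message and salt always reproduce the same 32-character digest, while a different salt changes it.

import hashlib
import hmac

# Hypothetical inputs for illustration only.
digest_a = hmac.new(b'salt123', b'hello', digestmod=hashlib.md5).hexdigest()
digest_b = hmac.new(b'salt123', b'hello', digestmod=hashlib.md5).hexdigest()
digest_c = hmac.new(b'other', b'hello', digestmod=hashlib.md5).hexdigest()

assert digest_a == digest_b          # deterministic for the same salt and message
assert digest_a != digest_c          # a different salt yields a different digest
assert len(digest_a) == 32           # 32 lowercase hex characters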
python
def parent_element(self): """ Get the parent of the element @rtype: WebElementWrapper @return: Parent of webelementwrapper on which this was invoked """ def parent_element(): """ Wrapper to get parent element """ parent = self.driver_wrapper.execute_script('return arguments[0].parentNode;', self.element) wrapped_parent = WebElementWrapper(self.driver_wrapper, '', parent) return wrapped_parent return self.execute_and_handle_webelement_exceptions(parent_element, 'get parent element')
java
public void processUserDefinedFields(List<Row> fields, List<Row> values) { // Process fields Map<Integer, String> tableNameMap = new HashMap<Integer, String>(); for (Row row : fields) { Integer fieldId = row.getInteger("udf_type_id"); String tableName = row.getString("table_name"); tableNameMap.put(fieldId, tableName); FieldTypeClass fieldType = FIELD_TYPE_MAP.get(tableName); if (fieldType != null) { String fieldDataType = row.getString("logical_data_type"); String fieldName = row.getString("udf_type_label"); m_udfFields.put(fieldId, fieldName); addUserDefinedField(fieldType, UserFieldDataType.valueOf(fieldDataType), fieldName); } } // Process values for (Row row : values) { Integer typeID = row.getInteger("udf_type_id"); String tableName = tableNameMap.get(typeID); Map<Integer, List<Row>> tableData = m_udfValues.get(tableName); if (tableData == null) { tableData = new HashMap<Integer, List<Row>>(); m_udfValues.put(tableName, tableData); } Integer id = row.getInteger("fk_id"); List<Row> list = tableData.get(id); if (list == null) { list = new ArrayList<Row>(); tableData.put(id, list); } list.add(row); } }
python
def r(self, **kwargs): """ Resolve the object. This returns default (if present) or fails on an Empty. """ # by using kwargs we ensure that usage of positional arguments, as if # this object were another kind of function, will fail-fast and raise # a TypeError if 'default' in kwargs: default = kwargs.pop('default') if kwargs: raise TypeError( "Unexpected argument: {}".format(repr(next(iter(kwargs)))) ) return default else: raise JSaneException( "Key does not exist: {}".format(repr(self._key_name)) )
java
private String buildConnectionDescription() { List<String> descList = Lists.newArrayListWithExpectedSize(4); for(ConnectionPoolSegment segment : segments) { String desc = segment.dbConnection.getConnectionDescription(); if(!descList.contains(desc)) { descList.add(desc); } } return Joiner.on(',').join(descList); }
java
public static aaagroup_authorizationpolicy_binding[] get(nitro_service service, String groupname) throws Exception{ aaagroup_authorizationpolicy_binding obj = new aaagroup_authorizationpolicy_binding(); obj.set_groupname(groupname); aaagroup_authorizationpolicy_binding response[] = (aaagroup_authorizationpolicy_binding[]) obj.get_resources(service); return response; }
python
def _fill_lookup_prop(self, testsuites_properties): """Fills the polarion-lookup-method property.""" if not self._lookup_prop: raise Dump2PolarionException("Failed to set the 'polarion-lookup-method' property") etree.SubElement( testsuites_properties, "property", {"name": "polarion-lookup-method", "value": self._lookup_prop}, )
python
def get_aggregator_quotas(self, quota): """ Fetch ancestors quotas that have the same name and are registered as aggregator quotas. """ ancestors = quota.scope.get_quota_ancestors() aggregator_quotas = [] for ancestor in ancestors: for ancestor_quota_field in ancestor.get_quotas_fields(field_class=AggregatorQuotaField): if ancestor_quota_field.get_child_quota_name() == quota.name: aggregator_quotas.append(ancestor.quotas.get(name=ancestor_quota_field)) return aggregator_quotas
python
def max_texture_limit(self):
    """The maximum number of textures available for this graphic card's fragment shader."""
    max_unit_array = (gl.GLint * 1)()
    gl.glGetIntegerv(gl.GL_MAX_TEXTURE_IMAGE_UNITS, max_unit_array)
    return max_unit_array[0]
java
public static List<Partition> getPartitions(IMetaStoreClient client, Table table, Optional<String> filter) throws IOException { return getPartitions(client, table, filter, Optional.<HivePartitionExtendedFilter>absent()); }
java
public static RowMajorSparseMatrix random(int rows, int columns, double density, Random random) {
    return CRSMatrix.random(rows, columns, density, random);
}
python
def strings_equal(s1, s2):
    """
    Timing-attack resistant string comparison.

    Normal comparison using == will short-circuit on the first mismatching
    character. This avoids that by scanning the whole string, though we
    still reveal to a timing attack whether the strings are the same length.
    """
    try:
        s1 = unicodedata.normalize('NFKC', str(s1))
        s2 = unicodedata.normalize('NFKC', str(s2))
    except:
        s1 = unicodedata.normalize('NFKC', unicode(s1))
        s2 = unicodedata.normalize('NFKC', unicode(s2))
    return compare_digest(s1, s2)
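A minimal sketch of the same idea using only the standard library (the token values below are invented): `hmac.compare_digest` scans both inputs fully instead of short-circuiting on the first mismatch.

import unicodedata
from hmac import compare_digest

# Hypothetical token values for illustration.
expected = unicodedata.normalize('NFKC', 'secret-token')
supplied = unicodedata.normalize('NFKC', 'secret-token')

# Full-scan comparison; avoid `expected == supplied` when comparing secrets.
print(compare_digest(expected, supplied))        # True
print(compare_digest(expected, 'secret-tokeX'))  # False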
python
def rubberstamp(rest):
    "Approve something"
    parts = ["Bad credit? No credit? Slow credit?"]
    rest = rest.strip()
    if rest:
        parts.append("%s is" % rest)
        karma.Karma.store.change(rest, 1)
    parts.append("APPROVED!")
    return " ".join(parts)
python
def dump(data_structure): """Dump will create a human readable version of your data-structure. It will try to dump almost anything, it has recursion detection and will try to display the recursion in a meaningful way. :param data_structure: The structure to convert. When you freeze only content counts, same content same hash >>> a = hash(freeze(_TestClass(True))) >>> b = hash(freeze(_TestClass(True))) >>> b == a True >>> a = freeze(_TestClass(True)) >>> b = freeze(_TestClass(True)) >>> b == a True >>> x = _TestClass(True) >>> a = freeze(dump(x)) >>> b = freeze(dump(x)) >>> b == a True When you dump-freeze only content/type counts, same content/type same hash - Two object of the same type with same content will be equal - Two object of the different type with same content will be different >>> a = hash(freeze(dump(_TestClass(True)))) >>> b = hash(freeze(dump(_TestClass(True)))) >>> b == a True >>> a = freeze(dump(_TestClass(True))) >>> b = freeze(dump(_TestClass(True))) >>> b == a True >>> a = hash(freeze(dump(_TestClass(True)))) >>> b = hash(freeze(dump(_TestClass2(True)))) >>> b != a True >>> a = freeze(dump(_TestClass(True))) >>> b = freeze(dump(_TestClass2(True))) >>> b != a True >>> _py2_to_py3(vformat(dump([1, {'a' : 'b'}]))) [1, ["<class 'dict'>", {'a': 'b'}]] >>> vformat(recursive_sort(dump(_TestClass(True)))) ["<class 'freeze.xfreeze._TestClass'>", (('a', 'huhu'), ('sub', ["<class 'freeze.xfreeze._TestSlots'>", (('a', 'slot'), ('b', (1, (1, 2, 3), 2, 3)))]))] >>> a = _TestSlots() >>> b = [a, 1, 2, [a, "banane"]] >>> _no_null_x(vformat(dump(b))) {'a': 'slot', 'b': [1, 2, 3, (1, 2, 3)]}], 1, 2, 'banane']] >>> a = [1, 2] >>> _no_null_x(vformat(dump((a, (a, a))))) ([1, 2], ([1, 2], [1, 2])) >>> recursive_sort(dump(freeze(_TestClass(True)))) (('a', 'huhu'), ((('a', 'slot'), ('b', (1, (1, 2, 3), 2, 3))), 'sub')) >>> dump((None, (None, None))) (None, (None, None)) >>> s = _TestClassWithLen() >>> a = [s, s] >>> _no_null_x(vformat(dump(a))) {'a': 'huhu'}], >>> s = (1, 2) >>> a = [s, s] >>> _no_null_x(vformat(dump(a))) [(1, 2), (1, 2)] """ identity_set = set() dup_set = set() def dump_helper(data_structure): if data_structure is None: return None # Primitive types don't need processing if isinstance(data_structure, _primitive_types): return data_structure # Cycle detection idd = id(data_structure) if idd in identity_set: # We do not recurse into containers tlen = -1 try: tlen = len(data_structure) except: # pragma: no cover pass if tlen != -1: # We do not recurse into dictizable objects if ( hasattr(data_structure, "__dict__") or hasattr(data_structure, "__slots__") ): # Special case where __len__ is implemented dup_set.add(idd) return "R: %s at 0x%X" % (type(data_structure), idd) # Except string and tuples if not isinstance( data_structure, _ignore_types ): # pragma: no cover dup_set.add(idd) return "R: %s at 0x%X" % (type(data_structure), idd) else: dup_set.add(idd) return "R: %s at 0x%X" % (type(data_structure), idd) else: identity_set.add(idd) ret = Meta() ret.append(IDD(data_structure, idd)) was_dict = isinstance(data_structure, WasDict) was_tuple = isinstance(data_structure, tuple) if not was_dict: if hasattr(data_structure, "__slots__"): data_structure = dict(object_to_items(data_structure)) was_dict = True else: # Dictize if possible (support objects) try: data_structure = data_structure.__dict__ was_dict = True except: pass # Itemize if possible try: data_structure = data_structure.items() ret.append(dict([ (dump_helper(x[0]), dump_helper(x[1])) for x in data_structure ])) return 
ret except: pass tlen = -1 # If item has a length we dump it try: tlen = len(data_structure) except: # pragma: no cover pass if tlen != -1: # Well there are classes out in the wild that answer to len # but have no indexer. try: if was_dict: ret.append(WasDict([ (dump_helper(x[0]), dump_helper(x[1])) for x in data_structure ])) elif was_tuple: ret.append(tuple([ dump_helper(x) for x in data_structure ])) else: ret.append([ dump_helper(x) for x in data_structure ]) return ret except: # pragma: no cover pass ret.append(data_structure) # pragma: no cover return ret # pragma: no cover def clean_up(data_structure): if isinstance(data_structure, Meta): idd_temp = data_structure[0] idd_temp.is_target = idd_temp.idd in dup_set if not ( idd_temp.is_target or idd_temp.is_visible ): del data_structure[0] if len(data_structure) == 1: data_structure = data_structure[0] # We don't clean strings if not isinstance(data_structure, _string_types): tlen = -1 # If item has a length we clean it try: tlen = len(data_structure) except: pass if tlen != -1: if isinstance(data_structure, dict): for k in data_structure.keys(): data_structure[k] = clean_up(data_structure[k]) elif isinstance(data_structure, Meta): data_structure = Meta([ clean_up(x) for x in data_structure ]) elif isinstance(data_structure, tuple): data_structure = tuple([ clean_up(x) for x in data_structure ]) else: data_structure = [ clean_up(x) for x in data_structure ] return data_structure data = clean_up(dump_helper(data_structure)) return data
java
public void inc(String name, int con) throws IOException { if (hasLocalVariable(name)) { tinc(name, con); } else { VariableElement field = El.getField(subClass, name); if (field == null) { throw new IllegalArgumentException("field "+name+" not found"); } aload(0); getField(field); TypeMirror type = field.asType(); tconst(type, con); tadd(type); aload(0); swap(); putField(field); } }
java
static Generator serialMinuteGenerator(final int interval, final DateValue dtStart) { final TimeValue dtStartTime = TimeUtils.timeOf(dtStart); return new Generator() { int minute = dtStartTime.minute() - interval; int hour = dtStartTime.hour(); int day = dtStart.day(); int month = dtStart.month(); int year = dtStart.year(); @Override boolean generate(DTBuilder builder) { int nminute; if (hour != builder.hour || day != builder.day || month != builder.month || year != builder.year) { int minutesBetween = (daysBetween(builder, year, month, day) * 24 + builder.hour - hour) * 60 - minute; nminute = ((interval - (minutesBetween % interval)) % interval); if (nminute > 59) { /* * Don't update day so that the difference calculation * above is correct when this function is reentered with * a different day. */ return false; } hour = builder.hour; day = builder.day; month = builder.month; year = builder.year; } else { nminute = minute + interval; if (nminute > 59) { return false; } } minute = builder.minute = nminute; return true; } @Override public String toString() { return "serialMinuteGenerator:" + interval; } }; }
java
public static double atof(String s) { int i = 0; int sign = 1; double r = 0; // integer part double p = 1; // exponent of fractional part int state = 0; // 0 = int part, 1 = frac part while (i < s.length() && Character.isWhitespace(s.charAt(i))) i++; if (i < s.length() && s.charAt(i) == '-') { sign = -1; i++; } else if (i < s.length() && s.charAt(i) == '+') { i++; } while (i < s.length()) { char ch = s.charAt(i); if ('0' <= ch && ch <= '9') { if (state == 0) r = r * 10 + ch - '0'; else if (state == 1) { p = p / 10; r = r + p * (ch - '0'); } } else if (ch == '.') { if (state == 0) state = 1; else return sign * r; } else if (ch == 'e' || ch == 'E' || ch == 'd' || ch == 'D') { long e = (int) parseLong(s.substring(i + 1), 10); return sign * r * Math.pow(10, e); } else return sign * r; i++; } return sign * r; }
java
public ServiceBuilder withStorageFactory(Function<ComponentSetup, StorageFactory> storageFactoryCreator) {
    Preconditions.checkNotNull(storageFactoryCreator, "storageFactoryCreator");
    this.storageFactoryCreator = storageFactoryCreator;
    return this;
}
java
public void marshall(StackError stackError, ProtocolMarshaller protocolMarshaller) { if (stackError == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(stackError.getErrorCode(), ERRORCODE_BINDING); protocolMarshaller.marshall(stackError.getErrorMessage(), ERRORMESSAGE_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } }
python
def values_list( self, *fields_: str, flat: bool = False ) -> "ValuesListQuery": # pylint: disable=W0621 """ Make QuerySet returns list of tuples for given args instead of objects. If ```flat=True`` and only one arg is passed can return flat list. """ return ValuesListQuery( db=self._db, model=self.model, q_objects=self._q_objects, flat=flat, fields_for_select_list=fields_, distinct=self._distinct, limit=self._limit, offset=self._offset, orderings=self._orderings, annotations=self._annotations, custom_filters=self._custom_filters, )
python
def _gsa_force(grav, mass_i, mass_j, position_i, position_j):
    """Gives the force of solution j on solution i.

    Variable name in GSA paper given in ()

    args:
        grav: The gravitational constant. (G)
        mass_i: The mass of solution i (derived from fitness). (M_i)
        mass_j: The mass of solution j (derived from fitness). (M_j)
        position_i: The position of solution i. (x_i)
        position_j: The position of solution j. (x_j)

    returns:
        numpy.array; The force vector of solution j on solution i.
    """
    position_diff = numpy.subtract(position_j, position_i)
    distance = numpy.linalg.norm(position_diff)

    # The first 3 terms give the magnitude of the force
    # The last term is a vector that provides the direction
    # Epsilon prevents divide by zero errors
    return grav * (mass_i * mass_j) / (distance + EPSILON) * position_diff
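A self-contained numeric sketch of the same force expression (EPSILON and all input values below are made up, not taken from the source): the result points from solution i toward solution j and scales with both masses.

import numpy

EPSILON = 1e-10  # assumed small constant to avoid division by zero


def gsa_force(grav, mass_i, mass_j, position_i, position_j):
    position_diff = numpy.subtract(position_j, position_i)
    distance = numpy.linalg.norm(position_diff)
    return grav * (mass_i * mass_j) / (distance + EPSILON) * position_diff


# Illustrative values only: G=1, masses 2 and 3, displacement (3, 4) of length 5.
f = gsa_force(1.0, 2.0, 3.0, numpy.array([0.0, 0.0]), numpy.array([3.0, 4.0]))
print(f)  # -> approximately [3.6, 4.8]; magnitude 1*2*3/5 * 5 = 6, pointing toward j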
java
private static void usage() { display("Usage: CSVLoader <params>"); display("where <params> are:"); display(" -app <name> Doradus application name. Default is: {}", CSVConfig.DEFAULT_APPNAME); display(" -batchsize <#> Batch size. Default is: {}", CSVConfig.DEFAULT_BATCH_SIZE); display(" -compress [T|F] Compress messages. Default is: {}", CSVConfig.DEFAULT_COMPRESS); display(" -host <host> Doradus server host name. Default is: {}", CSVConfig.DEFAULT_HOST); display(" -id <name> Column name of ID field. Default is: {}", CSVConfig.DEFAULT_ID_FIELD); display(" -increment_ts [T|F] True to increment timestamp fields 1 day per batch. Default is: {}", CSVConfig.DEFAULT_INCREMENT_TS); display(" -merge_all [T|F] (OLAP only): true to merge after every batch. Default is: {}", CSVConfig.DEFAULT_MERGE_ALL); display(" -password <pw> Password for tenant. Default is: {}", CSVConfig.DEFAULT_PASSWORD); display(" -port <port> Doradus server port. Default is: {}", CSVConfig.DEFAULT_PORT); display(" -root <folder> Root folder of CSV files. Default is: {}", CSVConfig.DEFAULT_ROOT); display(" -schema <file> Name of application schema file. Default is: <app name>.xml"); display(" -skip_undef [T|F] True to skip fields not declared in the schema. Default is: {}", CSVConfig.DEFAULT_SKIP_UNDEF); display(" -shard <name> (OLAP only): Name of shard to load. Default is: {}", CSVConfig.DEFAULT_SHARD); display(" -tenant <name> Name of tenant to use. Default is: {}", CSVConfig.DEFAULT_TENANT); display(" -user <ID> User ID for tenant. Default is: {}", CSVConfig.DEFAULT_USER); display(" -workers <#> # of worker threads. Default is: {}", CSVConfig.DEFAULT_WORKERS); display("To use TLS:"); display(" -tls [T|F] True to enable TLS/SSL. Default is: {}", CSVConfig.DEFAULT_TLS); display(" -keystore <file> File name of keystore. Default is: {}", CSVConfig.DEFAULT_KEYSTORE); display(" -keystorepassword <pw> Password of keystore file. Default is: {}", CSVConfig.DEFAULT_KEYSTORE_PW); display(" -truststore <file> File name of truststore. Default is: {}", CSVConfig.DEFAULT_TRUSTSTORE); display(" -truststorepassword <pw> Password of truststore file. Default is: {}", CSVConfig.DEFAULT_TRUSTSTORE_PW); display("Deletes and recreates OLAP or Spider application defined by 'schema' file, then loads all CSV"); display("files found in 'root' folder."); System.exit(0); }
python
def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): """Configure multiple sources from charm configuration. The lists are encoded as yaml fragments in the configuration. The fragment needs to be included as a string. Sources and their corresponding keys are of the types supported by add_source(). Example config: install_sources: | - "ppa:foo" - "http://example.com/repo precise main" install_keys: | - null - "a1b2c3d4" Note that 'null' (a.k.a. None) should not be quoted. """ sources = safe_load((config(sources_var) or '').strip()) or [] keys = safe_load((config(keys_var) or '').strip()) or None if isinstance(sources, six.string_types): sources = [sources] if keys is None: for source in sources: add_source(source, None) else: if isinstance(keys, six.string_types): keys = [keys] if len(sources) != len(keys): raise SourceConfigError( 'Install sources and keys lists are different lengths') for source, key in zip(sources, keys): add_source(source, key) if update: _fetch_update(fatal=True)
java
public DescribeAggregationAuthorizationsResult withAggregationAuthorizations(AggregationAuthorization... aggregationAuthorizations) { if (this.aggregationAuthorizations == null) { setAggregationAuthorizations(new com.amazonaws.internal.SdkInternalList<AggregationAuthorization>(aggregationAuthorizations.length)); } for (AggregationAuthorization ele : aggregationAuthorizations) { this.aggregationAuthorizations.add(ele); } return this; }
python
def parse_officials(self): """ Parse the officials :returns: ``self`` on success, ``None`` otherwise """ # begin proper body of method lx_doc = self.html_doc() off_parser = opm(self.game_key.season) self.officials = off_parser(lx_doc) return self if self.officials else None
java
public static <K, V> Map<K, V> drop(Map<K, V> self, int num) { if (self.size() <= num) { return createSimilarMap(self); } if (num == 0) { return cloneSimilarMap(self); } Map<K, V> ret = createSimilarMap(self); for (Map.Entry<K, V> entry : self.entrySet()) { K key = entry.getKey(); V value = entry.getValue(); if (num-- <= 0) { ret.put(key, value); } } return ret; }
java
public static AclEntry fromCliString(String stringEntry) { if (stringEntry == null) { throw new IllegalArgumentException("Input acl string is null"); } List<String> components = Arrays.stream(stringEntry.split(":")).map(String::trim).collect( Collectors.toList()); if (!((components.size() == 3 && !components.get(0).equals(DEFAULT_KEYWORD)) || (components.size() == 4 && components.get(0).equals(DEFAULT_KEYWORD)))) { throw new IllegalArgumentException("Unexpected acl components: " + stringEntry); } AclEntry.Builder builder = new AclEntry.Builder(); int startingIndex = 0; if (components.get(0).equals(DEFAULT_KEYWORD)) { startingIndex = 1; builder.setIsDefault(true); } else { builder.setIsDefault(false); } String type = components.get(startingIndex + 0); String subject = components.get(startingIndex + 1); String actions = components.get(startingIndex + 2); if (type.isEmpty()) { throw new IllegalArgumentException("ACL entry type is empty: " + stringEntry); } switch (type) { case AclEntryType.USER_COMPONENT: if (subject.isEmpty()) { builder.setType(AclEntryType.OWNING_USER); } else { builder.setType(AclEntryType.NAMED_USER); } break; case AclEntryType.GROUP_COMPONENT: if (subject.isEmpty()) { builder.setType(AclEntryType.OWNING_GROUP); } else { builder.setType(AclEntryType.NAMED_GROUP); } break; case AclEntryType.MASK_COMPONENT: if (!subject.isEmpty()) { throw new IllegalArgumentException( "Subject for acl mask type must be empty: " + stringEntry); } builder.setType(AclEntryType.MASK); break; case AclEntryType.OTHER_COMPONENT: if (!subject.isEmpty()) { throw new IllegalArgumentException( "Subject for acl other type must be empty: " + stringEntry); } builder.setType(AclEntryType.OTHER); break; default: throw new IllegalArgumentException("Unexpected ACL entry type: " + stringEntry); } builder.setSubject(subject); Mode.Bits bits = Mode.Bits.fromString(actions); for (AclAction action : bits.toAclActionSet()) { builder.addAction(action); } return builder.build(); }
python
def _setfile(self, filename, length, offset):
    """Use file as source of bits."""
    source = open(filename, 'rb')
    if offset is None:
        offset = 0
    if length is None:
        length = os.path.getsize(source.name) * 8 - offset
    byteoffset, offset = divmod(offset, 8)
    bytelength = (length + byteoffset * 8 + offset + 7) // 8 - byteoffset
    m = MmapByteArray(source, bytelength, byteoffset)
    if length + byteoffset * 8 + offset > m.filelength * 8:
        raise CreationError("File is not long enough for specified "
                            "length and offset.")
    self._datastore = ConstByteStore(m, length, offset)
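A small worked sketch of the offset arithmetic above (the numbers are invented): a bit offset is split into a whole-byte part plus a remainder, and the byte count is rounded up so the final partial byte is still mapped.

# Illustrative only: 19 bits starting at bit offset 13.
length, offset = 19, 13

byteoffset, bit_remainder = divmod(offset, 8)  # -> (1, 5)
bytelength = (length + byteoffset * 8 + bit_remainder + 7) // 8 - byteoffset

# Bits 13..31 span byte indices 1..3 inclusive, so 3 bytes must be mapped.
print(byteoffset, bit_remainder, bytelength)   # 1 5 3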
python
def bbox_transform(ex_rois, gt_rois, box_stds):
    """
    compute bounding box regression targets from ex_rois to gt_rois
    :param ex_rois: [N, 4]
    :param gt_rois: [N, 4]
    :return: [N, 4]
    """
    assert ex_rois.shape[0] == gt_rois.shape[0], 'inconsistent rois number'

    ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
    ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
    ex_ctr_x = ex_rois[:, 0] + 0.5 * (ex_widths - 1.0)
    ex_ctr_y = ex_rois[:, 1] + 0.5 * (ex_heights - 1.0)

    gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
    gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
    gt_ctr_x = gt_rois[:, 0] + 0.5 * (gt_widths - 1.0)
    gt_ctr_y = gt_rois[:, 1] + 0.5 * (gt_heights - 1.0)

    targets_dx = (gt_ctr_x - ex_ctr_x) / (ex_widths + 1e-14) / box_stds[0]
    targets_dy = (gt_ctr_y - ex_ctr_y) / (ex_heights + 1e-14) / box_stds[1]
    targets_dw = np.log(gt_widths / ex_widths) / box_stds[2]
    targets_dh = np.log(gt_heights / ex_heights) / box_stds[3]

    targets = np.vstack((targets_dx, targets_dy, targets_dw, targets_dh)).transpose()
    return targets
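A self-contained numeric sketch of the same target encoding (the boxes and stds below are invented): a ground-truth box shifted right by 2 pixels relative to an identically sized anchor yields a positive dx and a zero dw.

import numpy as np

# Illustrative anchor/ground truth in (x1, y1, x2, y2) form.
ex = np.array([[0.0, 0.0, 9.0, 9.0]])    # 10x10 anchor
gt = np.array([[2.0, 0.0, 11.0, 9.0]])   # same size, shifted +2 in x
stds = [0.1, 0.1, 0.2, 0.2]

ex_w = ex[:, 2] - ex[:, 0] + 1.0
ex_cx = ex[:, 0] + 0.5 * (ex_w - 1.0)
gt_w = gt[:, 2] - gt[:, 0] + 1.0
gt_cx = gt[:, 0] + 0.5 * (gt_w - 1.0)

dx = (gt_cx - ex_cx) / ex_w / stds[0]
dw = np.log(gt_w / ex_w) / stds[2]
print(dx, dw)  # ~[2.0] [0.0]: 2-pixel shift over a 10-pixel anchor scaled by std 0.1; no size change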
python
def get_sequences(self, pdb_id = None): '''Create Sequence objects for each FASTA sequence.''' sequences = {} if pdb_id: for chain_id, sequence in self.get(pdb_id, {}).iteritems(): sequences[chain_id] = Sequence.from_sequence(chain_id, sequence) else: for pdb_id, v in self.iteritems(): sequences[pdb_id] = {} for chain_id, sequence in v.iteritems(): sequences[pdb_id][chain_id] = Sequence.from_sequence(chain_id, sequence) return sequences
java
public Element addClass(String className) {
    Validate.notNull(className);

    Set<String> classes = classNames();
    classes.add(className);
    classNames(classes);

    return this;
}
java
@RequestMapping(value = "/cloud/volume/refresh", method = POST, consumes = APPLICATION_JSON_VALUE, produces = APPLICATION_JSON_VALUE) public ResponseEntity<Collection<String>> refreshVolumes( @Valid @RequestBody CloudVolumeListRefreshRequest request) { return ResponseEntity.ok().body(cloudVolumeService.refreshVolumes(request)); }
java
public static Response.ResponseBuilder makeCORS(Response.ResponseBuilder responseBuilder, String returnMethod) { // TODO(william): Make origin, methods, and headers configurable. Response.ResponseBuilder rb = responseBuilder.header("Access-Control-Allow-Origin", "*") .header("Access-Control-Allow-Methods", "GET, POST, OPTIONS"); if (!"".equals(returnMethod)) { rb.header("Access-Control-Allow-Headers", returnMethod); } return rb; }
java
public void marshall(UpdateVirtualServiceRequest updateVirtualServiceRequest, ProtocolMarshaller protocolMarshaller) { if (updateVirtualServiceRequest == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(updateVirtualServiceRequest.getClientToken(), CLIENTTOKEN_BINDING); protocolMarshaller.marshall(updateVirtualServiceRequest.getMeshName(), MESHNAME_BINDING); protocolMarshaller.marshall(updateVirtualServiceRequest.getSpec(), SPEC_BINDING); protocolMarshaller.marshall(updateVirtualServiceRequest.getVirtualServiceName(), VIRTUALSERVICENAME_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } }
python
def find_version_by_regex(file_source):  # type: (str)->Optional[str]
    """
    Regex for dunder version
    """
    if not file_source:
        return None
    version_match = re.search(r"^version=['\"]([^'\"]*)['\"]", file_source, re.M)
    if version_match:
        return version_match.group(1)
    return None
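A quick usage sketch of the regex as written (the sample source text is invented): the pattern matches a line starting with `version=` with no spaces around the equals sign, and captures the quoted value.

import re

# Invented file contents for illustration.
sample = 'name="pkg"\nversion="1.2.3"\n'

match = re.search(r"^version=['\"]([^'\"]*)['\"]", sample, re.M)
print(match.group(1) if match else None)  # -> 1.2.3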
python
async def expn( self, address: str, timeout: DefaultNumType = _default ) -> SMTPResponse: """ Send an SMTP EXPN command, which expands a mailing list. Not many servers support this command. :raises SMTPResponseException: on unexpected server response code """ await self._ehlo_or_helo_if_needed() parsed_address = parse_address(address) async with self._command_lock: response = await self.execute_command( b"EXPN", parsed_address.encode("ascii"), timeout=timeout ) if response.code != SMTPStatus.completed: raise SMTPResponseException(response.code, response.message) return response
java
public void setValidationParameter(byte[] validationParameter) throws InvalidArgumentException {
    if (null == validationParameter) {
        throw new InvalidArgumentException("The validationParameter parameter can not be null.");
    }
    this.validationParameter = ByteString.copyFrom(validationParameter);
}
python
def get_manual_homology_models(self, input_dict, outdir=None, clean=True, force_rerun=False): """Copy homology models to the GEM-PRO project. Requires an input of a dictionary formatted like so:: { model_gene: { homology_model_id1: { 'model_file': '/path/to/homology/model.pdb', 'file_type': 'pdb' 'additional_info': info_value }, homology_model_id2: { 'model_file': '/path/to/homology/model.pdb' 'file_type': 'pdb' } } } Args: input_dict (dict): Dictionary of dictionaries of gene names to homology model IDs and other information outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories were not created initially clean (bool): If homology files should be cleaned and saved as a new PDB file force_rerun (bool): If homology files should be copied again even if they exist in the GEM-PRO directory """ if outdir: outdir_set = True else: outdir_set = False counter = 0 for g in tqdm(self.genes): if g.id not in input_dict: continue if not outdir_set: outdir = g.protein.structure_dir if not outdir: raise ValueError('Output directory must be specified') for hid, hdict in input_dict[g.id].items(): if 'model_file' not in hdict or 'file_type' not in hdict: raise KeyError('"model_file" and "file_type" must be keys in the manual input dictionary.') new_homology = g.protein.load_pdb(pdb_id=hid, pdb_file=hdict['model_file'], file_type=hdict['file_type'], is_experimental=False) if clean: new_homology.load_structure_path(new_homology.clean_structure(outdir=outdir, force_rerun=force_rerun), hdict['file_type']) else: copy_to = op.join(outdir, op.basename(hdict['model_file'])) if ssbio.utils.force_rerun(force_rerun, copy_to): # Just copy the file to the structure directory and store the file name log.debug('{}: copying model from original directory to GEM-PRO directory'.format(op.basename(hdict['model_file']))) shutil.copy2(hdict['model_file'], outdir) new_homology.load_structure_path(copy_to, hdict['file_type']) else: log.debug('{}: homology model already copied to directory'.format(copy_to)) new_homology.load_structure_path(copy_to, hdict['file_type']) # TODO: need to better handle other info in the provided dictionary, if any new_homology.update(hdict) log.debug('{}: updated homology model information and copied model file.'.format(g.id)) counter += 1 log.info('Updated homology model information for {} genes.'.format(counter))
python
def GetRosettaResidueMap(self, ConvertMSEToAtom = False, RemoveIncompleteFinalResidues = False, RemoveIncompleteResidues = False): '''Note: This function ignores any DNA.''' raise Exception('This code looks to be deprecated. Use construct_pdb_to_rosetta_residue_map instead.') chain = None sequences = {} residue_map = {} resid_set = set() resid_list = [] DNA_residues = set([' DA', ' DC', ' DG', ' DT']) chains = [] self.RAW_ATOM_SEQUENCE = [] essential_atoms_1 = set(['CA', 'C', 'N'])#, 'O']) essential_atoms_2 = set(['CA', 'C', 'N'])#, 'OG']) current_atoms = set() atoms_read = {} oldchainID = None removed_residue = {} for line in self.lines: if line[0:4] == 'ATOM' or (ConvertMSEToAtom and (line[0:6] == 'HETATM') and (line[17:20] == 'MSE')): chainID = line[21] if missing_chain_ids.get(self.pdb_id): chainID = missing_chain_ids[self.pdb_id] if chainID not in chains: chains.append(chainID) residue_longname = line[17:20] if residue_longname in DNA_residues: # Skip DNA continue if residue_longname == 'UNK': # Skip unknown residues continue if residue_longname not in allowed_PDB_residues_types and not(ConvertMSEToAtom and residue_longname == 'MSE'): if not self.strict: # Skip unknown residues continue else: raise NonCanonicalResidueException("Residue %s encountered: %s" % (line[17:20], line)) else: resid = line[21:27] #print(chainID, residue_longname, resid) #print(line) #print(resid_list) if resid not in resid_set: removed_residue[chainID] = False add_residue = True if current_atoms: if RemoveIncompleteResidues and essential_atoms_1.intersection(current_atoms) != essential_atoms_1 and essential_atoms_2.intersection(current_atoms) != essential_atoms_2: oldChain = resid_list[-1][0] oldResidueID = resid_list[-1][1:] print("The last residue '%s', %s, in chain %s is missing these atoms: %s." % (resid_list[-1], residue_longname, oldChain, essential_atoms_1.difference(current_atoms) or essential_atoms_2.difference(current_atoms))) resid_set.remove(resid_list[-1]) #print("".join(resid_list)) resid_list = resid_list[:-1] if oldchainID: removed_residue[oldchainID] = True #print("".join(resid_list)) #print(sequences[oldChain]) if sequences.get(oldChain): sequences[oldChain] = sequences[oldChain][:-1] if residue_map.get(oldChain): residue_map[oldChain] = residue_map[oldChain][:-1] #print(sequences[oldChain] else: assert(not(resid_set)) current_atoms = set() atoms_read[chainID] = set() atoms_read[chainID].add(line[12:15].strip()) resid_set.add(resid) resid_list.append(resid) chainID = line[21] sequences[chainID] = sequences.get(chainID, []) if residue_longname in non_canonical_amino_acids: sequences[chainID].append(non_canonical_amino_acids[residue_longname]) else: sequences[chainID].append(residue_type_3to1_map[residue_longname]) residue_map[chainID] = residue_map.get(chainID, []) if residue_longname in non_canonical_amino_acids: residue_map[chainID].append((resid, non_canonical_amino_acids[residue_longname])) else: residue_map[chainID].append((resid, residue_type_3to1_map[residue_longname])) oldchainID = chainID else: #atoms_read[chainID] = atoms_read.get(chainID, set()) atoms_read[chainID].add(line[12:15].strip()) current_atoms.add(line[12:15].strip()) if RemoveIncompleteFinalResidues: # These are (probably) necessary for Rosetta to keep the residue. Rosetta does throw away residues where only the N atom is present if that residue is at the end of a chain. 
for chainID, sequence_list in sequences.iteritems(): if not(removed_residue[chainID]): if essential_atoms_1.intersection(atoms_read[chainID]) != essential_atoms_1 and essential_atoms_2.intersection(atoms_read[chainID]) != essential_atoms_2: print("The last residue %s of chain %s is missing these atoms: %s." % (sequence_list[-1], chainID, essential_atoms_1.difference(atoms_read[chainID]) or essential_atoms_2.difference(atoms_read[chainID]))) oldResidueID = sequence_list[-1][1:] residue_map[chainID] = residue_map[chainID][0:-1] sequences[chainID] = sequence_list[0:-1] for chainID, sequence_list in sequences.iteritems(): sequences[chainID] = "".join(sequence_list) assert(sequences[chainID] == "".join([res_details[1] for res_details in residue_map[chainID]])) for chainID in chains: for a_acid in sequences.get(chainID, ""): self.RAW_ATOM_SEQUENCE.append((chainID, a_acid)) residue_objects = {} for chainID in residue_map.keys(): residue_objects[chainID] = [] for chainID, residue_list in residue_map.iteritems(): for res_pair in residue_list: resid = res_pair[0] resaa = res_pair[1] assert(resid[0] == chainID) residue_objects[chainID].append((resid[1:].strip(), resaa)) return sequences, residue_objects
python
def sec(self, s):
    """
    Parse a public pair as a text SEC.
    Return a :class:`Key <pycoin.key.Key>` or None.
    """
    pair = parse_colon_prefix(s)
    if pair is not None and pair[0] == self._wif_prefix:
        s = pair[1]
    try:
        sec = h2b(s)
        return self._network.keys.public(sec)
    except Exception:
        pass
java
public static boolean allEquals(final String value, final String... strings) { if (strings != null) { for (final String s : strings) { if (s == null && value != null || s != null && !s.equals(value)) { return false; } } } else { return value == null; } return true; }
java
public OneLoginResponse<User> getUsersBatch(int batchSize)
        throws OAuthSystemException, OAuthProblemException, URISyntaxException {
    return getUsersBatch(batchSize, null);
}
python
def get_block_height(self, is_full: bool = False) -> int or dict:
    """
    This interface is used to get the decimal block height in current network.

    Return:
        the decimal total height of blocks in current network.
    """
    response = self.get_block_count(is_full=True)
    response['result'] -= 1
    if is_full:
        return response
    return response['result']
java
public void marshall(Entitlement entitlement, ProtocolMarshaller protocolMarshaller) { if (entitlement == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(entitlement.getDescription(), DESCRIPTION_BINDING); protocolMarshaller.marshall(entitlement.getEncryption(), ENCRYPTION_BINDING); protocolMarshaller.marshall(entitlement.getEntitlementArn(), ENTITLEMENTARN_BINDING); protocolMarshaller.marshall(entitlement.getName(), NAME_BINDING); protocolMarshaller.marshall(entitlement.getSubscribers(), SUBSCRIBERS_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } }
java
@Override
public Float set(int index, Float element) {
    return set(index, element.floatValue());
}
java
private static void appendPathElt(final Object pathElt, final StringBuilder buf) { if (buf.length() > 0) { buf.append(File.pathSeparatorChar); } // Escape any rogue path separators, as long as file separator is not '\\' (on Windows, if there are any // extra ';' characters in a path element, there's really nothing we can do to escape them, since they can't // be escaped as "\\;") final String path = File.separatorChar == '\\' ? pathElt.toString() : pathElt.toString().replaceAll(File.pathSeparator, "\\" + File.pathSeparator); buf.append(path); }
java
final int awaitJoin(WorkQueue w, ForkJoinTask<?> task, long deadline) { int s = 0; if (w != null) { ForkJoinTask<?> prevJoin = w.currentJoin; if (task != null && (s = task.status) >= 0) { w.currentJoin = task; CountedCompleter<?> cc = (task instanceof CountedCompleter) ? (CountedCompleter<?>)task : null; for (;;) { if (cc != null) helpComplete(w, cc, 0); else helpStealer(w, task); if ((s = task.status) < 0) break; long ms, ns; if (deadline == 0L) ms = 0L; else if ((ns = deadline - System.nanoTime()) <= 0L) break; else if ((ms = TimeUnit.NANOSECONDS.toMillis(ns)) <= 0L) ms = 1L; if (tryCompensate(w)) { task.internalWait(ms); U.getAndAddLong(this, CTL, AC_UNIT); } if ((s = task.status) < 0) break; } w.currentJoin = prevJoin; } } return s; }
python
def coarsen_data(x, y, ey=None, ex=None, level=2, exponential=False): """ Coarsens the supplied data set. Returns coarsened arrays of x, y, along with quadrature-coarsened arrays of ey and ex if specified. Parameters ---------- x, y Data arrays. Can be lists (will convert to numpy arrays). These are coarsened by taking an average. ey=None, ex=None y and x uncertainties. Accepts arrays, lists, or numbers. These are coarsened by averaging in quadrature. level=2 For linear coarsening (default, see below), every n=level points will be averaged together (in quadrature for errors). For exponential coarsening, bins will be spaced by the specified scaling=level factor; for example, level=1.4 will group points within 40% of each other's x values. This is a great option for log-x plots, as the outcome will be evenly spaced. exponential=False If False, coarsen using linear spacing. If True, the bins will be exponentially spaced by the specified level. """ # Normal coarsening if not exponential: # Coarsen the data xc = coarsen_array(x, level, 'mean') yc = coarsen_array(y, level, 'mean') # Coarsen the y error in quadrature if not ey is None: if not is_iterable(ey): ey = [ey]*len(y) eyc = _n.sqrt(coarsen_array(_n.power(ey,2)/level, level, 'mean')) # Coarsen the x error in quadrature if not ex is None: if not is_iterable(ey): ex = [ex]*len(x) exc = _n.sqrt(coarsen_array(_n.power(ex,2)/level, level, 'mean')) # Exponential coarsen else: # Make sure the data are arrays x = _n.array(x) y = _n.array(y) # Create the new arrays to fill xc = [] yc = [] if not ey is None: if not is_iterable(ey): ey = _n.array([ey]*len(y)) eyc = [] if not ex is None: if not is_iterable(ex): ex = _n.array([ex]*len(x)) exc = [] # Find the first element that is greater than zero x0 = x[x>0][0] # Now loop over the exponential bins n = 0 while x0*level**n < x[-1]: # Get all the points between x[n] and x[n]*r mask = _n.logical_and(x0*level**n <= x, x < x0*level**(n+1)) # Only do something if points exist from this range! if len(x[mask]): # Take the average x value xc.append(_n.average(x[mask])) yc.append(_n.average(y[mask])) # do the errors in quadrature if not ey is None: eyc.append(_n.sqrt(_n.average((ey**2)[mask])/len(ey[mask]))) if not ex is None: exc.append(_n.sqrt(_n.average((ex**2)[mask])/len(ex[mask]))) # Increment the counter n += 1 # Done exponential loop # Done coarsening # Return depending on situation if ey is None and ex is None: return _n.array(xc), _n.array(yc) elif ex is None : return _n.array(xc), _n.array(yc), _n.array(eyc) elif ey is None : return _n.array(xc), _n.array(yc), _n.array(exc) else : return _n.array(xc), _n.array(yc), _n.array(eyc), _n.array(exc)
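A minimal self-contained sketch of the linear-coarsening behavior described above (numpy only; the helper name and data values are invented, and the original relies on its own coarsen_array utility instead): every `level` consecutive points are averaged, and y uncertainties combine in quadrature, shrinking by roughly sqrt(level).

import numpy as np

def coarsen_linear(x, y, ey, level=2):
    # Trim so the length is a multiple of `level`, then average each group.
    n = (len(x) // level) * level
    xc = np.mean(np.reshape(x[:n], (-1, level)), axis=1)
    yc = np.mean(np.reshape(y[:n], (-1, level)), axis=1)
    # Quadrature average: sqrt(mean(ey^2) / level) per coarsened point.
    eyc = np.sqrt(np.mean(np.reshape(np.asarray(ey[:n]) ** 2, (-1, level)), axis=1) / level)
    return xc, yc, eyc

# Invented data: 6 points coarsened by 2 -> 3 points; errors drop from 1 to ~0.707.
x = np.arange(6.0)
y = np.array([1.0, 3.0, 2.0, 2.0, 5.0, 7.0])
print(coarsen_linear(x, y, ey=np.ones(6), level=2))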
python
def do_check_artifact_cache(self, vts, post_process_cached_vts=None): """Checks the artifact cache for the specified list of VersionedTargetSets. Returns a tuple (cached, uncached, uncached_causes) of VersionedTargets that were satisfied/unsatisfied from the cache. """ if not vts: return [], [], [] read_cache = self._cache_factory.get_read_cache() items = [(read_cache, vt.cache_key, vt.current_results_dir if self.cache_target_dirs else None) for vt in vts] res = self.context.subproc_map(call_use_cached_files, items) cached_vts = [] uncached_vts = [] uncached_causes = [] # Note that while the input vts may represent multiple targets (for tasks that overrride # check_artifact_cache_for), the ones we return must represent single targets. # Once flattened, cached/uncached vts are in separate lists. Each uncached vts is paired # with why it is missed for stat reporting purpose. for vt, was_in_cache in zip(vts, res): if was_in_cache: cached_vts.extend(vt.versioned_targets) else: uncached_vts.extend(vt.versioned_targets) uncached_causes.extend(repeat(was_in_cache, len(vt.versioned_targets))) if isinstance(was_in_cache, UnreadableArtifact): self._cache_key_errors.update(was_in_cache.key) if post_process_cached_vts: post_process_cached_vts(cached_vts) for vt in cached_vts: vt.update() return cached_vts, uncached_vts, uncached_causes
python
def from_btl(fname): """ DataFrame constructor to open Seabird CTD BTL-ASCII format. Examples -------- >>> from pathlib import Path >>> import ctd >>> data_path = Path(__file__).parents[1].joinpath("tests", "data") >>> bottles = ctd.from_btl(data_path.joinpath('btl', 'bottletest.btl')) """ f = _read_file(fname) metadata = _parse_seabird(f.readlines(), ftype="btl") f.seek(0) df = pd.read_fwf( f, header=None, index_col=False, names=metadata["names"], parse_dates=False, skiprows=metadata["skiprows"], ) f.close() # At this point the data frame is not correctly lined up (multiple rows # for avg, std, min, max or just avg, std, etc). # Also needs date,time,and bottle number to be converted to one per line. # Get row types, see what you have: avg, std, min, max or just avg, std. rowtypes = df[df.columns[-1]].unique() # Get times and dates which occur on second line of each bottle. dates = df.iloc[:: len(rowtypes), 1].reset_index(drop=True) times = df.iloc[1 :: len(rowtypes), 1].reset_index(drop=True) datetimes = dates + " " + times # Fill the Date column with datetimes. df.loc[:: len(rowtypes), "Date"] = datetimes.values df.loc[1 :: len(rowtypes), "Date"] = datetimes.values # Fill missing rows. df["Bottle"] = df["Bottle"].fillna(method="ffill") df["Date"] = df["Date"].fillna(method="ffill") df["Statistic"] = df["Statistic"].str.replace(r"\(|\)", "") # (avg) to avg name = _basename(fname)[1] dtypes = { "bpos": int, "pumps": bool, "flag": bool, "Bottle": int, "Scan": int, "Statistic": str, "Date": str, } for column in df.columns: if column in dtypes: df[column] = df[column].astype(dtypes[column]) else: try: df[column] = df[column].astype(float) except ValueError: warnings.warn("Could not convert %s to float." % column) df["Date"] = pd.to_datetime(df["Date"]) metadata["name"] = str(name) setattr(df, "_metadata", metadata) return df
python
def hcons(xmrs):
    """Return the list of all HandleConstraints in *xmrs*."""
    return [
        HandleConstraint(hi, reln, lo)
        for hi, reln, lo in sorted(xmrs.hcons(), key=lambda hc: var_id(hc[0]))
    ]
python
def blend(self, other): """Alpha blend *other* on top of the current image.""" raise NotImplementedError("This method has not be implemented for " "xarray support.") if self.mode != "RGBA" or other.mode != "RGBA": raise ValueError("Images must be in RGBA") src = other dst = self outa = src.channels[3] + dst.channels[3] * (1 - src.channels[3]) for i in range(3): dst.channels[i] = (src.channels[i] * src.channels[3] + dst.channels[i] * dst.channels[3] * (1 - src.channels[3])) / outa dst.channels[i][outa == 0] = 0 dst.channels[3] = outa
python
def populate(self): """Populates a new cache. """ if self.exists: raise CacheAlreadyExistsException('location: %s' % self.cache_uri) self._populate_setup() with closing(self.graph): with self._download_metadata_archive() as metadata_archive: for fact in self._iter_metadata_triples(metadata_archive): self._add_to_graph(fact)
python
def user_hostname(self, vm_name=None):
    '''
    Return a string combining user and hostname, e.g. '[email protected]'.
    This string is suitable for use in an ssh command. If user is None or
    empty, it will be left out of the string, e.g. 'localhost'. If hostname
    is None, have bigger problems.

    Raises an Exception if the Vagrant box has not yet been created or
    has been destroyed.
    '''
    user = self.user(vm_name=vm_name)
    user_prefix = user + '@' if user else ''
    return user_prefix + self.hostname(vm_name=vm_name)
python
def bag(directory, mets_basename, dest, identifier, in_place, manifestation_depth, mets, base_version_checksum, tag_file, skip_zip, processes): """ Bag workspace as OCRD-ZIP at DEST """ resolver = Resolver() workspace = Workspace(resolver, directory=directory, mets_basename=mets_basename) workspace_bagger = WorkspaceBagger(resolver) workspace_bagger.bag( workspace, dest=dest, ocrd_identifier=identifier, ocrd_manifestation_depth=manifestation_depth, ocrd_mets=mets, ocrd_base_version_checksum=base_version_checksum, processes=processes, tag_files=tag_file, skip_zip=skip_zip, in_place=in_place )
java
public static final int getShort(byte[] data, int offset) {
    int result = 0;

    int i = offset;
    for (int shiftBy = 0; shiftBy < 16; shiftBy += 8) {
        result |= ((data[i] & 0xff)) << shiftBy;
        ++i;
    }

    return result;
}
python
def parse_title(self, docname): """Parse a document title as the first line starting in [A-Za-z0-9<] or fall back to the document basename if no such line exists. The cmake --help-*-list commands also depend on this convention. Return the title or False if the document file does not exist. """ env = self.document.settings.env title = self.titles.get(docname) if title is None: fname = os.path.join(env.srcdir, docname+'.rst') try: f = open(fname, 'r') except IOError: title = False else: for line in f: if len(line) > 0 and (line[0].isalnum() or line[0] == '<'): title = line.rstrip() break f.close() if title is None: title = os.path.basename(docname) self.titles[docname] = title return title
java
public static byte[] encryptBytes (PublicKey key, byte[] secret, byte[] salt) { byte[] encrypt = new byte[secret.length + salt.length]; for (int ii = 0; ii < secret.length; ii++) { encrypt[ii] = secret[ii]; } for (int ii = 0; ii < salt.length; ii++) { encrypt[secret.length + ii] = salt[ii]; } try { return getRSACipher(key).doFinal(encrypt); } catch (GeneralSecurityException gse) { log.warning("Failed to encrypt bytes", gse); } return encrypt; }
java
public static Object autoMap(Object o, Class<?> cls) { if (o == null) return o; else if (cls.isAssignableFrom(o.getClass())) { return o; } else { if (o instanceof java.sql.Date) { java.sql.Date d = (java.sql.Date) o; if (cls.isAssignableFrom(Long.class)) return d.getTime(); else if (cls.isAssignableFrom(BigInteger.class)) return BigInteger.valueOf(d.getTime()); else return o; } else if (o instanceof java.sql.Timestamp) { Timestamp t = (java.sql.Timestamp) o; if (cls.isAssignableFrom(Long.class)) return t.getTime(); else if (cls.isAssignableFrom(BigInteger.class)) return BigInteger.valueOf(t.getTime()); else return o; } else if (o instanceof java.sql.Time) { Time t = (java.sql.Time) o; if (cls.isAssignableFrom(Long.class)) return t.getTime(); else if (cls.isAssignableFrom(BigInteger.class)) return BigInteger.valueOf(t.getTime()); else return o; } else if (o instanceof Blob && cls.isAssignableFrom(byte[].class)) { return toBytes((Blob) o); } else if (o instanceof Clob && cls.isAssignableFrom(String.class)) { return toString((Clob) o); } else if (o instanceof BigInteger && cls.isAssignableFrom(Long.class)) { return ((BigInteger) o).longValue(); } else if (o instanceof BigInteger && cls.isAssignableFrom(Integer.class)) { return ((BigInteger) o).intValue(); } else if (o instanceof BigInteger && cls.isAssignableFrom(Double.class)) { return ((BigInteger) o).doubleValue(); } else if (o instanceof BigInteger && cls.isAssignableFrom(Float.class)) { return ((BigInteger) o).floatValue(); } else if (o instanceof BigInteger && cls.isAssignableFrom(Short.class)) { return ((BigInteger) o).shortValue(); } else if (o instanceof BigInteger && cls.isAssignableFrom(BigDecimal.class)) { return new BigDecimal((BigInteger) o); } else if (o instanceof BigDecimal && cls.isAssignableFrom(Double.class)) { return ((BigDecimal) o).doubleValue(); } else if (o instanceof BigDecimal && cls.isAssignableFrom(Integer.class)) { return ((BigDecimal) o).toBigInteger().intValue(); } else if (o instanceof BigDecimal && cls.isAssignableFrom(Float.class)) { return ((BigDecimal) o).floatValue(); } else if (o instanceof BigDecimal && cls.isAssignableFrom(Short.class)) { return ((BigDecimal) o).toBigInteger().shortValue(); } else if (o instanceof BigDecimal && cls.isAssignableFrom(Long.class)) { return ((BigDecimal) o).toBigInteger().longValue(); } else if (o instanceof BigDecimal && cls.isAssignableFrom(BigInteger.class)) { return ((BigDecimal) o).toBigInteger(); } else if ((o instanceof Short || o instanceof Integer || o instanceof Long) && cls.isAssignableFrom(BigInteger.class)) { return new BigInteger(o.toString()); } else if (o instanceof Number && cls.isAssignableFrom(BigDecimal.class)) { return new BigDecimal(o.toString()); } else if (o instanceof Number && cls.isAssignableFrom(Short.class)) return ((Number) o).shortValue(); else if (o instanceof Number && cls.isAssignableFrom(Integer.class)) return ((Number) o).intValue(); else if (o instanceof Number && cls.isAssignableFrom(Integer.class)) return ((Number) o).intValue(); else if (o instanceof Number && cls.isAssignableFrom(Long.class)) return ((Number) o).longValue(); else if (o instanceof Number && cls.isAssignableFrom(Float.class)) return ((Number) o).floatValue(); else if (o instanceof Number && cls.isAssignableFrom(Double.class)) return ((Number) o).doubleValue(); else return o; } }
java
protected static ActivityManager.MemoryInfo getMemInfo(Context ctx) { try { ActivityManager actManager = (ActivityManager) ctx.getSystemService(ACTIVITY_SERVICE); ActivityManager.MemoryInfo memInfo = new ActivityManager.MemoryInfo(); actManager.getMemoryInfo(memInfo); return memInfo; } catch (Exception e) { Log.e(TAG, "Error getting MemoryInfo.", e); return null; } }
python
def _make_actor_method_executor(self, method_name, method, actor_imported): """Make an executor that wraps a user-defined actor method. The wrapped method updates the worker's internal state and performs any necessary checkpointing operations. Args: method_name (str): The name of the actor method. method (instancemethod): The actor method to wrap. This should be a method defined on the actor class and should therefore take an instance of the actor as the first argument. actor_imported (bool): Whether the actor has been imported. Checkpointing operations will not be run if this is set to False. Returns: A function that executes the given actor method on the worker's stored instance of the actor. The function also updates the worker's internal state to record the executed method. """ def actor_method_executor(dummy_return_id, actor, *args): # Update the actor's task counter to reflect the task we're about # to execute. self._worker.actor_task_counter += 1 # Execute the assigned method and save a checkpoint if necessary. try: if is_class_method(method): method_returns = method(*args) else: method_returns = method(actor, *args) except Exception as e: # Save the checkpoint before allowing the method exception # to be thrown, but don't save the checkpoint for actor # creation task. if (isinstance(actor, ray.actor.Checkpointable) and self._worker.actor_task_counter != 1): self._save_and_log_checkpoint(actor) raise e else: # Handle any checkpointing operations before storing the # method's return values. # NOTE(swang): If method_returns is a pointer to the actor's # state and the checkpointing operations can modify the return # values if they mutate the actor's state. Is this okay? if isinstance(actor, ray.actor.Checkpointable): # If this is the first task to execute on the actor, try to # resume from a checkpoint. if self._worker.actor_task_counter == 1: if actor_imported: self._restore_and_log_checkpoint(actor) else: # Save the checkpoint before returning the method's # return values. self._save_and_log_checkpoint(actor) return method_returns return actor_method_executor
python
def get_url(access_token, endpoint=ams_rest_endpoint, flag=True):
    '''Get Media Services Final Endpoint URL.

    Args:
        access_token (str): A valid Azure authentication token.
        endpoint (str): Azure Media Services Initial Endpoint.
        flag (bool): flag.

    Returns:
        HTTP response. JSON body.
    '''
    return do_ams_get_url(endpoint, access_token, flag)
python
def call(self, method, args={}, retry=False, retry_policy=None, ticket=None, **props): """Send message to the same actor and return :class:`AsyncResult`.""" ticket = ticket or uuid() reply_q = self.get_reply_queue(ticket) self.cast(method, args, declare=[reply_q], reply_to=ticket, **props) return self.AsyncResult(ticket, self)
java
protected Member findMostSpecificMemberIn (List<Member> memberList) throws NoSuchMethodException { List<Member> mostSpecificMembers = new ArrayList<Member>(); for (Member member : memberList) { if (mostSpecificMembers.isEmpty()) { // First guy in is the most specific so far. mostSpecificMembers.add(member); } else { boolean moreSpecific = true; boolean lessSpecific = false; // Is member more specific than everyone in the most-specific set? for (Member moreSpecificMember : mostSpecificMembers) { if (!memberIsMoreSpecific(member, moreSpecificMember)) { // if the candidate member is not more specific than this member, then it's // not more specific than the entire set, but it may still be equivalently // specific, so we check that next moreSpecific = false; // we check for a member of equal specificity by checking to see if this // most specific member is explicitly more specific than the candidate // member. if it is more specific, the candidate member can be chucked, // otherwise we need to add the candidate member to the most-specific set lessSpecific = memberIsMoreSpecific(moreSpecificMember, member); break; } } if (moreSpecific) { // Member is the most specific now. mostSpecificMembers.clear(); mostSpecificMembers.add(member); } else if (!lessSpecific) { // Add to ambiguity set if mutually unspecific. mostSpecificMembers.add(member); } } } if (mostSpecificMembers.size() > 1) { throw new NoSuchMethodException( "Ambiguous request for member in " + _clazz.getName() + " matching given args" ); } return mostSpecificMembers.get(0); }
java
public AnimatedImageResultBuilder setDecodedFrames( List<CloseableReference<Bitmap>> decodedFrames) { mDecodedFrames = CloseableReference.cloneOrNull(decodedFrames); return this; }
python
def get(self, sid):
    """
    Constructs a ParticipantContext

    :param sid: A 34 character string that uniquely identifies this resource.

    :returns: twilio.rest.messaging.v1.session.participant.ParticipantContext
    :rtype: twilio.rest.messaging.v1.session.participant.ParticipantContext
    """
    return ParticipantContext(self._version, session_sid=self._solution['session_sid'], sid=sid, )
python
def _clear(self): '''Clear state. ''' self._depth = 0 # recursion depth self._duplicate = 0 self._incl = '' # or ' (incl. code)' self._missed = 0 # due to errors self._profile = False self._profs = {} self._seen = {} self._total = 0 # total size for k in _keys(self._excl_d): self._excl_d[k] = 0
java
public static String getRequestParameters(HttpServletRequest aRequest) {
    // set the ALGORITHM as defined for the application
    // ALGORITHM = (String) aRequest.getAttribute(Constants.ENC_ALGORITHM);
    Map m = aRequest.getParameterMap();
    return createQueryStringFromMap(m, "&").toString();
}
java
public static <T> void sort(List<T> list, Comparator<? super T> comparator) { if (list instanceof RandomAccess) { quicksort(list, comparator); } else { List<T> copy = new ArrayList<>(list); quicksort(copy, comparator); list.clear(); list.addAll(copy); } }
python
def live(self):
    """Get a live stream of timeseries readings.

    This returns an Iterable over a live stream of readings. Note
    that the result will need to be closed since the system can
    not tell when you'll be done with it. You can either call
    ``close`` on the endpoint when you're done or use the context
    management facilities of the endpoint.

    .. code-block:: python

        # Fetch a sensor
        timeseries = sensor.timeseries()
        # ensure live endpoint closed
        with timeseries.live() as live:
            # Wait for 10 readings
            first10 = list(islice(live, 10))

    Returns:
        An Iterable over the live stream of readings.
    """
    session = self._session
    url = "{}/live".format(self._base_url)

    supported_params = frozenset(['filter[port]'])
    params = {k: v for k, v in iteritems(self._params)
              if k in supported_params}

    return session.live(url, self._datapoint_class, {
        'is_aggregate': self._is_aggregate
    }, params=params)
java
public static Cookie getSSOCookieFromSSOToken() throws Exception { Subject subject = null; Cookie ltpaCookie = null; if (webAppSecConfig == null) { // if we don't have the config, we can't construct the cookie return null; } try { subject = WSSubject.getRunAsSubject(); if (subject == null) { subject = WSSubject.getCallerSubject(); } if (subject != null) { ltpaCookie = getLTPACookie(subject); } else { if (tc.isDebugEnabled()) { Tr.debug(tc, "No subjects on the thread"); } } } catch (Exception e) { if (tc.isDebugEnabled()) { Tr.debug(tc, "getSSOCookieFromSSOToken caught exception: " + e.getMessage()); } throw e; } return ltpaCookie; }
java
protected Optional<Dataset> createJobProps(Dataset dataset) throws IOException { if (this.recompactFromOutputPaths && (!latePathsFound(dataset))) { LOG.info(String.format("Skipping recompaction for %s since there is no late data in %s", new Object[] { dataset.inputPaths(), dataset.inputLatePaths() })); return Optional.absent(); } State jobProps = new State(); jobProps.addAll(this.state); jobProps.setProp(MRCompactor.COMPACTION_ENABLE_SUCCESS_FILE, false); jobProps.setProp(MRCompactor.COMPACTION_INPUT_DEDUPLICATED, this.inputDeduplicated); jobProps.setProp(MRCompactor.COMPACTION_OUTPUT_DEDUPLICATED, this.outputDeduplicated); jobProps.setProp(MRCompactor.COMPACTION_SHOULD_DEDUPLICATE, !this.inputDeduplicated && this.outputDeduplicated); if (this.recompactFromOutputPaths || !MRCompactor.datasetAlreadyCompacted(this.fs, dataset, renameSourceDirEnabled)) { if (renameSourceDirEnabled) { Set<Path> newUnrenamedDirs = MRCompactor.getDeepestLevelUnrenamedDirsWithFileExistence(this.fs, dataset.inputPaths()); if (getAllFilePathsRecursively(newUnrenamedDirs).isEmpty()) { return Optional.absent(); } LOG.info ("[{}] has unprocessed directories for first time compaction: {}", dataset.getDatasetName(), newUnrenamedDirs); dataset.overwriteInputPaths(newUnrenamedDirs); dataset.setRenamePaths(newUnrenamedDirs); } else { addInputLateFilesForFirstTimeCompaction(jobProps, dataset); } LOG.info(String.format("Created MR job properties for input %s and output %s.", dataset.inputPaths(), dataset.outputPath())); dataset.setJobProps(jobProps); return Optional.of(dataset); } else { return obtainDatasetWithJobProps (jobProps, dataset); } }
python
def get_subgraphs_as_molecules(self, use_weights=False):
    """
    Retrieve subgraphs as molecules, useful for extracting
    molecules from periodic crystals.

    Will only return unique molecules, not any duplicates
    present in the crystal (a duplicate defined as an
    isomorphic subgraph).

    :param use_weights (bool): If True, only treat subgraphs
    as isomorphic if edges have the same weights. Typically,
    this means molecules will need to have the same bond
    lengths to be defined as duplicates, otherwise bond
    lengths can differ. This is a fairly robust approach,
    but will treat e.g. enantiomers as being duplicates.

    :return: list of unique Molecules in Structure
    """

    # creating a supercell is an easy way to extract
    # molecules (and not, e.g., layers of a 2D crystal)
    # without adding extra logic
    if getattr(self, '_supercell_sg', None) is None:
        self._supercell_sg = self * (3, 3, 3)
    supercell_sg = self._supercell_sg

    # make undirected to find connected subgraphs
    supercell_sg.graph = nx.Graph(supercell_sg.graph)

    # find subgraphs
    all_subgraphs = list(nx.connected_component_subgraphs(supercell_sg.graph))

    # discount subgraphs that lie across *supercell* boundaries;
    # those would be subgraphs of the periodic crystal itself,
    # not isolated molecules
    molecule_subgraphs = []
    for subgraph in all_subgraphs:
        intersects_boundary = any([d['to_jimage'] != (0, 0, 0)
                                   for u, v, d in subgraph.edges(data=True)])
        if not intersects_boundary:
            molecule_subgraphs.append(subgraph)

    # add specie names to graph to be able to test for isomorphism
    for subgraph in molecule_subgraphs:
        for n in subgraph:
            subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))

    # now define how we test for isomorphism
    def node_match(n1, n2):
        return n1['specie'] == n2['specie']

    def edge_match(e1, e2):
        if use_weights:
            return e1['weight'] == e2['weight']
        else:
            return True

    # prune duplicate subgraphs
    unique_subgraphs = []
    for subgraph in molecule_subgraphs:
        already_present = [nx.is_isomorphic(subgraph, g,
                                            node_match=node_match,
                                            edge_match=edge_match)
                           for g in unique_subgraphs]
        if not any(already_present):
            unique_subgraphs.append(subgraph)

    # get Molecule objects for each subgraph
    molecules = []
    for subgraph in unique_subgraphs:
        coords = [supercell_sg.structure[n].coords for n
                  in subgraph.nodes()]
        species = [supercell_sg.structure[n].specie for n
                   in subgraph.nodes()]

        molecule = Molecule(species, coords)

        # shift so origin is at center of mass
        molecule = molecule.get_centered_molecule()

        molecules.append(molecule)

    return molecules
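A short usage sketch, assuming this method lives on pymatgen's StructureGraph; the input file name and the JmolNN bonding strategy below are illustrative choices, not taken from the snippet itself.

# Usage sketch (assumes pymatgen is installed; file name is hypothetical)
from pymatgen.core import Structure
from pymatgen.analysis.graphs import StructureGraph
from pymatgen.analysis.local_env import JmolNN

structure = Structure.from_file("molecular_crystal.cif")  # hypothetical input file
sg = StructureGraph.with_local_env_strategy(structure, JmolNN())
molecules = sg.get_subgraphs_as_molecules(use_weights=False)
print(len(molecules), "unique molecule(s) found")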
java
private void notifyBroadcastClose() { final IStreamAwareScopeHandler handler = getStreamAwareHandler(); if (handler != null) { try { handler.streamBroadcastClose(this); } catch (Throwable t) { log.error("Error in notifyBroadcastClose", t); } } }
java
public static String findNearest( String key, String[] group ) { return findNearest(key,Arrays.asList(group)); }
java
public void setFastCatchupTimeSecs(long secondsSinceEpoch) { lock.lock(); try { checkState(chain == null || !chain.shouldVerifyTransactions(), "Fast catchup is incompatible with fully verifying"); fastCatchupTimeSecs = secondsSinceEpoch; if (downloadPeer != null) { downloadPeer.setDownloadParameters(secondsSinceEpoch, bloomFilterMerger.getLastFilter() != null); } } finally { lock.unlock(); } }
python
def delete_api_key(apiKey, region=None, key=None, keyid=None, profile=None): ''' Deletes a given apiKey CLI Example: .. code-block:: bash salt myminion boto_apigateway.delete_api_key apikeystring ''' try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.delete_api_key(apiKey=apiKey) return {'deleted': True} except ClientError as e: return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
python
def field(self, name):
    '''
    Get the gdb.Value for the given field within the PyObject, coping with
    some python 2 versus python 3 differences.

    Various libpython types are defined using the "PyObject_HEAD" and
    "PyObject_VAR_HEAD" macros.

    In Python 2, these are defined so that "ob_type" and (for a var object)
    "ob_size" are fields of the type in question.

    In Python 3, this is defined as an embedded PyVarObject type thus:
       PyVarObject ob_base;
    so that the "ob_size" field is located inside the "ob_base" field, and
    the "ob_type" is most easily accessed by casting back to a (PyObject*).
    '''
    if self.is_null():
        raise NullPyObjectPtr(self)

    if name == 'ob_type':
        pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type())
        return pyo_ptr.dereference()[name]

    if name == 'ob_size':
        try:
            # Python 2:
            return self._gdbval.dereference()[name]
        except RuntimeError:
            # Python 3:
            return self._gdbval.dereference()['ob_base'][name]

    # General case: look it up inside the object:
    return self._gdbval.dereference()[name]
java
public static String replaceAll( String pattern, CharSequence input, String replacement) { input = new InterruptibleCharSequence(input); Matcher m = getMatcher(pattern, input); String res = m.replaceAll(replacement); recycleMatcher(m); return res; }
python
def user_agent(self): """Return the formatted user agent string.""" components = ["/".join(x) for x in self.user_agent_components.items()] return " ".join(components)
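The property simply joins each (name, version) pair with "/" and then the pairs with spaces; a standalone check of that logic, with made-up component names and versions:

# Made-up components, just to show the formatting the property produces
user_agent_components = {"mylib": "1.2.3", "python": "3.8"}
components = ["/".join(x) for x in user_agent_components.items()]
print(" ".join(components))  # -> "mylib/1.2.3 python/3.8"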
python
def remove_redundant_items(items): # type: (List[AbstractType]) -> List[AbstractType] """Filter out redundant union items.""" result = [] for item in items: for other in items: if item is not other and is_redundant_union_item(item, other): break else: result.append(item) return result
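The for/else idiom keeps an item only when no other item makes it redundant. Since is_redundant_union_item is not shown here, a self-contained toy version with a stand-in redundancy test (strict subset of sets) illustrates the same pattern:

def is_redundant(item, other):
    # stand-in predicate: an item is redundant if it is a strict subset of another
    return item < other

def remove_redundant(items):
    result = []
    for item in items:
        for other in items:
            if item is not other and is_redundant(item, other):
                break  # item is covered by some other item; drop it
        else:
            result.append(item)  # no other item subsumed it; keep it
    return result

print(remove_redundant([{1}, {1, 2}, {3}]))  # -> [{1, 2}, {3}]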
java
public Datapoint addDoubleValue(long time, double value) { initialValues(); checkType(TsdbConstants.TYPE_DOUBLE); values.add(Lists.<JsonNode> newArrayList(new LongNode(time), new DoubleNode(value))); return this; }
python
def add_to_island_expectations_dict(average_window_readcount, current_max_scaled_score, island_eligibility_threshold, island_expectations, gap_contribution): # type: ( float, int, float, Dict[int, float], float) -> Dict[int, float] """Can probably be heavily optimized. Time required to run can be seen from logging info.""" scaled_score = current_max_scaled_score + E_VALUE for index in range(current_max_scaled_score + 1, scaled_score + 1): island_expectation = 0.0 i = island_eligibility_threshold #i is the number of tags in the added window current_island = int(round(index - compute_window_score( i, average_window_readcount) / BIN_SIZE)) while (current_island >= 0): if current_island in island_expectations: island_expectation += _poisson( i, average_window_readcount) * island_expectations[ current_island] i += 1 current_island = int(round(index - compute_window_score( i, average_window_readcount) / BIN_SIZE)) island_expectation *= gap_contribution if island_expectation: island_expectations[index] = island_expectation return island_expectations
java
public static void clearRecentHistory(Context context) { SearchHistoryDatabase database = SearchHistoryDatabase.getInstance(context); SearchHistoryDatabase.deleteAllData(database); }
java
private static URI asRefBase( JsonString uri) { try { return uri == null ? null : new URI( uri.getChars().toString()); } catch( Exception e) { throw new ProjectException( String.format( "Error defining reference base=%s", uri), e); } }
java
public void purge() { if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.entry(this, tc, "purge"); synchronized(queueMonitor) { state = CLOSED; for (int i=0; i < JFapChannelConstants.MAX_PRIORITY_LEVELS-1; ++i) { queueArray[i].monitor.setActive(false); } closeWaitersMonitor.setActive(false); } if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(this, tc, "purge"); }
python
def conjugate(self): """:obj:`DualQuaternion`: The conjugate of this quaternion. """ qr_c_xyzw = quaternion_conjugate(self._qr) qd_c_xyzw = quaternion_conjugate(self._qd) qr_c_wxyz = np.roll(qr_c_xyzw, 1) qd_c_wxyz = np.roll(qd_c_xyzw, 1) return DualQuaternion(qr_c_wxyz, qd_c_wxyz)
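The np.roll calls convert the (x, y, z, w) layout returned by the conjugate helper into the (w, x, y, z) layout the constructor expects. A tiny standalone check of that reordering, with made-up values:

import numpy as np

q_xyzw = np.array([0.1, 0.2, 0.3, 0.9])           # (x, y, z, w), made-up values
q_conj_xyzw = np.append(-q_xyzw[:3], q_xyzw[3])    # quaternion conjugate: negate vector part
q_conj_wxyz = np.roll(q_conj_xyzw, 1)              # reorder to (w, x, y, z)
print(q_conj_wxyz)                                 # [ 0.9 -0.1 -0.2 -0.3]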
python
def disk(x, y, height, gaussian_width): """ Circular disk with Gaussian fall-off after the solid central region. """ disk_radius = height/2.0 distance_from_origin = np.sqrt(x**2+y**2) distance_outside_disk = distance_from_origin - disk_radius sigmasq = gaussian_width*gaussian_width if sigmasq==0.0: falloff = x*0.0 else: with float_error_ignore(): falloff = np.exp(np.divide(-distance_outside_disk*distance_outside_disk, 2*sigmasq)) return np.where(distance_outside_disk<=0,1.0,falloff)
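A usage sketch evaluating the pattern on a small grid; it assumes numpy and the module's float_error_ignore context manager are importable exactly as the function above expects:

import numpy as np

# Evaluate the disk pattern over a 5x5 grid spanning [-1, 1] in each axis
xs, ys = np.meshgrid(np.linspace(-1, 1, 5), np.linspace(-1, 1, 5))
pattern = disk(xs, ys, height=1.0, gaussian_width=0.2)
print(pattern.round(2))  # 1.0 inside the disk, Gaussian fall-off outside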
python
def zSetDefaultMeritFunctionSEQ(self, ofType=0, ofData=0, ofRef=0, pupilInteg=0,
                                rings=0, arms=0, obscuration=0, grid=0,
                                delVignetted=False, useGlass=False, glassMin=0,
                                glassMax=1000, glassEdge=0, useAir=False, airMin=0,
                                airMax=1000, airEdge=0, axialSymm=True,
                                ignoreLatCol=False, addFavOper=False, startAt=1,
                                relativeXWgt=1.0, overallWgt=1.0, configNum=0):
    """Sets the default merit function for Sequential Merit Function Editor

    Parameters
    ----------
    ofType : integer
        optimization function type (0=RMS, ...)
    ofData : integer
        optimization function data (0=Wavefront, 1=Spot Radius, ...)
    ofRef : integer
        optimization function reference (0=Centroid, ...)
    pupilInteg : integer
        pupil integration method (0=Gaussian Quadrature, 1=Rectangular Array)
    rings : integer
        rings (0=1, 1=2, 2=3, 3=4, ...)
    arms : integer
        arms (0=6, 1=8, 2=10, 3=12)
    obscuration : real
        obscuration
    grid : integer
        grid size setting (used with Rectangular Array pupil integration)
    delVignetted : boolean
        delete vignetted ?
    useGlass : boolean
        whether to use Glass settings for thickness boundary
    glassMin : real
        glass minimum thickness
    glassMax : real
        glass maximum thickness
    glassEdge : real
        glass edge thickness
    useAir : boolean
        whether to use Air settings for thickness boundary
    airMin : real
        air minimum thickness
    airMax : real
        air maximum thickness
    airEdge : real
        air edge thickness
    axialSymm : boolean
        assume axial symmetry
    ignoreLatCol : boolean
        ignore lateral color
    addFavOper : boolean
        add favorite operands
    configNum : integer
        configuration number (0=All)
    startAt : integer
        start at
    relativeXWgt : real
        relative X weight
    overallWgt : real
        overall weight
    """
    mfe = self.pMFE
    wizard = mfe.pSEQOptimizationWizard
    wizard.pType = ofType
    wizard.pData = ofData
    wizard.pReference = ofRef
    wizard.pPupilIntegrationMethod = pupilInteg
    wizard.pRing = rings
    wizard.pArm = arms
    wizard.pObscuration = obscuration
    wizard.pGrid = grid
    wizard.pIsDeleteVignetteUsed = delVignetted
    wizard.pIsGlassUsed = useGlass
    wizard.pGlassMin = glassMin
    wizard.pGlassMax = glassMax
    wizard.pGlassEdge = glassEdge
    wizard.pIsAirUsed = useAir
    wizard.pAirMin = airMin
    wizard.pAirMax = airMax
    wizard.pAirEdge = airEdge
    wizard.pIsAssumeAxialSymmetryUsed = axialSymm
    wizard.pIsIgnoreLateralColorUsed = ignoreLatCol
    wizard.pConfiguration = configNum
    wizard.pIsAddFavoriteOperandsUsed = addFavOper
    wizard.pStartAt = startAt
    wizard.pRelativeXWeight = relativeXWgt
    wizard.pOverallWeight = overallWgt
    wizard.CommonSettings.OK()
java
@Override public DataPoint[] getLastN(int n, DataPoint.Type type) { return getLastN(n, 1, type); }
java
protected Map<String, Role> parseDefinedRoles(Config config) {
    // Parse the defined Roles
    Map<String, Role> roleMap = new HashMap<>();
    if (!config.hasPath("roles")) {
        log.trace("'{}' has no roles", config);
    } else {
        log.trace("Parsing Role definitions");
        Config roleConfig = config.getConfig("roles");
        for (Map.Entry<String, ConfigValue> entry : roleConfig.entrySet()) {
            String name = entry.getKey();
            List<String> permissions = roleConfig.getStringList(name);
            Role role = new Role(name, permissions.toArray(new String[permissions.size()]));
            roleMap.put(role.getName(), role);
        }
    }
    return Collections.unmodifiableMap(roleMap);
}
java
@Override protected void doPost(HttpServletRequest request, HttpServletResponse response) throws IOException { clearSession(request); JSONObject json = ServletHelpers.readObject(request.getReader()); String username = (String) json.get("username"); String password = (String) json.get("password"); AuthenticateResult result = Authenticator.authenticate( authConfiguration, request, username, password, subject -> { LOG.info("Logging in user: {}", AuthHelpers.getUsername(subject)); setupSession(request, subject, username); sendResponse(response, subject); }); switch (result) { case AUTHORIZED: // response was sent using the authenticated subject, nothing more to do break; case NOT_AUTHORIZED: case NO_CREDENTIALS: ServletHelpers.doForbidden(response); break; } }
python
def while_loop(cond_fn, body_fn, inputs, num_loop_vars=None, has_accumulators=False, **kwargs): """While Loop. See comments above for WhileLoopOperation num_loop_vars is a hack for the multi-gpu setup. In this case, loops are generally slow, as all loop variables are placed on device. By setting num_loop_vars=k, then all of the loop variables except for the first k are handled as mtf Variables instead of loop variables, using explicit updates and control dependencies. In this case, we only return the first num_loop_vars outputs. Do not use this option on TPU, since it is unnecessary and also produces incorrect results, since xla does not respect control dependencies. Args: cond_fn: a function from n Tensors to scalar boolean Tensor body_fn: a function from n Tensors to list of n Tensors inputs: a list of n Tensors num_loop_vars: an optional integer. has_accumulators: a boolean **kwargs: additional kwargs passed to tf.while_loop Returns: a list of n Tensors. """ if num_loop_vars is None: return WhileLoopOperation(cond_fn, body_fn, inputs, tf_kwargs=kwargs, has_accumulators=has_accumulators).outputs # Turn all loop vars except for the first ones into non-loop vars. # see comments in docstring. assert num_loop_vars > 0 extra_inputs = inputs[num_loop_vars:] my_vars = [] for i, x in enumerate(extra_inputs): my_vars.append(get_variable( x.mesh, "loop_var_%d" % i, x.shape, initializer=tf.zeros_initializer(), dtype=x.dtype, collections=[tf.GraphKeys.LOCAL_VARIABLES])) my_vars = tuple(my_vars) first_input = depend( inputs[0], [assign(var, x) for var, x in zip(my_vars, extra_inputs)]) inputs = [first_input] + inputs[1:num_loop_vars] def my_cond_fn(*inputs): return cond_fn(*(inputs + my_vars)) def my_body_fn(*inputs): outputs = tuple(body_fn(*(inputs + my_vars))) extra_outputs = outputs[num_loop_vars:] first_output = depend( outputs[0], [assign(var, x) for var, x in zip(my_vars, extra_outputs)]) outputs = (first_output,) + outputs[1:num_loop_vars] return outputs return WhileLoopOperation( my_cond_fn, my_body_fn, inputs, tf_kwargs=kwargs, has_accumulators=has_accumulators).outputs
python
def select_as_multiple(self, keys, where=None, selector=None, columns=None,
                       start=None, stop=None, iterator=False,
                       chunksize=None, auto_close=False, **kwargs):
    """ Retrieve pandas objects from multiple tables

    Parameters
    ----------
    keys : a list of the tables
    selector : the table to apply the where criteria (defaults to keys[0]
        if not supplied)
    columns : the columns I want back
    start : integer (defaults to None), row number to start selection
    stop : integer (defaults to None), row number to stop selection
    iterator : boolean, return an iterator, default False
    chunksize : nrows to include in iteration, return an iterator

    Exceptions
    ----------
    raises KeyError if keys or selector is not found or keys is empty
    raises TypeError if keys is not a list or tuple
    raises ValueError if the tables are not ALL THE SAME DIMENSIONS
    """

    # default to single select
    where = _ensure_term(where, scope_level=1)
    if isinstance(keys, (list, tuple)) and len(keys) == 1:
        keys = keys[0]
    if isinstance(keys, str):
        return self.select(key=keys, where=where, columns=columns,
                           start=start, stop=stop, iterator=iterator,
                           chunksize=chunksize, **kwargs)

    if not isinstance(keys, (list, tuple)):
        raise TypeError("keys must be a list/tuple")

    if not len(keys):
        raise ValueError("keys must have a non-zero length")

    if selector is None:
        selector = keys[0]

    # collect the tables
    tbls = [self.get_storer(k) for k in keys]
    s = self.get_storer(selector)

    # validate rows
    nrows = None
    for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):
        if t is None:
            raise KeyError("Invalid table [{key}]".format(key=k))
        if not t.is_table:
            raise TypeError(
                "object [{obj}] is not a table, and cannot be used in all "
                "select as multiple".format(obj=t.pathname)
            )

        if nrows is None:
            nrows = t.nrows
        elif t.nrows != nrows:
            raise ValueError(
                "all tables must have exactly the same nrows!")

    # axis is the concatenation axis
    axis = list({t.non_index_axes[0][0] for t in tbls})[0]

    def func(_start, _stop, _where):

        # retrieve the objs, _where is always passed as a set of
        # coordinates here
        objs = [t.read(where=_where, columns=columns, start=_start,
                       stop=_stop, **kwargs) for t in tbls]

        # concat and return
        return concat(objs, axis=axis,
                      verify_integrity=False)._consolidate()

    # create the iterator
    it = TableIterator(self, s, func, where=where, nrows=nrows,
                       start=start, stop=stop, iterator=iterator,
                       chunksize=chunksize, auto_close=auto_close)

    return it.get_result(coordinates=True)
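A usage sketch against pandas' HDFStore (PyTables required); the store file name and the column split are illustrative. append_to_multiple spreads one frame across several tables, and select_as_multiple reassembles the rows matching the where clause evaluated on the selector table:

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(8, 4), columns=list("ABCD"))
with pd.HDFStore("demo_store.h5", mode="w") as store:
    # split columns across two tables, using 't1' as the selector table
    store.append_to_multiple({"t1": ["A", "B"], "t2": None}, df, selector="t1")
    result = store.select_as_multiple(["t1", "t2"], where=["A > 0"], selector="t1")
print(result.shape)  # only rows where A > 0, with all four columns reassembled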
java
public String getNaturalAnalog() { MonomerFactory factory = null; try { factory = MonomerFactory.getInstance(); } catch (Exception ex) { Logger.getLogger(Nucleotide.class.getName()).log(Level.SEVERE, "Unable to initialize monomer factory", ex); } return getNaturalAnalog(factory.getMonomerStore()); }
python
def add_dr( self, dr ): """ Add an observed interatomic distance to the g(r) data at dr. Args: dr (Float): the interatomic distance, dr. Returns: None """ this_bin = int( dr / self.dr ) if this_bin > self.number_of_bins: raise IndexError( 'dr is larger than rdf max_r' ) self.data[ this_bin ] += 1
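The surrounding class isn't shown; a minimal stand-in with the assumed attributes (dr, number_of_bins, data) exercises the same binning logic:

import numpy as np

class MiniRdf:
    # stand-in for the host g(r) class; attribute names are assumptions
    def __init__(self, max_r=10.0, number_of_bins=100):
        self.number_of_bins = number_of_bins
        self.dr = max_r / number_of_bins          # bin width
        self.data = np.zeros(number_of_bins + 1)  # counts, bins 0..number_of_bins

    def add_dr(self, dr):
        this_bin = int(dr / self.dr)
        if this_bin > self.number_of_bins:
            raise IndexError('dr is larger than rdf max_r')
        self.data[this_bin] += 1

rdf = MiniRdf()
for distance in [1.05, 1.07, 2.5]:
    rdf.add_dr(distance)
print(int(rdf.data.sum()))  # -> 3 distances binned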
java
Observable<ComapiResult<List<Conversation>>> doGetConversations(@NonNull final String token, @NonNull final String profileId, @NonNull final Scope scope) { return wrapObservable(service.getConversations(AuthManager.addAuthPrefix(token), apiSpaceId, scope.getValue(), profileId).map(mapToComapiResult()), log, "Getting conversations " + profileId + " " + scope.name()); }