Columns: language (string, 2 classes: java / python) · func_code_string (string, lengths 63 to 466k characters)
java
@Override
public TransactionalSharedLuceneLock obtainLock(Directory dir, String lockName) throws IOException {
   if (!(dir instanceof DirectoryLucene)) {
      throw new UnsupportedOperationException("TransactionalSharedLuceneLock can only be used with DirectoryLucene, got: " + dir);
   }
   DirectoryLucene infinispanDirectory = (DirectoryLucene) dir;
   Cache cache = infinispanDirectory.getDistLockCache();
   String indexName = infinispanDirectory.getIndexName();
   TransactionManager tm = cache.getAdvancedCache().getTransactionManager();
   if (tm == null) {
      ComponentStatus status = cache.getAdvancedCache().getComponentRegistry().getStatus();
      if (status.equals(ComponentStatus.RUNNING)) {
         throw new CacheException(
               "Failed looking up TransactionManager. Check if any transaction manager is associated with Infinispan cache: '"
                     + cache.getName() + "'");
      }
      else {
         throw new CacheException("Failed looking up TransactionManager: the cache is not running");
      }
   }
   TransactionalSharedLuceneLock lock = new TransactionalSharedLuceneLock(cache, indexName, lockName, tm);
   CommonLockObtainUtils.attemptObtain(lock);
   return lock;
}
java
public boolean createJob(String slurmScript, String heronExec, String[] commandArgs,
                         String topologyWorkingDirectory, long containers) {
  return createJob(slurmScript, heronExec, commandArgs, topologyWorkingDirectory, containers, null);
}
java
@SuppressWarnings("SimplifiableIfStatement")
@RestrictTo(RestrictTo.Scope.LIBRARY)
public boolean propagateToChildren(String key, int depth) {
  if ("__container".equals(key)) {
    return true;
  }
  return depth < keys.size() - 1 || keys.get(depth).equals("**");
}
java
public Filter getQueueFilter(PermissionManager permissionManager) {
    if (permissionManager.hasPermission(FixedPermissions.TECHSUPPORT)
            || permissionManager.hasPermission(FixedPermissions.ADMIN)) {
        return null;
    }
    Set<String> visibleQueues = getVisibleQueues(permissionManager);
    Filter[] filters = new Filter[visibleQueues.size()];
    int i = 0;
    for (String queueName : visibleQueues) {
        filters[i++] = new Compare.Equal("queueName", queueName);
    }
    Filter[] statusFilter = new Filter[2];
    statusFilter[0] = new Or(filters);
    statusFilter[1] = new Compare.Equal("status", TaskStatus.WAIT);
    Filter[] userFilter = new Filter[2];
    userFilter[0] = new Compare.Equal("lockedBy", permissionManager.getCurrentUser());
    userFilter[1] = new Compare.Equal("status", TaskStatus.BUSY);
    return new Or(new And(statusFilter), new And(userFilter));
}
java
public void runSyncOnNewThread(Thread newThread) {
    lock.writeLock().lock();
    try {
        if (this.timeoutTask == null) {
            throw new IllegalStateException(Tr.formatMessage(tc, "internal.error.CWMFT4999E"));
        }
        stop();
        this.stopped = false;
        long remaining = check();
        Runnable timeoutTask = () -> {
            newThread.interrupt();
        };
        start(timeoutTask, remaining);
    } finally {
        lock.writeLock().unlock();
    }
}
python
def layer(output_shape=None, new_parameters=None):
  """Create a layer class from a function."""

  def layer_decorator(call):
    """Decorating the call function."""

    def output_shape_fun(self, input_shape):
      if output_shape is None:
        return input_shape
      kwargs = self._init_kwargs  # pylint: disable=protected-access
      return output_shape(input_shape, **kwargs)

    def new_parameters_fun(self, input_shape, rng):
      if new_parameters is None:
        return ()
      kwargs = self._init_kwargs  # pylint: disable=protected-access
      return new_parameters(input_shape, rng, **kwargs)

    def call_fun(self, x, params=(), **kwargs):
      """The call function of the created class, derived from call."""
      # Merge on-call kwargs with class-kwargs.
      call_kwargs = kwargs.copy()
      call_kwargs.update(self._init_kwargs)  # pylint: disable=protected-access
      # Call with the merged kwargs.
      return call(x, params=params, **call_kwargs)

    # Set doc for python help; only copy from output_shape/new_parameters
    # when those functions were actually provided.
    call_fun.__doc__ = call.__doc__
    if output_shape is not None:
      output_shape_fun.__doc__ = output_shape.__doc__
    if new_parameters is not None:
      new_parameters_fun.__doc__ = new_parameters.__doc__

    # Create the class.
    cls = type(call.__name__, (Layer,),
               {'call': call_fun,
                'output_shape': output_shape_fun,
                'new_parameters': new_parameters_fun})
    return cls

  return layer_decorator
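The decorator above synthesizes a Layer subclass from a plain function. A minimal usage sketch, assuming a trax-style `Layer` base class whose constructor stores its keyword arguments as `_init_kwargs`, and numpy imported as `np` (the `Relu` name here is hypothetical, not taken from the source):

import numpy as np

@layer()
def Relu(x, params=(), **unused_kwargs):
    """Rectified linear activation."""
    return np.maximum(x, 0)

# Relu is now a class derived from Layer; its call/output_shape/new_parameters
# methods were synthesized by layer_decorator above.
relu = Relu()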
python
def parse(self, valstr):
    # type: (bytes) -> bool
    '''
    A method to parse an El Torito Boot Catalog out of a string.

    Parameters:
     valstr - The string to parse the El Torito Boot Catalog out of.
    Returns:
     True if the Boot Catalog is fully parsed after this entry, False otherwise.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Boot Catalog already initialized')

    if self.state == self.EXPECTING_VALIDATION_ENTRY:
        # The first entry in an El Torito boot catalog is the Validation
        # Entry. A Validation entry consists of 32 bytes (described in
        # detail in the parse_eltorito_validation_entry() method).
        self.validation_entry.parse(valstr)
        self.state = self.EXPECTING_INITIAL_ENTRY
    elif self.state == self.EXPECTING_INITIAL_ENTRY:
        # The next entry is the Initial/Default entry. An Initial/Default
        # entry consists of 32 bytes (described in detail in the
        # parse_eltorito_initial_entry() method).
        self.initial_entry.parse(valstr)
        self.state = self.EXPECTING_SECTION_HEADER_OR_DONE
    else:
        val = bytes(bytearray([valstr[0]]))
        if val == b'\x00':
            # An empty entry tells us we are done parsing El Torito. Do
            # some sanity checks.
            last_section_index = len(self.sections) - 1
            for index, sec in enumerate(self.sections):
                if sec.num_section_entries != len(sec.section_entries):
                    raise pycdlibexception.PyCdlibInvalidISO('El Torito section header specified %d entries, only saw %d' % (sec.num_section_entries, len(sec.section_entries)))
                if index != last_section_index:
                    if sec.header_indicator != 0x90:
                        raise pycdlibexception.PyCdlibInvalidISO('Intermediate El Torito section header not properly specified')
                # In theory, we should also make sure that the very last
                # section has a header_indicator of 0x91. However, we
                # have seen ISOs in the wild (FreeBSD 11.0 amd64) in which
                # this is not the case, so we skip that check.
            self._initialized = True
        elif val in (b'\x90', b'\x91'):
            # A Section Header Entry
            section_header = EltoritoSectionHeader()
            section_header.parse(valstr)
            self.sections.append(section_header)
        elif val in (b'\x88', b'\x00'):
            # A Section Entry. According to El Torito 2.4, a Section Entry
            # must follow a Section Header, but we have seen ISOs in the
            # wild that do not follow this (Mageia 4 ISOs, for instance).
            # To deal with this, we get a little complicated here. If there
            # is a previous section header, and the length of the entries
            # attached to it is less than the number of entries it should
            # have, then we attach this entry to that header. If there is
            # no previous section header, or if the previous section header
            # is already 'full', then we make this a standalone entry.
            secentry = EltoritoEntry()
            secentry.parse(valstr)
            if self.sections and len(self.sections[-1].section_entries) < self.sections[-1].num_section_entries:
                self.sections[-1].add_parsed_entry(secentry)
            else:
                self.standalone_entries.append(secentry)
        elif val == b'\x44':
            # A Section Entry Extension
            self.sections[-1].section_entries[-1].selection_criteria += valstr[2:]
        else:
            raise pycdlibexception.PyCdlibInvalidISO('Invalid El Torito Boot Catalog entry')

    return self._initialized
python
def format_output(self, rendered_widgets):
    """
    Render the ``icekit_events/recurrence_rule_widget/format_output.html``
    template with the following context:

    preset
        A choice field for preset recurrence rules.
    natural
        An input field for natural language recurrence rules.
    rfc
        A text field for RFC compliant recurrence rules.

    The default template positions the ``preset`` field above the
    ``natural`` and ``rfc`` fields.
    """
    template = loader.get_template(
        'icekit_events/recurrence_rule_widget/format_output.html')
    preset, natural, rfc = rendered_widgets
    context = Context({
        'preset': preset,
        'natural': natural,
        'rfc': rfc,
    })
    return template.render(context)
java
private static boolean isOutFile(final File filePathName, final File inputMap) {
    final File relativePath = FileUtils.getRelativePath(inputMap.getAbsoluteFile(), filePathName.getAbsoluteFile());
    return !(relativePath.getPath().length() == 0 || !relativePath.getPath().startsWith(".."));
}
python
def bestseqs(self, thresh=None):
    """
    m.bestseqs(thresh=None) -- Return all k-mers that match motif with
    a score >= thresh
    """
    if not thresh:
        if self._bestseqs:
            return self._bestseqs
        thresh = 0.8 * self.maxscore
    self._bestseqs = bestseqs(self, thresh)
    return self._bestseqs
python
def filter(self, *args, **kwargs):
    """
    Works just like the default Manager's :func:`filter` method, but
    you can pass an additional keyword argument named ``path`` specifying
    the full **path of the folder whose immediate child objects** you
    want to retrieve, e.g. ``"path/to/folder"``.
    """
    if 'path' in kwargs:
        kwargs = self.get_filter_args_with_path(False, **kwargs)
    return super(FileNodeManager, self).filter(*args, **kwargs)
java
public static Double getDouble(String propertyName) {
    return NumberUtil.toDoubleObject(System.getProperty(propertyName), null);
}
java
public static void configureSARLProject(IProject project, boolean addNatures,
        boolean configureJavaNature, boolean createFolders, IProgressMonitor monitor) {
    try {
        final SubMonitor subMonitor = SubMonitor.convert(monitor, 11);
        // Add Natures
        final IStatus status = Status.OK_STATUS;
        if (addNatures) {
            addSarlNatures(project, subMonitor.newChild(1));
            if (status != null && !status.isOK()) {
                SARLEclipsePlugin.getDefault().getLog().log(status);
            }
        }

        // Ensure SARL specific folders.
        final OutParameter<IFolder[]> sourceFolders = new OutParameter<>();
        final OutParameter<IFolder[]> testSourceFolders = new OutParameter<>();
        final OutParameter<IFolder[]> generationFolders = new OutParameter<>();
        final OutParameter<IFolder[]> testGenerationFolders = new OutParameter<>();
        final OutParameter<IFolder> generationFolder = new OutParameter<>();
        final OutParameter<IFolder> outputFolder = new OutParameter<>();
        final OutParameter<IFolder> testOutputFolder = new OutParameter<>();
        ensureSourceFolders(project, createFolders, subMonitor,
                sourceFolders, testSourceFolders,
                generationFolders, testGenerationFolders,
                generationFolder, outputFolder, testOutputFolder);

        // SARL specific configuration
        SARLPreferences.setSpecificSARLConfigurationFor(project, generationFolder.get().getProjectRelativePath());
        subMonitor.worked(1);

        // Create the Java project
        if (configureJavaNature) {
            if (!addNatures) {
                addNatures(project, subMonitor.newChild(1), JavaCore.NATURE_ID);
            }
            final IJavaProject javaProject = JavaCore.create(project);
            subMonitor.worked(1);

            // Build path
            BuildPathsBlock.flush(
                    buildClassPathEntries(javaProject,
                            sourceFolders.get(),
                            testSourceFolders.get(),
                            generationFolders.get(),
                            testGenerationFolders.get(),
                            testOutputFolder.get().getFullPath(),
                            false, true),
                    outputFolder.get().getFullPath(), javaProject, null, subMonitor.newChild(1));
        }
        subMonitor.done();
    } catch (CoreException exception) {
        SARLEclipsePlugin.getDefault().log(exception);
    }
}
java
public List<Step<JobXMLDescriptor>> getAllStep() {
    List<Step<JobXMLDescriptor>> list = new ArrayList<Step<JobXMLDescriptor>>();
    List<Node> nodeList = model.get("step");
    for (Node node : nodeList) {
        Step<JobXMLDescriptor> type = new StepImpl<JobXMLDescriptor>(this, "step", model, node);
        list.add(type);
    }
    return list;
}
python
def get_value_from_set(self, key):
    """
    Get a value from previously reserved value set.
    """
    # TODO: This should be done locally.
    # We do not really need to call centralised server if the set is already
    # reserved as the data there is immutable during execution
    key = key.lower()
    if self._remotelib:
        while True:
            value = self._remotelib.run_keyword('get_value_from_set',
                                                [key, self._my_id], {})
            if value:
                return value
            time.sleep(0.1)
            logger.debug('waiting for a value')
    else:
        return _PabotLib.get_value_from_set(self, key, self._my_id)
java
public static int getPropertyValueEnum(int property, CharSequence valueAlias) {
    int propEnum = UPropertyAliases.INSTANCE.getPropertyValueEnum(property, valueAlias);
    if (propEnum == UProperty.UNDEFINED) {
        throw new IllegalIcuArgumentException("Invalid name: " + valueAlias);
    }
    return propEnum;
}
python
def get(self, name, interval, **kwargs):
    '''
    Get the set of values for a named timeseries and interval. If timestamp
    supplied, will fetch data for the period of time in which that timestamp
    would have fallen, else returns data for "now".

    If the timeseries resolution was not defined, then returns a simple list
    of values for the interval, else returns an ordered dict where the keys
    define the resolution interval and the values are the time series data
    in that (sub)interval. This allows the user to interpolate sparse data
    sets.

    If transform is defined, will utilize one of `[mean, count, min, max, sum]`
    to process each row of data returned. If the transform is a callable, will
    pass an array of data to the function. Note that the transform will be run
    after the data is condensed. If the transform is a list, then each row will
    return a hash of the form { transform_name_or_func : transformed_data }.
    If the transform is a hash, then it should be of the form
    { transform_name : transform_func } and will return the same structure as
    a list argument.

    Raises UnknownInterval if `interval` is not one of the configured
    intervals.

    TODO: Fix this method doc
    '''
    config = self._intervals.get(interval)
    if not config:
        raise UnknownInterval(interval)

    timestamp = kwargs.get('timestamp', time.time())
    fetch = kwargs.get('fetch')
    process_row = kwargs.get('process_row') or self._process_row
    condense = kwargs.get('condense', False)
    join_rows = kwargs.get('join_rows') or self._join
    transform = kwargs.get('transform')

    # DEPRECATED handle the deprecated version of condense
    condense = kwargs.get('condensed', condense)

    # If name is a list, then join all of results. It is more efficient to
    # use a single data structure and join "in-line" but that requires a major
    # refactor of the backends, so trying this solution to start with. At a
    # minimum we'd have to rebuild the results anyway because of the potential
    # for sparse data points would result in an out-of-order result.
    if isinstance(name, (list, tuple, set)):
        results = [self._get(x, interval, config, timestamp,
                             fetch=fetch, process_row=process_row) for x in name]
        # Even resolution data is "coarse" in that it's not nested
        rval = self._join_results(results, True, join_rows)
    else:
        rval = self._get(name, interval, config, timestamp,
                         fetch=fetch, process_row=process_row)

    # If condensed, collapse the result into a single row. Adjust the step_size
    # calculation to match.
    if config['coarse']:
        step_size = config['i_calc'].step_size(timestamp)
    else:
        step_size = config['r_calc'].step_size(timestamp)
    if condense and not config['coarse']:
        condense = condense if callable(condense) else self._condense
        rval = {config['i_calc'].normalize(timestamp): condense(rval)}
        step_size = config['i_calc'].step_size(timestamp)

    if transform:
        for k, v in rval.items():
            rval[k] = self._process_transform(v, transform, step_size)
    return rval
python
def minimum_needs_unit(field, feature, parent):
    """Retrieve units of the given minimum needs field name.

    For instance:
    * minimum_needs_unit('minimum_needs__clean_water') -> 'l/weekly'
    """
    _ = feature, parent  # NOQA
    field_definition = definition(field, 'field_name')

    if field_definition:
        unit_abbreviation = None
        frequency = None
        if field_definition.get('need_parameter'):
            need = field_definition['need_parameter']
            if isinstance(need, ResourceParameter):
                unit_abbreviation = need.unit.abbreviation
                frequency = need.frequency
        elif field_definition.get('unit'):
            need_unit = field_definition.get('unit')
            unit_abbreviation = need_unit.get('abbreviation')

        if field_definition.get('frequency') and not frequency:
            frequency = field_definition.get('frequency')

        if not unit_abbreviation:
            unit_abbreviation = exposure_unit['plural_name']

        once_frequency_field_keys = [
            'minimum_needs__toilets_count_field'
        ]
        if not frequency or (
                field_definition['key'] in once_frequency_field_keys):
            return unit_abbreviation.lower()

        unit_format = '{unit_abbreviation}/{frequency}'
        return unit_format.format(
            unit_abbreviation=unit_abbreviation,
            frequency=frequency).lower()
    return None
java
private boolean isBareS3NBucketWithoutTrailingSlash(String s) {
    String s2 = s.toLowerCase();
    Matcher m = Pattern.compile("s3n://[^/]*").matcher(s2);
    return m.matches();
}
java
public static <T> T read(Class<T> clazz, Object id) {
    return execute(new ReadEntityTransaction<T>(clazz, id));
}
java
public static Boolean isNotSet(String value) {
    return null == value || value.isEmpty() || value.trim().length() == 0;
}
python
def _name_to_index(self, channels):
    """
    Return the channel indices for the specified channel names.

    Integers contained in `channel` are returned unmodified, if they
    are within the range of ``self.channels``.

    Parameters
    ----------
    channels : int or str or list of int or list of str
        Name(s) of the channel(s) of interest.

    Returns
    -------
    int or list of int
        Numerical index(ces) of the specified channels.
    """
    # Check if list, then run recursively
    if hasattr(channels, '__iter__') \
            and not isinstance(channels, six.string_types):
        return [self._name_to_index(ch) for ch in channels]

    if isinstance(channels, six.string_types):
        # channels is a string containing a channel name
        if channels in self.channels:
            return self.channels.index(channels)
        else:
            raise ValueError("{} is not a valid channel name."
                             .format(channels))

    if isinstance(channels, int):
        if (channels < len(self.channels)
                and channels >= -len(self.channels)):
            return channels
        else:
            raise ValueError("index out of range")
    else:
        raise TypeError("input argument should be an integer, string or "
                        "list of integers or strings")
java
private N overflowTreatment(N node, IndexTreePath<E> path) {
    if (settings.getOverflowTreatment().handleOverflow(this, node, path)) {
        return null;
    }
    return split(node);
}
python
def for_category(self, category, live_only=False):
    """
    Returns queryset of EntryTag instances for specified category.

    :param category: the Category instance.
    :param live_only: flag to include only "live" entries.
    :rtype: django.db.models.query.QuerySet.
    """
    filters = {'tag': category.tag}
    if live_only:
        filters.update({'entry__live': True})
    return self.filter(**filters)
python
def __write_record(self, record_type, data):
    """Write single physical record."""
    length = len(data)

    crc = crc32c.crc_update(crc32c.CRC_INIT, [record_type])
    crc = crc32c.crc_update(crc, data)
    crc = crc32c.crc_finalize(crc)

    self.__writer.write(
        struct.pack(_HEADER_FORMAT, _mask_crc(crc), length, record_type))
    self.__writer.write(data)
    self.__position += _HEADER_LENGTH + length
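A small illustration of the record header this writer emits, assuming a leveldb-style log format where `_HEADER_FORMAT` packs a 4-byte masked CRC, a 2-byte length, and a 1-byte record type (the exact format string below is an assumption, not taken from the source):

import struct

_HEADER_FORMAT = '<IHB'   # assumed: little-endian crc32c, length, record type
_HEADER_LENGTH = struct.calcsize(_HEADER_FORMAT)  # 7 bytes under this assumption

# Packing a dummy masked CRC for an 11-byte payload of record type 1:
header = struct.pack(_HEADER_FORMAT, 0xDEADBEEF, 11, 1)
assert len(header) == _HEADER_LENGTH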
python
def run(self):
    """
    Run the plugin.

    This plugin extracts the operator manifest files from an image,
    saves them as a zip archive, and returns its path

    :return: str, path to operator manifests zip file
    """
    if not self.should_run():
        return

    manifests_archive_dir = tempfile.mkdtemp()
    image = self.workflow.image
    # As in flatpak_create_oci, we specify command to prevent possible docker daemon errors.
    container_dict = self.tasker.d.create_container(image, command=['/bin/bash'])
    container_id = container_dict['Id']

    try:
        bits, stat = self.tasker.d.get_archive(container_id, IMG_MANIFESTS_PATH)
    except APIError as ex:
        msg = ('Could not extract operator manifest files. '
               'Is there a %s path in the image?' % IMG_MANIFESTS_PATH)
        self.log.debug('Error while trying to extract %s from image: %s',
                       IMG_MANIFESTS_PATH, ex)
        self.log.error(msg)
        raise RuntimeError('%s %s' % (msg, ex))
    finally:
        self.tasker.d.remove_container(container_id)

    with tempfile.NamedTemporaryFile() as extracted_file:
        for chunk in bits:
            extracted_file.write(chunk)
        extracted_file.flush()
        tar_archive = tarfile.TarFile(extracted_file.name)
        tar_archive.extractall(manifests_archive_dir)

    manifests_path = os.path.join(manifests_archive_dir, MANIFESTS_DIR_NAME)
    manifests_zipfile_path = os.path.join(manifests_archive_dir, OPERATOR_MANIFESTS_ARCHIVE)
    with zipfile.ZipFile(manifests_zipfile_path, 'w') as archive:
        for root, _, files in os.walk(manifests_path):
            for f in files:
                filedir = os.path.relpath(root, manifests_path)
                filepath = os.path.join(filedir, f)
                archive.write(os.path.join(root, f), filepath)
        manifest_files = archive.namelist()
        if not manifest_files:
            self.log.error('Empty operator manifests directory')
            raise RuntimeError('Empty operator manifests directory')
        self.log.debug("Archiving operator manifests: %s", manifest_files)

    shutil.rmtree(manifests_path)
    return manifests_zipfile_path
python
def namedb_row_factory(cursor, row):
    """
    Row factory to enforce some additional types:
    * force 'revoked' to be a bool
    """
    d = {}
    for idx, col in enumerate(cursor.description):
        if col[0] in ['revoked', 'locked', 'receive_whitelisted']:
            if row[idx] == 0:
                d[col[0]] = False
            elif row[idx] == 1:
                d[col[0]] = True
            elif row[idx] is None:
                d[col[0]] = None
            else:
                raise Exception("Invalid value for 'revoked': %s" % row[idx])

        elif col[0] in ['credit_value', 'debit_value', 'vesting_value', 'token_fee']:
            # convert back to int.
            # this is safe in Python, since Python ints don't overflow
            try:
                d[col[0]] = int(row[idx]) if row[idx] is not None else None
            except ValueError as ve:
                log.exception(ve)
                log.fatal("Caught exception while converting '{}' to an int".format(row[idx]))
                os.abort()

        else:
            d[col[0]] = row[idx]

    return d
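A minimal usage sketch with sqlite3, showing how installing the factory turns flag columns into booleans on fetch (the table and values below are hypothetical):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.row_factory = namedb_row_factory
conn.execute('CREATE TABLE names (name TEXT, revoked INT)')
conn.execute("INSERT INTO names VALUES ('demo.id', 0)")

row = conn.execute('SELECT * FROM names').fetchone()
assert row['revoked'] is False  # the integer 0 was coerced to a bool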
python
def resume_follow(self, index, body=None, params=None):
    """
    `<https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-resume-follow.html>`_

    :arg index: The name of the follow index to resume following.
    :arg body: The name of the leader index and other optional ccr
        related parameters
    """
    if index in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'index'.")
    return self.transport.perform_request(
        "POST", _make_path(index, "_ccr", "resume_follow"),
        params=params, body=body
    )
java
private void appendValue(StringBuilder builder, Class fieldClazz, Object value) {
    if (fieldClazz != null && value != null
            && (fieldClazz.isAssignableFrom(String.class) || fieldClazz.isAssignableFrom(char.class)
                    || fieldClazz.isAssignableFrom(Character.class) || value instanceof Enum)) {
        if (fieldClazz.isAssignableFrom(String.class)) {
            // To allow escape character
            value = ((String) value).replaceAll("'", "''");
        }
        builder.append("'");
        if (value instanceof Enum) {
            builder.append(((Enum) value).name());
        } else {
            builder.append(value);
        }
        builder.append("'");
    } else {
        builder.append(value);
    }
}
python
def run_score(self):
    """
    Run checks on self.files, printing raw percentage to stdout.
    """
    diffs = 0
    lines = 0
    for file in self.files:
        try:
            results = self._check(file)
        except Error as e:
            termcolor.cprint(e.msg, "yellow", file=sys.stderr)
            continue
        diffs += results.diffs
        lines += results.lines
    try:
        print(max(1 - diffs / lines, 0.0))
    except ZeroDivisionError:
        print(0.0)
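The printed score is simply the fraction of unchanged lines, floored at zero. For example, assuming 20 differing lines out of 80 checked:

diffs, lines = 20, 80
score = max(1 - diffs / lines, 0.0)
assert score == 0.75  # what run_score would print for these totals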
python
def find_field_generator_templates(obj):
    """
    Return dictionary with the names and instances of all tohu.BaseGenerator
    occurring in the given object's class & instance namespaces.
    """
    cls_dict = obj.__class__.__dict__
    obj_dict = obj.__dict__
    # debug_print_dict(cls_dict, 'cls_dict')
    # debug_print_dict(obj_dict, 'obj_dict')

    field_gens = {}
    update_with_tohu_generators(field_gens, cls_dict)
    update_with_tohu_generators(field_gens, obj_dict)
    return field_gens
python
def load_data_source(local_path, remote_source_list, open_method,
                     open_method_kwargs=dict(), remote_kwargs=dict(),
                     verbose=True):
    '''Flexible data retriever to download and cache the data files locally.

    Usage example (this makes a local copy of the ozone data file):

    :Example:

        .. code-block:: python

            from climlab.utils.data_source import load_data_source
            from xarray import open_dataset
            ozonename = 'apeozone_cam3_5_54.nc'
            ozonepath = 'http://thredds.atmos.albany.edu:8080/thredds/fileServer/CLIMLAB/ozone/' + ozonename
            data, path = load_data_source(local_path=ozonename,
                                          remote_source_list=[ozonepath],
                                          open_method=open_dataset)
            print(data)

    The order of operations is

    1. Try to read the data directly from ``local_path``
    2. If the file doesn't exist then iterate through ``remote_source_list``.
       Try to download and save the file to ``local_path`` using http request
       If that works then open the data from ``local_path``.
    3. As a last resort, try to read the data remotely from URLs in ``remote_source_list``

    In all cases the file is opened and read by the user-supplied ``open_method``
    (e.g. ``xarray.open_dataset``), with additional keyword arguments supplied
    as a dictionary through ``open_method_kwargs``.
    These are passed straight through to ``open_method``.

    Additional keyword arguments in ``remote_kwargs`` are only passed to
    ``open_method`` in option 3 above (remote access, e.g. through OpenDAP)

    Quiet all output by passing ``verbose=False``.

    Returns:
    - ``data`` is the data object returned by the successful call to ``open_method``
    - ``path`` is the path that resulted in a successful call to ``open_method``.
    '''
    try:
        path = local_path
        data = open_method(path, **open_method_kwargs)
        if verbose:
            print('Opened data from {}'.format(path))
    # except FileNotFoundError:  # this is a more specific exception in Python 3
    except IOError:  # works for Py2.7 and Py3.x
        # First try to load from remote sources and cache the file locally
        for source in remote_source_list:
            try:
                response = _download_and_cache(source, local_path)
                data = open_method(local_path, **open_method_kwargs)
                if verbose:
                    print('Data retrieved from {} and saved locally.'.format(source))
                break
            except Exception:
                continue
        else:
            # as a final resort, try opening the source remotely
            for source in remote_source_list:
                path = source
                try:
                    # This works fine for Python >= 3.5
                    # data = open_method(path, **open_method_kwargs, **remote_kwargs)
                    data = open_method(path, **merge_two_dicts(open_method_kwargs, remote_kwargs))
                    if verbose:
                        print('Opened data remotely from {}'.format(source))
                    break
                except Exception:
                    continue
            else:
                raise Exception('All data access methods have failed.')
    return data, path
python
def _parseNetDirectory(self, rva, size, magic=consts.PE32):
    """
    Parses the NET directory.
    @see: U{http://www.ntcore.com/files/dotnetformat.htm}

    @type rva: int
    @param rva: The RVA where the NET directory starts.

    @type size: int
    @param size: The size of the NET directory.

    @type magic: int
    @param magic: (Optional) The type of PE. This value could be L{consts.PE32} or L{consts.PE64}.

    @rtype: L{NETDirectory}
    @return: A new L{NETDirectory} object.
    """
    if not rva or not size:
        return None

    # create a NETDirectory class to hold the data
    netDirectoryClass = directories.NETDirectory()

    # parse the .NET Directory
    netDir = directories.NetDirectory.parse(utils.ReadData(self.getDataAtRva(rva, size)))
    netDirectoryClass.directory = netDir

    # get the MetaData RVA and Size
    mdhRva = netDir.metaData.rva.value
    mdhSize = netDir.metaData.size.value

    # read all the MetaData
    rd = utils.ReadData(self.getDataAtRva(mdhRva, mdhSize))

    # parse the MetaData headers
    netDirectoryClass.netMetaDataHeader = directories.NetMetaDataHeader.parse(rd)

    # parse the NET metadata streams
    numberOfStreams = netDirectoryClass.netMetaDataHeader.numberOfStreams.value
    netDirectoryClass.netMetaDataStreams = directories.NetMetaDataStreams.parse(rd, numberOfStreams)

    for i in range(numberOfStreams):
        stream = netDirectoryClass.netMetaDataStreams[i]
        name = stream.name.value
        rd.setOffset(stream.offset.value)
        rd2 = utils.ReadData(rd.read(stream.size.value))
        stream.info = []
        if name == "#~" or i == 0:
            stream.info = rd2
        elif name == "#Strings" or i == 1:
            while len(rd2) > 0:
                offset = rd2.tell()
                stream.info.append({offset: rd2.readDotNetString()})
        elif name == "#US" or i == 2:
            while len(rd2) > 0:
                offset = rd2.tell()
                stream.info.append({offset: rd2.readDotNetUnicodeString()})
        elif name == "#GUID" or i == 3:
            while len(rd2) > 0:
                offset = rd2.tell()
                stream.info.append({offset: rd2.readDotNetGuid()})
        elif name == "#Blob" or i == 4:
            while len(rd2) > 0:
                offset = rd2.tell()
                stream.info.append({offset: rd2.readDotNetBlob()})

    for i in range(numberOfStreams):
        stream = netDirectoryClass.netMetaDataStreams[i]
        name = stream.name.value
        if name == "#~" or i == 0:
            stream.info = directories.NetMetaDataTables.parse(stream.info, netDirectoryClass.netMetaDataStreams)

    # parse .NET resources

    # get the Resources RVA and Size
    resRva = netDir.resources.rva.value
    resSize = netDir.resources.size.value

    # read all the resource data
    rd = utils.ReadData(self.getDataAtRva(resRva, resSize))
    resources = []
    for i in netDirectoryClass.netMetaDataStreams[0].info.tables["ManifestResource"]:
        offset = i["offset"]
        rd.setOffset(offset)
        size = rd.readDword()
        data = rd.read(size)
        if data[:4] == "\xce\xca\xef\xbe":
            data = directories.NetResources.parse(utils.ReadData(data))
        resources.append({"name": i["name"], "offset": offset + 4, "size": size, "data": data})
    netDirectoryClass.directory.resources.info = resources

    return netDirectoryClass
python
def set_client_info(cls, client_id, client_ver):
    """
    .. py:function:: set_client_info(cls, client_id, client_ver)

    Set the client information passed to the API; calling this interface is optional.

    :param client_id: str, client identifier
    :param client_ver: int, client version number
    :return: None
    :example:

    .. code:: python

        from futuquant import *
        SysConfig.set_client_info("MyFutuQuant", 0)
        quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
        quote_ctx.close()
    """
    SysConfig.CLINET_ID = client_id
    SysConfig.CLIENT_VER = client_ver
python
def get_as_datetime(self, key):
    """
    Converts map element into a Date or returns the current date if conversion is not possible.

    :param key: an index of element to get.

    :return: Date value of the element or the current date if conversion is not supported.
    """
    value = self.get(key)
    return DateTimeConverter.to_datetime(value)
java
@Nonnull
public static List<TimerTask> unwraps(@Nullable Collection<? extends TimerTask> tasks) {
    if (null == tasks) return Collections.emptyList();

    List<TimerTask> copy = new ArrayList<TimerTask>();
    for (TimerTask task : tasks) {
        if (!(task instanceof TtlTimerTask)) copy.add(task);
        else copy.add(((TtlTimerTask) task).getTimerTask());
    }
    return copy;
}
java
public byte[] requestMFPSchemata(byte[] schemaData)
        throws SIConnectionLostException, SIConnectionDroppedException,
               SIConnectionUnavailableException, SIErrorException {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(this, tc, "requestMFPSchemata");
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
        SibTr.bytes(this, tc, schemaData);

    byte[] returnSchemaData = null;

    CommsByteBuffer request = getCommsByteBuffer();
    request.wrap(schemaData);

    CommsByteBuffer reply = jfapExchange(request,
                                         JFapChannelConstants.SEG_REQUEST_SCHEMA,
                                         JFapChannelConstants.PRIORITY_HIGHEST,
                                         true);
    try {
        short err = reply.getCommandCompletionCode(JFapChannelConstants.SEG_REQUEST_SCHEMA_R);
        if (err != CommsConstants.SI_NO_EXCEPTION) {
            checkFor_SIConnectionUnavailableException(reply, err);
            checkFor_SIErrorException(reply, err);
            defaultChecker(reply, err);
        }

        // Now get the bytes
        returnSchemaData = reply.getRemaining();
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
            SibTr.bytes(this, tc, returnSchemaData);
    } finally {
        if (reply != null) reply.release();
    }

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(this, tc, "requestMFPSchemata", returnSchemaData);
    return returnSchemaData;
}
java
public void setPrefixes(NamespaceSupport nsSupport, boolean excludeXSLDecl)
        throws TransformerException {
    Enumeration decls = nsSupport.getDeclaredPrefixes();
    while (decls.hasMoreElements()) {
        String prefix = (String) decls.nextElement();

        if (null == m_declaredPrefixes)
            m_declaredPrefixes = new ArrayList();

        String uri = nsSupport.getURI(prefix);

        if (excludeXSLDecl && uri.equals(Constants.S_XSLNAMESPACEURL))
            continue;

        // System.out.println("setPrefixes - "+prefix+", "+uri);
        XMLNSDecl decl = new XMLNSDecl(prefix, uri, false);
        m_declaredPrefixes.add(decl);
    }
}
python
def _watch_folder(folder, destination, compiler_args):
    """Compares "modified" timestamps against the "compiled" dict,
    calls compiler if necessary."""
    for dirpath, dirnames, filenames in os.walk(folder):
        for filename in filenames:
            # Ignore filenames starting with ".#" for Emacs compatibility
            if watched_extension(filename) and not filename.startswith('.#'):
                fullpath = os.path.join(dirpath, filename)
                subfolder = os.path.relpath(dirpath, folder)
                mtime = os.stat(fullpath).st_mtime

                # Create subfolders in target directory if they don't exist
                compiled_folder = os.path.join(destination, subfolder)
                if not os.path.exists(compiled_folder):
                    os.makedirs(compiled_folder)

                compiled_path = _compiled_path(compiled_folder, filename)
                if (fullpath not in compiled or
                        compiled[fullpath] < mtime or
                        not os.path.isfile(compiled_path)):
                    compile_file(fullpath, compiled_path, compiler_args)
                    compiled[fullpath] = mtime
python
def initialize(self):
    """Instantiates the cache area to be ready for updates"""
    self.Base.metadata.create_all(self.session.bind)
    logger.debug("initialized sqlalchemy orm tables")
python
def into_view(self):
    """Converts the index into a view"""
    try:
        return View._from_ptr(rustcall(
            _lib.lsm_index_into_view,
            self._get_ptr()))
    finally:
        self._ptr = None
java
public static Attr[] toAttrArray(Document doc, Object o) throws PageException {
    // Node[]
    if (o instanceof Node[]) {
        Node[] nodes = (Node[]) o;
        if (_isAllOfSameType(nodes, Node.ATTRIBUTE_NODE)) return (Attr[]) nodes;

        Attr[] attres = new Attr[nodes.length];
        for (int i = 0; i < nodes.length; i++) {
            attres[i] = toAttr(doc, nodes[i]);
        }
        return attres;
    }
    // Collection
    else if (o instanceof Collection) {
        Collection coll = (Collection) o;
        Iterator<Entry<Key, Object>> it = coll.entryIterator();
        Entry<Key, Object> e;
        List<Attr> attres = new ArrayList<Attr>();
        Attr attr;
        Collection.Key k;
        while (it.hasNext()) {
            e = it.next();
            k = e.getKey();
            attr = doc.createAttribute(Decision.isNumber(k.getString()) ? "attribute-" + k.getString() : k.getString());
            attr.setValue(Caster.toString(e.getValue()));
            attres.add(attr);
        }
        return attres.toArray(new Attr[attres.size()]);
    }

    // Node Map and List
    Node[] nodes = _toNodeArray(doc, o);
    if (nodes != null) return toAttrArray(doc, nodes);

    // Single Text Node
    try {
        return new Attr[] { toAttr(doc, o) };
    }
    catch (ExpressionException e) {
        throw new XMLException("can't cast Object of type " + Caster.toClassName(o) + " to a XML Attributes Array");
    }
}
python
def get_ratefactor(self, base, code):
    """
    Return the Decimal currency exchange rate factor of 'code' compared to 1
    'base' unit, or RuntimeError

    Yahoo currently uses USD as base currency, but here we detect it with
    get_baserate
    """
    raise RuntimeError("%s Deprecated: API withdrawn in February 2018" % self.name)
    # The code below is unreachable: the API this provider relied on was
    # withdrawn, so the method now always raises.
    try:
        rate = self.get_rate(code)
    except RuntimeError:
        # fallback
        return self.get_singlerate(base, code)
    self.check_ratebase(rate)
    ratefactor = Decimal(rate['price'])

    if base == self.base:
        return ratefactor
    else:
        return self.ratechangebase(ratefactor, self.base, base)
python
def change_parent(sender, instance, **kwargs):
    """
    When the given flashcard has changed, look at term and context and update
    the corresponding item relation.
    """
    if instance.id is None:
        return

    if len({'term', 'term_id'} & set(instance.changed_fields)) != 0:
        diff = instance.diff
        parent = diff['term'][0] if 'term' in diff else diff['term_id'][0]
        child_id = instance.item_id
        if parent is not None:
            parent_id = parent.item_id if isinstance(parent, Term) else Term.objects.get(pk=parent).item_id
            ItemRelation.objects.filter(parent_id=parent_id, child_id=child_id).delete()
        ItemRelation.objects.get_or_create(parent_id=instance.term.item_id, child_id=child_id, visible=True)

    if len({'term_secondary', 'term_secondary_id'} & set(instance.changed_fields)) != 0:
        diff = instance.diff
        child_id = instance.item_id
        parent = diff['term_secondary'][0] if 'term_secondary' in diff else diff['term_secondary_id'][0]
        if parent is not None:
            parent_id = parent.item_id if isinstance(parent, Term) else Term.objects.get(pk=parent).item_id
            ItemRelation.objects.filter(parent_id=parent_id, child_id=child_id).delete()
        if instance.term_secondary is not None or instance.term_secondary_id is not None:
            ItemRelation.objects.get_or_create(parent_id=instance.term_secondary.item_id, child_id=child_id, visible=True)

    if len({'context', 'context_id'} & set(instance.changed_fields)) != 0:
        diff = instance.diff
        parent = diff['context'][0] if 'context' in diff else diff['context_id'][0]
        child_id = instance.item_id
        if parent is not None:
            parent_id = parent.item_id if isinstance(parent, Context) else Context.objects.get(pk=parent).item_id
            ItemRelation.objects.filter(parent_id=parent_id, child_id=child_id).delete()
        ItemRelation.objects.get_or_create(parent_id=instance.context.item_id, child_id=child_id, visible=True)
python
def signature(self, name, file_name, file_type, file_text, **kwargs):
    """Add Signature data to Batch object.

    Valid file_types:
    + Snort ®
    + Suricata
    + YARA
    + ClamAV ®
    + OpenIOC
    + CybOX ™
    + Bro
    + Regex
    + SPL - Splunk ® Search Processing Language

    Args:
        name (str): The name for this Group.
        file_name (str): The name for the attached signature for this Group.
        file_type (str): The signature type for this Group.
        file_text (str): The signature content for this Group.
        date_added (str, kwargs): The date timestamp the Indicator was created.
        xid (str, kwargs): The external id for this Group.

    Returns:
        obj: An instance of Signature.
    """
    group_obj = Signature(name, file_name, file_type, file_text, **kwargs)
    return self._group(group_obj)
java
public static void setInt(MemorySegment[] segments, int offset, int value) {
    if (inFirstSegment(segments, offset, 4)) {
        segments[0].putInt(offset, value);
    } else {
        setIntMultiSegments(segments, offset, value);
    }
}
java
public final <K> Date parse(K value) {
    if (value == null) {
        return null;
    }
    try {
        if (value instanceof Date) {
            Date date = (Date) value;
            if (date.getTime() == Long.MAX_VALUE || date.getTime() == Long.MIN_VALUE) {
                return date;
            } else {
                String string = formatter.get().format(date);
                return formatter.get().parse(string);
            }
        } else if (value instanceof UUID) {
            long timestamp = UUIDGen.unixTimestamp((UUID) value);
            Date date = new Date(timestamp);
            return formatter.get().parse(formatter.get().format(date));
        } else if (Number.class.isAssignableFrom(value.getClass())) {
            Long number = ((Number) value).longValue();
            return formatter.get().parse(number.toString());
        } else {
            return formatter.get().parse(value.toString());
        }
    } catch (Exception e) {
        throw new IndexException(e, "Error parsing {} with value '{}' using date pattern {}",
                                 value.getClass().getSimpleName(), value, pattern);
    }
}
java
public static <T> BigInteger sumOfBigInteger(Iterable<T> iterable, Function<? super T, BigInteger> function) {
    if (iterable instanceof List) {
        return ListIterate.sumOfBigInteger((List<T>) iterable, function);
    }
    if (iterable != null) {
        return IterableIterate.sumOfBigInteger(iterable, function);
    }
    throw new IllegalArgumentException("Cannot perform a sumOfBigInteger on null");
}
java
@Override
public void init(CollectorManager collectorMgr) {
    try {
        this.collectorMgr = collectorMgr;
        this.collectorMgr.subscribe(this, sourceIds);
    } catch (Exception e) {
        // Subscription failures are swallowed; the handler simply stays unregistered.
    }
}
python
def get_prepared_include_exclude(attributes):
    """Return tuple with prepared __include__ and __exclude__ attributes.

    :type attributes: dict
    :rtype: tuple
    """
    attrs = dict()
    for attr in ('__include__', '__exclude__'):
        attrs[attr] = tuple([item.name for item in attributes.get(attr, tuple())])
    return attrs['__include__'], attrs['__exclude__']
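A quick illustration, assuming the items held under `__include__`/`__exclude__` are objects with a `name` attribute (e.g. model fields); missing keys default to empty tuples:

from collections import namedtuple

Field = namedtuple('Field', 'name')  # hypothetical stand-in for a field object
attrs = {'__include__': (Field('id'), Field('title'))}
assert get_prepared_include_exclude(attrs) == (('id', 'title'), ())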
java
private void completeMultipart(String bucketName, String objectName, String uploadId, Part[] parts)
        throws InvalidBucketNameException, NoSuchAlgorithmException, InsufficientDataException, IOException,
               InvalidKeyException, NoResponseException, XmlPullParserException, ErrorResponseException,
               InternalException {
    Map<String, String> queryParamMap = new HashMap<>();
    queryParamMap.put(UPLOAD_ID, uploadId);

    CompleteMultipartUpload completeManifest = new CompleteMultipartUpload(parts);

    HttpResponse response = executePost(bucketName, objectName, null, queryParamMap, completeManifest);

    // Fixing issue https://github.com/minio/minio-java/issues/391
    String bodyContent = "";
    Scanner scanner = new Scanner(response.body().charStream());
    try {
        // read entire body stream to string.
        scanner.useDelimiter("\\A");
        if (scanner.hasNext()) {
            bodyContent = scanner.next();
        }
    } finally {
        response.body().close();
        scanner.close();
    }

    bodyContent = bodyContent.trim();
    if (!bodyContent.isEmpty()) {
        ErrorResponse errorResponse = new ErrorResponse(new StringReader(bodyContent));
        if (errorResponse.code() != null) {
            throw new ErrorResponseException(errorResponse, response.response());
        }
    }
}
python
def seq_seqhash(seq, normalize=True):
    """returns the 24-byte truncated digest of sequence `seq`

    >>> seq_seqhash("")
    'z4PhNX7vuL3xVChQ1m2AB9Yg5AULVxXc'

    >>> seq_seqhash("ACGT")
    'aKF498dAxcJAqme6QYQ7EZ07-fiw8Kw2'

    >>> seq_seqhash("acgt")
    'aKF498dAxcJAqme6QYQ7EZ07-fiw8Kw2'

    >>> seq_seqhash("acgt", normalize=False)
    'eFwawHHdibaZBDcs9kW3gm31h1NNJcQe'
    """
    seq = normalize_sequence(seq) if normalize else seq
    return str(vmc_digest(seq, digest_size=24))
python
def _grains():
    '''
    Get the grains from the proxied device
    '''
    (username, password) = _find_credentials()
    r = salt.modules.dracr.system_info(host=__pillar__['proxy']['host'],
                                       admin_username=username,
                                       admin_password=password)

    if r.get('retcode', 0) == 0:
        GRAINS_CACHE = r
    else:
        GRAINS_CACHE = {}

    GRAINS_CACHE.update(salt.modules.dracr.inventory(host=__pillar__['proxy']['host'],
                                                     admin_username=username,
                                                     admin_password=password))
    return GRAINS_CACHE
java
public static <K, V> V createIfAbsentUnchecked(final ConcurrentMap<K, V> map,
        final K key, final ConcurrentInitializer<V> init) {
    try {
        return createIfAbsent(map, key, init);
    } catch (final ConcurrentException cex) {
        throw new ConcurrentRuntimeException(cex.getCause());
    }
}
java
private void evictMap(SampleableConcurrentHashMap<?, ?> map, int triggeringEvictionSize, int afterEvictionSize) {
    map.purgeStaleEntries();
    int mapSize = map.size();
    if (mapSize - triggeringEvictionSize >= 0) {
        for (SampleableConcurrentHashMap.SamplingEntry entry : map.getRandomSamples(mapSize - afterEvictionSize)) {
            map.remove(entry.getEntryKey());
        }
    }
}
python
def once(ctx, name):
    """Run kibitzr checks once and exit"""
    from kibitzr.app import Application
    app = Application()
    sys.exit(app.run(once=True, log_level=ctx.obj['log_level'], names=name))
java
public static int checkEncodedDataLengthInChars(String data, int[] mainTable, int[] extentionTable) {
    if (data == null) return 0;
    if (mainTable == null) return 0;
    int cnt = 0;
    for (int i1 = 0; i1 < data.length(); i1++) {
        char c = data.charAt(i1);
        boolean found = false;
        for (int i = 0; i < mainTable.length; i++) {
            if (mainTable[i] == c) {
                found = true;
                cnt++;
                break;
            }
        }
        if (!found && extentionTable != null) {
            for (int i = 0; i < extentionTable.length; i++) {
                if (c != 0 && extentionTable[i] == c) {
                    found = true;
                    cnt += 2;
                    break;
                }
            }
        }
        if (!found) cnt++;
    }
    return cnt;
}
java
private String removePrefix(String key) {
    if (key.startsWith(PREFIX_TYPE)) {
        return key.substring(PREFIX_TYPE.length());
    }
    return key;
}
java
@TargetApi(Build.VERSION_CODES.FROYO)
public static File getExternalFilesDirForDownloads(Context context) {
    return context.getExternalFilesDir(Environment.DIRECTORY_DOWNLOADS);
}
java
@Override
public EClass getObjectAdded() {
    if (objectAddedEClass == null) {
        objectAddedEClass = (EClass) EPackage.Registry.INSTANCE
                .getEPackage(StorePackage.eNS_URI).getEClassifiers().get(37);
    }
    return objectAddedEClass;
}
python
def getPutData(request):
    """Adds raw post to the PUT and DELETE querydicts on the request so
    they behave like POST

    :param request: Request object to add PUT/DELETE to
    :type request: Request
    """
    dataDict = {}
    data = request.body

    for n in urlparse.parse_qsl(data):
        dataDict[n[0]] = n[1]

    setattr(request, 'PUT', dataDict)
    setattr(request, 'DELETE', dataDict)
python
def command(self, ns, raw, **kw):
    """
    Executes command.
    { "op" : "c", "ns" : "testdb.$cmd", "o" : { "drop" : "fs.files" } }
    """
    try:
        dbname = raw['ns'].split('.', 1)[0]
        self.dest[dbname].command(raw['o'], check=True)
    except OperationFailure as e:
        logging.warning(e)
python
def convert_boxed_text_elements(self):
    """
    Textual material that is part of the body of text but outside the
    flow of the narrative text, for example, a sidebar, marginalia, text
    insert (whether enclosed in a box or not), caution, tip, note box, etc.

    <boxed-text> elements for PLoS appear to all contain a single <sec>
    element which frequently contains a <title> and various other content.
    This method will elevate the <sec> element, adding class information as
    well as processing the title.
    """
    for boxed_text in self.main.getroot().findall('.//boxed-text'):
        sec_el = boxed_text.find('sec')
        if sec_el is not None:
            sec_el.tag = 'div'
            title = sec_el.find('title')
            if title is not None:
                title.tag = 'b'
            sec_el.attrib['class'] = 'boxed-text'
            if 'id' in boxed_text.attrib:
                sec_el.attrib['id'] = boxed_text.attrib['id']
            replace(boxed_text, sec_el)
        else:
            div_el = etree.Element('div', {'class': 'boxed-text'})
            if 'id' in boxed_text.attrib:
                div_el.attrib['id'] = boxed_text.attrib['id']
            append_all_below(div_el, boxed_text)
            replace(boxed_text, div_el)
java
public void activate(InetAddress host, int port, Collection<String> nonProxyHosts) {
    for (String scheme : new String[] {"http", "https"}) {
        String currentProxyHost = System.getProperty(scheme + ".proxyHost");
        String currentProxyPort = System.getProperty(scheme + ".proxyPort");
        if (currentProxyHost != null) {
            originalProxies.put(scheme, new InetSocketAddress(currentProxyHost, Integer.parseInt(currentProxyPort)));
        }
        System.setProperty(scheme + ".proxyHost", new InetSocketAddress(host, port).getHostString());
        System.setProperty(scheme + ".proxyPort", Integer.toString(port));
    }

    String currentNonProxyHosts = System.getProperty("http.nonProxyHosts");
    if (currentNonProxyHosts == null) {
        originalNonProxyHosts.clear();
    } else {
        for (String nonProxyHost : Splitter.on('|').split(currentNonProxyHosts)) {
            originalNonProxyHosts.add(nonProxyHost);
        }
    }
    System.setProperty("http.nonProxyHosts", Joiner.on('|').join(nonProxyHosts));
}
java
private void visitFrameAfterMethodReturnCallback() {
    if (!visitFramesAfterCallbacks) return;

    Type returnType = getReturnTypeForTrace();
    if (!Type.VOID_TYPE.equals(returnType) && !isConstructor()) {
        Object typeDescriptor = null;
        switch (returnType.getSort()) {
            case Type.BOOLEAN:
            case Type.BYTE:
            case Type.CHAR:
            case Type.INT:
            case Type.SHORT:
                typeDescriptor = INTEGER;
                break;
            case Type.DOUBLE:
                typeDescriptor = DOUBLE;
                break;
            case Type.FLOAT:
                typeDescriptor = FLOAT;
                break;
            case Type.LONG:
                typeDescriptor = LONG;
                break;
            default:
                typeDescriptor = returnType.getInternalName();
                break;
        }
        visitFrame(F_SAME1, 0, null, 1, new Object[] { typeDescriptor });
    } else {
        visitFrame(F_SAME, 0, null, 0, null);
    }
}
python
def complex_filter(self, filter_obj):
    """
    Returns a new QuerySet instance with filter_obj added to the filters.

    filter_obj can be a Q object (or anything with an add_to_query()
    method) or a dictionary of keyword lookup arguments.

    This exists to support framework features such as 'limit_choices_to',
    and usually it will be more natural to use other methods.
    """
    if isinstance(filter_obj, Filter):
        clone = self._clone()
        clone._filters.add(filter_obj)
        return clone
    return self._filter_or_exclude(None, **filter_obj)
python
def reader(self):
    """
    Reads raw text from the connection stream.
    Ensures proper exception handling.

    :return str: decoded request body
    """
    request_stream = ''
    with self.connect() as request:
        if request.msg != 'OK':
            raise HTTPError
        request_stream = request.read().decode('utf-8')
    return request_stream
python
def remove_old_dumps(connection, container: str, days=None):
    """Remove dumps older than x days
    """
    if not days:
        return
    if days < 20:
        LOG.error('A minimum of 20 backups is stored')
        return
    options = return_file_objects(connection, container)
    for dt, o_info in options:
        now = datetime.datetime.now()
        delta = now - dt
        if delta.days > days:
            LOG.info('Deleting %s', o_info['name'])
            objectstore.delete_object(connection, container, o_info)
java
private UsernamePasswordCredentials getCreds(String url) throws Exception {
    url = normalizeURL(url);
    url = url.substring(url.indexOf("/") + 2);
    UsernamePasswordCredentials longestMatch = null;
    int longestMatchLength = 0;
    Iterator<String> iter = m_creds.keySet().iterator();
    while (iter.hasNext()) {
        String realmPath = (String) iter.next();
        if (url.startsWith(realmPath)) {
            int matchLength = realmPath.length();
            if (matchLength > longestMatchLength) {
                longestMatchLength = matchLength;
                longestMatch = (UsernamePasswordCredentials) m_creds.get(realmPath);
            }
        }
    }
    return longestMatch;
}
java
public static <T1, T2, T3, R> BiFunction<T1, T2, R> compose(Function<T3, R> unary, BiFunction<T1, T2, T3> binary) {
    dbc.precondition(unary != null, "cannot compose a null unary function");
    dbc.precondition(binary != null, "cannot compose a null binary function");
    return binary.andThen(unary);
}
java
public static CompressionMetadata create(String dataFilePath) {
    Descriptor desc = Descriptor.fromFilename(dataFilePath);
    return new CompressionMetadata(desc.filenameFor(Component.COMPRESSION_INFO),
                                   new File(dataFilePath).length(),
                                   desc.version.hasPostCompressionAdlerChecksums);
}
python
def create_resource(self, parent_id=""):
    """Create the specified resource.

    Args:
        parent_id (str): The resource ID of the parent resource in API Gateway
    """
    resource_name = self.trigger_settings.get('resource', '')
    resource_name = resource_name.replace('/', '')
    if not self.resource_id:
        created_resource = self.client.create_resource(
            restApiId=self.api_id,
            parentId=parent_id,
            pathPart=resource_name)
        self.resource_id = created_resource['id']
        self.log.info("Successfully created resource")
    else:
        self.log.info("Resource already exists. To update resource please delete existing resource: %s",
                      resource_name)
java
@Override
protected void onValidation(final String contactNode, final String defaultPort) {
    if (contactNode != null) {
        // allow configuration as comma-separated list of host:port
        // addresses without the default port
        boolean allAddressesHaveHostAndPort = true;
        for (String node : contactNode.split(Constants.COMMA)) {
            if (StringUtils.countMatches(node, Constants.COLON) == 1) {
                // node is given with hostname and port
                // count == 1 is to exclude IPv6 addresses
                if (StringUtils.isNumeric(node.split(Constants.COLON)[1])) {
                    continue;
                }
            }
            allAddressesHaveHostAndPort = false;
            break;
        }
        if (allAddressesHaveHostAndPort) {
            return;
        }
    }
    // fall back to the generic validation which requires the default port
    // to be set
    super.onValidation(contactNode, defaultPort);
}
java
public static Map<?, ?> parseJson(String body) {
    JSONReader jr = new JSONValidatingReader();
    Object obj = jr.read(body);
    if (obj instanceof Map<?, ?>) {
        return (Map<?, ?>) obj;
    } else {
        return null;
    }
}
python
def close(self, virtual_account_id, data={}, **kwargs):
    """Close Virtual Account from given Id

    Args:
        virtual_account_id : Id for which Virtual Account objects has to be closed
    """
    url = "{}/{}".format(self.base_url, virtual_account_id)
    data['status'] = 'closed'
    return self.patch_url(url, data, **kwargs)
java
public ApiResponse<List<Object>> initializePostWithHttpInfo(InitializeRequest request) throws ApiException {
    com.squareup.okhttp.Call call = initializePostValidateBeforeCall(request, null, null);
    Type localVarReturnType = new TypeToken<List<Object>>(){}.getType();
    return apiClient.execute(call, localVarReturnType);
}
python
def is_all_field_none(self):
    """
    :rtype: bool
    """
    if self._user_alias is not None:
        return False

    if self._alias is not None:
        return False

    if self._counterparty_alias is not None:
        return False

    if self._status is not None:
        return False

    if self._sub_status is not None:
        return False

    if self._time_start_desired is not None:
        return False

    if self._time_start_actual is not None:
        return False

    if self._time_end is not None:
        return False

    if self._attachment is not None:
        return False

    return True
java
protected boolean isAccessPermitted(String resourceName) {
    return !resourceName.startsWith(JawrConstant.WEB_INF_DIR_PREFIX)
            && !resourceName.startsWith(JawrConstant.META_INF_DIR_PREFIX);
}
python
def dispose(self):
    """Disposes of this events writer manager, making it no longer usable.

    Call this method when this object is done being used in order to clean
    up resources and handlers. This method should only ever be called once.
    """
    with self._lock:
        self._events_writer.Close()
        self._events_writer = None
python
def split_path(path_):
    """
    Split the requested path into (locale, path).

    locale will be empty if it isn't found.
    """
    path = path_.lstrip('/')

    # Use partition instead of split since it always returns 3 parts
    first, _, rest = path.partition('/')

    lang = first.lower()
    if lang in settings.LANGUAGE_URL_MAP:
        return settings.LANGUAGE_URL_MAP[lang], rest
    else:
        supported = find_supported(first)
        if len(supported):
            return supported[0], rest
        else:
            return '', path
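A self-contained behavior sketch with stubbed module globals (the map and `find_supported` below are hypothetical stand-ins for the real Django settings and helper):

class settings:
    LANGUAGE_URL_MAP = {'en': 'en', 'de': 'de'}

def find_supported(code):
    # naive prefix match against the configured languages
    return [loc for lang, loc in settings.LANGUAGE_URL_MAP.items()
            if code.lower().startswith(lang)]

assert split_path('/en/docs/install') == ('en', 'docs/install')
assert split_path('/de-AT/docs') == ('de', 'docs')   # matched via find_supported
assert split_path('/fr/docs') == ('', 'fr/docs')     # no locale found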
java
@Override
public void storeBundle(String bundleName, JoinableResourceBundleContent bundleResourcesContent) {
    // Text version
    String bundleContent = bundleResourcesContent.getContent().toString();
    storeBundle(bundleName, bundleContent, false, textDirPath);

    // binary version
    storeBundle(bundleName, bundleContent, true, gzipDirPath);
}
python
def _decode_next_layer(self, dict_, proto=None, length=None, *, version=4, ipv6_exthdr=None):
    """Decode next layer extractor.

    Positional arguments:
        * dict_ -- dict, info buffer
        * proto -- str, next layer protocol name
        * length -- int, valid (not padding) length

    Keyword arguments:
        * version -- int, IP version (4 in default)
                     <keyword> 4 / 6
        * ipv6_exthdr -- ProtoChain, ProtoChain of IPv6 extension headers

    Returns:
        * dict -- current protocol with next layer extracted
    """
    if self._onerror:
        next_ = beholder(self._import_next_layer)(self, proto, length, version=version)
    else:
        next_ = self._import_next_layer(proto, length, version=version)
    info, chain = next_.info, next_.protochain

    # make next layer protocol name
    layer = next_.alias.lower()
    # proto = next_.__class__.__name__

    # write info and protocol chain into dict
    dict_[layer] = info
    self._next = next_
    if ipv6_exthdr is not None:
        for proto in reversed(ipv6_exthdr):
            chain = ProtoChain(proto.__class__, proto.alias, basis=chain)
    self._protos = ProtoChain(self.__class__, self.alias, basis=chain)
    return dict_
java
public void putObject(String bucketName, String objectName, InputStream stream, long size,
                      ServerSideEncryption sse)
        throws InvalidBucketNameException, NoSuchAlgorithmException, IOException, InvalidKeyException,
               NoResponseException, XmlPullParserException, ErrorResponseException, InternalException,
               InvalidArgumentException, InsufficientDataException {
    if ((sse.getType() == ServerSideEncryption.Type.SSE_C) && !this.baseUrl.isHttps()) {
        throw new InvalidArgumentException("SSE_C operations must be performed over a secure connection.");
    } else if ((sse.getType() == ServerSideEncryption.Type.SSE_KMS) && !this.baseUrl.isHttps()) {
        throw new InvalidArgumentException("SSE_KMS operations must be performed over a secure connection.");
    }

    Map<String, String> headerMap = new HashMap<>();
    putObject(bucketName, objectName, size, new BufferedInputStream(stream), headerMap, sse);
}
python
def infer_type(expr, scope):
    """Try to infer the type of x.y if y is a known value (literal)."""
    # Do we know what the member is?
    if isinstance(expr.member, ast.Literal):
        member = expr.member.value
    else:
        return protocol.AnyType

    container_type = infer_type(expr.obj, scope)
    try:
        # We are not using lexical scope here on purpose - we want to see what
        # the type of the member is only on the container_type.
        return structured.reflect(container_type, member) or protocol.AnyType
    except NotImplementedError:
        return protocol.AnyType
java
public String oppositeField(String fldName) {
    if (lhs.isFieldName() && lhs.asFieldName().equals(fldName) && rhs.isFieldName())
        return rhs.asFieldName();
    if (rhs.isFieldName() && rhs.asFieldName().equals(fldName) && lhs.isFieldName())
        return lhs.asFieldName();
    return null;
}
python
def setUserKeyCredentials(self, username, public_key=None, private_key=None):
    """Set these properties in ``disk.0.os.credentials``."""
    self.setCredentialValues(username=username, public_key=public_key, private_key=private_key)
python
def audit_1_12(self):
    """1.12 Ensure no root account access key exists (Scored)"""
    for row in self.credential_report:
        if row["user"] == "<root_account>":
            self.assertFalse(json.loads(row["access_key_1_active"]))
            self.assertFalse(json.loads(row["access_key_2_active"]))
python
def getProjectionForQueryName(self, query_name):
    """
    TODO: DOCUMENT !!

    Returns None if no such projection exists
    """
    projectionFileName = query_name + '.pyql'
    projectionText = self._getText(projectionFileName)
    return projectionText
python
def delete_store_credit_by_id(cls, store_credit_id, **kwargs):
    """Delete StoreCredit

    Delete an instance of StoreCredit by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.delete_store_credit_by_id(store_credit_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str store_credit_id: ID of storeCredit to delete. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._delete_store_credit_by_id_with_http_info(store_credit_id, **kwargs)
    else:
        (data) = cls._delete_store_credit_by_id_with_http_info(store_credit_id, **kwargs)
        return data
python
def subdivide(vertices, faces, face_index=None): """ Subdivide a mesh into smaller triangles. Note that if `face_index` is passed, only those faces will be subdivided and their neighbors won't be modified making the mesh no longer "watertight." Parameters ---------- vertices : (n, 3) float Vertices in space faces : (n, 3) int Indexes of vertices which make up triangular faces face_index : faces to subdivide. if None: all faces of mesh will be subdivided if (n,) int array of indices: only specified faces Returns ---------- new_vertices : (n, 3) float Vertices in space new_faces : (n, 3) int Remeshed faces """ if face_index is None: face_index = np.arange(len(faces)) else: face_index = np.asanyarray(face_index) # the (c,3) int set of vertex indices faces = faces[face_index] # the (c, 3, 3) float set of points in the triangles triangles = vertices[faces] # the 3 midpoints of each triangle edge # stacked to a (3 * c, 3) float mid = np.vstack([triangles[:, g, :].mean(axis=1) for g in [[0, 1], [1, 2], [2, 0]]]) # for adjacent faces we are going to be generating # the same midpoint twice so merge them here mid_idx = (np.arange(len(face_index) * 3)).reshape((3, -1)).T unique, inverse = grouping.unique_rows(mid) mid = mid[unique] mid_idx = inverse[mid_idx] + len(vertices) # the new faces with correct winding f = np.column_stack([faces[:, 0], mid_idx[:, 0], mid_idx[:, 2], mid_idx[:, 0], faces[:, 1], mid_idx[:, 1], mid_idx[:, 2], mid_idx[:, 1], faces[:, 2], mid_idx[:, 0], mid_idx[:, 1], mid_idx[:, 2]]).reshape((-1, 3)) # add the 3 new faces per old face new_faces = np.vstack((faces, f[len(face_index):])) # replace the old face with a smaller face new_faces[face_index] = f[:len(face_index)] new_vertices = np.vstack((vertices, mid)) return new_vertices, new_faces
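A quick sanity check of the midpoint scheme, runnable with the function above (plus numpy and its `grouping` helper) in scope: one triangle should yield four faces and three new midpoint vertices.
python
import numpy as np

vertices = np.array([[0.0, 0.0, 0.0],
                     [1.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0]])
faces = np.array([[0, 1, 2]])

new_vertices, new_faces = subdivide(vertices, faces)
assert len(new_vertices) == 6   # 3 original + 3 edge midpoints
assert len(new_faces) == 4      # 1-to-4 triangle split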
python
def get_attributes(self, template_pack=TEMPLATE_PACK):
    """
    Used by crispy_forms_tags to get helper attributes
    """
    items = {
        'form_method': self.form_method.strip(),
        'form_tag': self.form_tag,
        'form_style': self.form_style.strip(),
        'form_show_errors': self.form_show_errors,
        'help_text_inline': self.help_text_inline,
        'error_text_inline': self.error_text_inline,
        'html5_required': self.html5_required,
        'form_show_labels': self.form_show_labels,
        'disable_csrf': self.disable_csrf,
        'label_class': self.label_class,
        'field_class': self.field_class,
        'include_media': self.include_media
    }

    if template_pack == 'bootstrap4':
        bootstrap_size_match = re.findall(r'col-(xl|lg|md|sm)-(\d+)', self.label_class)
        offset_pattern = 'offset-%s-%s'
    else:
        bootstrap_size_match = re.findall(r'col-(lg|md|sm|xs)-(\d+)', self.label_class)
        offset_pattern = 'col-%s-offset-%s'
    if bootstrap_size_match:
        items['bootstrap_checkbox_offsets'] = [offset_pattern % m for m in bootstrap_size_match]

    items['attrs'] = {}
    if self.attrs:
        items['attrs'] = self.attrs.copy()
    if self.form_action:
        items['attrs']['action'] = self.form_action.strip()
    if self.form_id:
        items['attrs']['id'] = self.form_id.strip()
    if self.form_class:
        # uni_form TEMPLATE PACK has a uniForm class by default
        if template_pack == 'uni_form':
            items['attrs']['class'] = "uniForm %s" % self.form_class.strip()
        else:
            items['attrs']['class'] = self.form_class.strip()
    else:
        if template_pack == 'uni_form':
            items['attrs']['class'] = self.attrs.get('class', '') + " uniForm"
    if self.form_group_wrapper_class:
        items['attrs']['form_group_wrapper_class'] = self.form_group_wrapper_class

    items['flat_attrs'] = flatatt(items['attrs'])

    if self.inputs:
        items['inputs'] = self.inputs
    if self.form_error_title:
        items['form_error_title'] = self.form_error_title.strip()
    if self.formset_error_title:
        items['formset_error_title'] = self.formset_error_title.strip()

    for attribute_name, value in self.__dict__.items():
        if attribute_name not in items and attribute_name not in ['layout', 'inputs'] and not attribute_name.startswith('_'):
            items[attribute_name] = value

    return items
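The offset computation is easiest to see in isolation. A dependency-free rerun of the same regex logic, with a made-up `label_class` value:
python
import re

label_class = 'col-md-3 col-sm-4'
matches = re.findall(r'col-(lg|md|sm|xs)-(\d+)', label_class)
# [('md', '3'), ('sm', '4')]
offsets = ['col-%s-offset-%s' % m for m in matches]
print(offsets)
# ['col-md-offset-3', 'col-sm-offset-4'] -- bootstrap3-style offset classes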
java
public void setAttachedDiskMapping(java.util.Map<String, java.util.List<DiskMap>> attachedDiskMapping) { this.attachedDiskMapping = attachedDiskMapping; }
python
def hil_rc_inputs_raw_encode(self, time_usec, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, chan9_raw, chan10_raw, chan11_raw, chan12_raw, rssi): ''' Sent from simulation to autopilot. The RAW values of the RC channels received. The standard PPM modulation is as follows: 1000 microseconds: 0%, 2000 microseconds: 100%. Individual receivers/transmitters might violate this specification. time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) chan1_raw : RC channel 1 value, in microseconds (uint16_t) chan2_raw : RC channel 2 value, in microseconds (uint16_t) chan3_raw : RC channel 3 value, in microseconds (uint16_t) chan4_raw : RC channel 4 value, in microseconds (uint16_t) chan5_raw : RC channel 5 value, in microseconds (uint16_t) chan6_raw : RC channel 6 value, in microseconds (uint16_t) chan7_raw : RC channel 7 value, in microseconds (uint16_t) chan8_raw : RC channel 8 value, in microseconds (uint16_t) chan9_raw : RC channel 9 value, in microseconds (uint16_t) chan10_raw : RC channel 10 value, in microseconds (uint16_t) chan11_raw : RC channel 11 value, in microseconds (uint16_t) chan12_raw : RC channel 12 value, in microseconds (uint16_t) rssi : Receive signal strength indicator, 0: 0%, 255: 100% (uint8_t) ''' return MAVLink_hil_rc_inputs_raw_message(time_usec, chan1_raw, chan2_raw, chan3_raw, chan4_raw, chan5_raw, chan6_raw, chan7_raw, chan8_raw, chan9_raw, chan10_raw, chan11_raw, chan12_raw, rssi)
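The PPM convention described in the docstring is a linear map from percent to pulse width. A small helper (hypothetical, not part of the generated MAVLink bindings) makes the arithmetic explicit:
python
def percent_to_ppm_us(percent):
    """Map 0-100% to the standard 1000-2000 microsecond PPM range."""
    return int(round(1000 + 10 * percent))

assert percent_to_ppm_us(0) == 1000     # 0%   -> 1000 us
assert percent_to_ppm_us(50) == 1500    # 50%  -> 1500 us
assert percent_to_ppm_us(100) == 2000   # 100% -> 2000 us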
python
def normalized_mutual_info_score(self, reference_clusters): """ Calculates the normalized mutual information w.r.t. the reference clusters (explicit evaluation) :param reference_clusters: Clusters that are to be used as reference :return: The resulting normalized mutual information score. """ return normalized_mutual_info_score(self.get_labels(self), self.get_labels(reference_clusters))
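The scoring itself is delegated to scikit-learn's scorer, which is symmetric and invariant to label permutation; that invariance is what makes comparison against an arbitrary reference clustering meaningful:
python
from sklearn.metrics import normalized_mutual_info_score

a = [0, 0, 1, 1]
b = [1, 1, 0, 0]   # same partition, labels swapped
c = [0, 1, 0, 1]   # unrelated partition
print(normalized_mutual_info_score(a, b))  # 1.0 -- identical partitions
print(normalized_mutual_info_score(a, c))  # 0.0 -- independent partitions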
python
def _list(self):
    """List all the objects saved in the namespace.

    :param search_from: TBI
    :param search_to: TBI
    :param offset: TBI
    :param limit: max number of values to be shown. TBI
    :return: list of transactions.
    """
    matches = self.driver.instance.metadata.get(search=self.namespace)
    results = []
    for match in matches:
        try:
            record = self._get(match['id'])
            if record not in results:
                results.append(record)
        except Exception:
            pass
    return results
java
public void init(FieldTable table, String strStartDateTimeField, String strEndDateTimeField, String strDescriptionField, String strStatusField) { super.init(table); m_strStartDateTimeField = strStartDateTimeField; m_strEndDateTimeField = strEndDateTimeField; m_strDescriptionField = strDescriptionField; m_strStatusField = strStatusField; }
java
@Override public CommerceWarehouse findByGroupId_First(long groupId, OrderByComparator<CommerceWarehouse> orderByComparator) throws NoSuchWarehouseException { CommerceWarehouse commerceWarehouse = fetchByGroupId_First(groupId, orderByComparator); if (commerceWarehouse != null) { return commerceWarehouse; } StringBundler msg = new StringBundler(4); msg.append(_NO_SUCH_ENTITY_WITH_KEY); msg.append("groupId="); msg.append(groupId); msg.append("}"); throw new NoSuchWarehouseException(msg.toString()); }
python
def solveOneCycle(agent, solution_last):
    '''
    Solve one "cycle" of the dynamic model for one agent type.  This function
    iterates over the periods within an agent's cycle, updating the time-varying
    parameters and passing them to the single period solver(s).

    Parameters
    ----------
    agent : AgentType
        The microeconomic AgentType whose dynamic problem is to be solved.
    solution_last : Solution
        A representation of the solution of the period that comes after the
        end of the sequence of one period problems.  This might be the
        terminal period solution, a "pseudo terminal" solution, or simply the
        solution to the earliest period from the succeeding cycle.

    Returns
    -------
    solution_cycle : [Solution]
        A list of one period solutions for one "cycle" of the AgentType's
        microeconomic model.  Returned in reverse chronological order.
    '''
    # Calculate number of periods per cycle, defaults to 1 if all variables are time invariant
    if len(agent.time_vary) > 0:
        T = len(getattr(agent, agent.time_vary[0]))
    else:
        T = 1

    # Check whether the same solution method is used in all periods
    always_same_solver = 'solveOnePeriod' not in agent.time_vary
    if always_same_solver:
        solveOnePeriod = agent.solveOnePeriod
        these_args = getArgNames(solveOnePeriod)

    # Construct a dictionary to be passed to the solver: time-invariant
    # parameters are read off the agent, time-varying ones are filled per period
    solve_dict = {name: getattr(agent, name) for name in agent.time_inv}
    solve_dict.update({name: None for name in agent.time_vary})

    # Initialize the solution for this cycle, then iterate on periods
    solution_cycle = []
    solution_next = solution_last
    for t in range(T):
        # Update which single period solver to use (if it depends on time)
        if not always_same_solver:
            solveOnePeriod = agent.solveOnePeriod[t]
            these_args = getArgNames(solveOnePeriod)

        # Update time-varying single period inputs
        for name in agent.time_vary:
            if name in these_args:
                solve_dict[name] = getattr(agent, name)[t]
        solve_dict['solution_next'] = solution_next

        # Make a temporary dictionary for this period
        temp_dict = {name: solve_dict[name] for name in these_args}

        # Solve one period, add it to the solution, and move to the next period
        solution_t = solveOnePeriod(**temp_dict)
        solution_cycle.append(solution_t)
        solution_next = solution_t

    # Return the list of per-period solutions
    return solution_cycle
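The control flow above is plain backward induction: start from the terminal solution and repeatedly apply a one-period solver while indexing into the time-varying inputs. A stripped-down, self-contained illustration follows; the toy solver and `Solution` tuple are hypothetical, not HARK's API.
python
from collections import namedtuple

Solution = namedtuple('Solution', ['value'])

def solve_one_period(solution_next, reward):
    # Toy Bellman step: today's value = reward + discounted future value.
    return Solution(value=reward + 0.96 * solution_next.value)

terminal = Solution(value=0.0)
rewards = [1.0, 2.0, 3.0]          # time-varying input, one entry per period

cycle = []
solution_next = terminal
for t in range(len(rewards)):      # iterate periods, chaining solutions
    solution_next = solve_one_period(solution_next, rewards[t])
    cycle.append(solution_next)
print([round(s.value, 3) for s in cycle])   # [1.0, 2.96, 5.842]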
python
def _execute(self, parts, expectation=None, format_callback=None): """Really execute a redis command :param list parts: The list of command parts :param mixed expectation: Optional response expectation :rtype: :class:`~tornado.concurrent.Future` :raises: :exc:`~tredis.exceptions.SubscribedError` """ future = concurrent.TracebackFuture() try: command = self._build_command(parts) except ValueError as error: future.set_exception(error) return future def on_locked(_): if self.ready: if self._clustering: cmd = Command(command, self._pick_cluster_host(parts), expectation, format_callback) else: LOGGER.debug('Connection: %r', self._connection) cmd = Command(command, self._connection, expectation, format_callback) LOGGER.debug('_execute(%r, %r, %r) on %s', cmd.command, expectation, format_callback, cmd.connection.name) cmd.connection.execute(cmd, future) else: LOGGER.critical('Lock released & not ready, aborting command') # Wait until the cluster is ready, letting cluster discovery through if not self.ready and not self._connected.is_set(): self.io_loop.add_future( self._connected.wait(), lambda f: self.io_loop.add_future(self._busy.acquire(), on_locked) ) else: self.io_loop.add_future(self._busy.acquire(), on_locked) # Release the lock when the future is complete self.io_loop.add_future(future, lambda r: self._busy.release()) return future
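The ready-check and busy-lock choreography around `on_locked` predates async/await; the same guarantee (commands wait for readiness, hold the busy lock while in flight, and always release it) can be sketched in a few lines of modern asyncio. This is an analogy, not tredis code:
python
import asyncio

async def execute(ready: asyncio.Event, busy: asyncio.Lock, run_command):
    # Wait until the connection is ready, then hold the busy lock for
    # the duration of the command; the lock is released even on error.
    await ready.wait()
    async with busy:
        return await run_command()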
python
def buildProtocol(self, addr): """Get a new LLRP client protocol object. Consult self.antenna_dict to look up antennas to use. """ self.resetDelay() # reset reconnection backoff state clargs = self.client_args.copy() # optionally configure antennas from self.antenna_dict, which looks # like {'10.0.0.1:5084': {'1': 'ant1', '2': 'ant2'}} hostport = '{}:{}'.format(addr.host, addr.port) logger.debug('Building protocol for %s', hostport) if hostport in self.antenna_dict: clargs['antennas'] = [ int(x) for x in self.antenna_dict[hostport].keys()] elif addr.host in self.antenna_dict: clargs['antennas'] = [ int(x) for x in self.antenna_dict[addr.host].keys()] logger.debug('Antennas in buildProtocol: %s', clargs.get('antennas')) logger.debug('%s start_inventory: %s', hostport, clargs.get('start_inventory')) if self.start_first and not self.protocols: # this is the first protocol, so let's start it inventorying clargs['start_inventory'] = True proto = LLRPClient(factory=self, **clargs) # register state-change callbacks with new client for state, cbs in self._state_callbacks.items(): for cb in cbs: proto.addStateCallback(state, cb) # register message callbacks with new client for msg_type, cbs in self._message_callbacks.items(): for cb in cbs: proto.addMessageCallback(msg_type, cb) return proto
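The antenna lookup prefers an exact 'host:port' key and falls back to the bare host. Reproducing just that resolution logic against a made-up `antenna_dict`:
python
antenna_dict = {'10.0.0.1:5084': {'1': 'ant1', '2': 'ant2'},
                '10.0.0.2': {'1': 'ant1'}}

def antennas_for(host, port):
    hostport = '{}:{}'.format(host, port)
    entry = antenna_dict.get(hostport) or antenna_dict.get(host)
    return sorted(int(x) for x in entry) if entry else None

print(antennas_for('10.0.0.1', 5084))  # [1, 2] -- exact host:port match
print(antennas_for('10.0.0.2', 5084))  # [1]    -- falls back to bare host
print(antennas_for('10.0.0.3', 5084))  # None   -- not configured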