language: stringclasses (2 values)
func_code_string: stringlengths (63 to 466k)
python
def save(self, parent=None):
    """Either creates a resource or updates it (if it already has an id).
    This will trigger an api POST or PATCH request.

    :returns: the resource itself
    """
    if self.id:
        return self.update(parent=parent)
    else:
        return self.create(parent=parent)
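A minimal usage sketch (hedged: `Resource` and its `create`/`update` plumbing are hypothetical stand-ins for the surrounding API client, which is not shown in this record):

    resource = Resource(name="example")  # hypothetical constructor; no id yet
    resource.save()                      # falsy id -> POST via create()
    resource.id = 42
    resource.save()                      # id present -> PATCH via update()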
java
private static Configuration injectTransactionManager(TransactionManagerLookupDelegator transactionManagerLookupDelegator,
        Configuration configuration) {
    if ( configuration.transaction().transactionMode() == TransactionMode.TRANSACTIONAL ) {
        ConfigurationBuilder builder = new ConfigurationBuilder().read( configuration );
        builder.transaction()
            .transactionManagerLookup( transactionManagerLookupDelegator );
        return builder.build();
    }
    return configuration;
}
python
def fastqmover(self):
    """Links .fastq files created above to :sequencepath"""
    # Create the sequence path if necessary
    make_path(self.sequencepath)
    # Iterate through all the sample names
    for sample in self.metadata.samples:
        # Make directory variables
        outputdir = os.path.join(self.sequencepath, sample.name)
        # Demultiplexed files will be present in the project name subfolder within the fastq destination folder
        if self.demultiplex:
            glob_dir = self.projectpath
        # Undemultiplexed reads (Undetermined_S0_R1_001.fastq.gz) are present in the fastq destination folder
        else:
            glob_dir = self.fastqdestination
        # Sometimes the files are put in self.fastqdestination rather than self.projectpath? Matt has this problem
        # and I don't understand what's going on
        if not os.path.isdir(glob_dir):
            glob_dir = os.path.dirname(glob_dir)
        # Glob all the .gz files in the subfolders - projectpath/Sample_:sample.name/*.gz
        for fastq in sorted(glob(os.path.join(glob_dir, '*.gz'))):
            fastqname = os.path.basename(fastq)
            # Set the name of the destination file
            outputfile = os.path.join(self.sequencepath, fastqname)
            # Link the file if it doesn't already exist
            if not os.path.isfile(outputfile):
                relative_symlink(src_file=fastq, output_dir=self.sequencepath)
        # Repopulate .strainfastqfiles with the freshly-linked/copied files
        if self.demultiplex:
            fastqfiles = glob(os.path.join(self.sequencepath, '{}*.fastq*'.format(sample.name)))
            fastqfiles = sorted([fastq for fastq in fastqfiles if 'trimmed' not in os.path.basename(fastq)])
        # Undemultiplexed files will not have the sample name in the file name
        else:
            fastqfiles = sorted(glob(os.path.join(glob_dir, '*.gz')))
        # Populate the metadata object with the name/path of the fastq files
        sample.general.fastqfiles = fastqfiles
        # Save the outputdir to the metadata object
        sample.run.outputdirectory = outputdir
        sample.general.outputdirectory = outputdir
        sample.general.bestassemblyfile = True
        sample.general.trimmedcorrectedfastqfiles = sorted(sample.general.fastqfiles)
        sample.general.logout = os.path.join(sample.general.outputdirectory, 'logout')
        sample.general.logerr = os.path.join(sample.general.outputdirectory, 'logerr')
        sample.commands = GenObject()
java
List<Endpoint> endpoints() {
    try {
        String urlString = String.format("%s/api/v1/namespaces/%s/pods", kubernetesMaster, namespace);
        return enrichWithPublicAddresses(parsePodsList(callGet(urlString)));
    } catch (RestClientException e) {
        return handleKnownException(e);
    }
}
java
@SuppressWarnings("deprecation") public <T> ResourceLeakDetector<T> newResourceLeakDetector(Class<T> resource, int samplingInterval) { return newResourceLeakDetector(resource, ResourceLeakDetector.SAMPLING_INTERVAL, Long.MAX_VALUE); }
python
def _batchify(self, data_source):
    """Load data from underlying arrays, internal use only."""
    assert self.cursor < self.num_data, 'DataIter needs reset.'
    # first batch of next epoch with 'roll_over'
    if self.last_batch_handle == 'roll_over' and \
            -self.batch_size < self.cursor < 0:
        assert self._cache_data is not None or self._cache_label is not None, \
            'next epoch should have cached data'
        cache_data = self._cache_data if self._cache_data is not None else self._cache_label
        second_data = self._getdata(
            data_source, end=self.cursor + self.batch_size)
        if self._cache_data is not None:
            self._cache_data = None
        else:
            self._cache_label = None
        return self._concat(cache_data, second_data)
    # last batch with 'pad'
    elif self.last_batch_handle == 'pad' and \
            self.cursor + self.batch_size > self.num_data:
        pad = self.batch_size - self.num_data + self.cursor
        first_data = self._getdata(data_source, start=self.cursor)
        second_data = self._getdata(data_source, end=pad)
        return self._concat(first_data, second_data)
    # normal case
    else:
        if self.cursor + self.batch_size < self.num_data:
            end_idx = self.cursor + self.batch_size
        # get incomplete last batch
        else:
            end_idx = self.num_data
        return self._getdata(data_source, self.cursor, end_idx)
python
def get(self, key, *, default=None, cast_func=None, case_sensitive=None,
        raise_exception=None, warn_missing=None, use_cache=True, additional_sources=[]):
    """Gets the setting specified by ``key``.
    For efficiency, we cache the retrieval of settings to avoid multiple searches through the sources list.

    :param str key: settings key to retrieve
    :param str default: use this as default value when the setting key is not found
    :param func cast_func: cast the value of the settings using this function
    :param bool case_sensitive: whether to make case sensitive comparisons for settings key
    :param bool raise_exception: whether to raise a :exc:`MissingSettingException` exception when the setting is not found
    :param bool warn_missing: whether to display a warning when the setting is not found
    :param list additional_sources: additional sources to search for the key; note that the values obtained here could be cached in a future call
    :returns: the setting value
    :rtype: str
    """
    case_sensitive = self.case_sensitive if case_sensitive is None else case_sensitive
    raise_exception = self.raise_exception if raise_exception is None else raise_exception
    warn_missing = self.warn_missing if warn_missing is None else warn_missing

    if not case_sensitive:
        key = key.lower()

    if use_cache and key in self._cache:
        return cast_func(self._cache[key]) if cast_func else self._cache[key]

    found, value = False, None
    for source, settings in chain(self._settings.items(),
                                  map(self._load_settings_from_source, additional_sources)):
        if case_sensitive:
            if key in settings:
                found = True
                value = settings[key]
            else:
                continue
        else:
            possible_keys = [k for k in settings.keys() if k.lower() == key]
            if not possible_keys:
                continue
            else:
                if len(possible_keys) > 1:
                    warnings.warn('There is more than one possible value for "{}" in <{}> settings due to case insensitivity.'.format(key, source))
                found = True
                value = settings[possible_keys[0]]
            #end if
        #end if

        if found:
            break
    #end for

    if not found:
        if raise_exception:
            raise MissingSettingException('The "{}" setting is missing.'.format(key))
        if warn_missing:
            warnings.warn('The "{}" setting is missing.'.format(key))
        return default
    #end if

    if use_cache:
        self._cache[key] = value

    if cast_func:
        value = cast_func(value)

    return value
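A hedged usage sketch of the lookup; how the settings container is constructed and which sources it holds are assumptions about the surrounding library:

    settings = Settings()  # hypothetical construction
    port = settings.get('PORT', default='8080', cast_func=int)
    debug = settings.get('debug', case_sensitive=False, warn_missing=True)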
python
def _snake_case(cls, text):
    """Transform text to snake case (based on SCREAMING_SNAKE_CASE)

    :param text:
    :return:
    """
    if text.islower():
        return text
    return cls._screaming_snake_case(text).lower()
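An illustration of the two paths through this helper (the intermediate value assumes `_screaming_snake_case` behaves as its name suggests):

    cls._snake_case('already_lower')   # islower() is True -> returned unchanged
    cls._snake_case('SomePascalName')  # -> 'SOME_PASCAL_NAME' -> 'some_pascal_name'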
java
public MapWithProtoValuesFluentAssertion<M> withPartialScopeForValues(FieldScope fieldScope) {
    return usingConfig(config.withPartialScope(checkNotNull(fieldScope, "fieldScope")));
}
java
public void addClassesSummary(ClassDoc[] classes, String label,
        String tableSummary, String[] tableHeader, Content packageSummaryContentTree) {
    addClassesSummary(classes, label, tableSummary, tableHeader,
            packageSummaryContentTree, profile.value);
}
java
public static String format(final String code, final Properties options, final LineEnding lineEnding) {
    Check.notEmpty(code, "code");
    Check.notEmpty(options, "options");
    Check.notNull(lineEnding, "lineEnding");

    final CodeFormatter formatter = ToolFactory.createCodeFormatter(options);
    final String lineSeparator = LineEnding.find(lineEnding, code);
    TextEdit te = null;
    try {
        te = formatter.format(CodeFormatter.K_COMPILATION_UNIT, code, 0, code.length(), 0, lineSeparator);
    } catch (final Exception formatFailed) {
        LOG.warn("Formatting failed", formatFailed);
    }

    String formattedCode = code;
    if (te == null) {
        LOG.info("Code cannot be formatted. Possible cause is unmatched source/target/compliance version.");
    } else {
        final IDocument doc = new Document(code);
        try {
            te.apply(doc);
        } catch (final Exception e) {
            LOG.warn(e.getLocalizedMessage(), e);
        }
        formattedCode = doc.get();
    }
    return formattedCode;
}
java
@SuppressWarnings("unchecked") public <KP extends Object> KP getKeyPart(final Class<KP> keyPartClass) { return (KP) getListKeyPart().stream() .filter(kp -> kp != null && keyPartClass.isAssignableFrom(kp.getClass())) .findFirst() .get(); }
java
public static int mulAndCheck(int x, int y) throws ArithmeticException {
    // Multiply in 64-bit space, then check the result still fits in an int.
    long m = ((long) x) * ((long) y);
    if (m < Integer.MIN_VALUE || m > Integer.MAX_VALUE) {
        throw new ArithmeticException();
    }
    return (int) m;
}
python
def get_lrc(self):
    """Return the lyrics of the currently playing song"""
    if self._playingsong != self._pre_playingsong:
        self._lrc = douban.get_lrc(self._playingsong)
        self._pre_playingsong = self._playingsong
    return self._lrc
java
@Override
public Iterator findMembers(IEntityGroup eg) throws GroupsException {
    Collection members = new ArrayList(10);
    Iterator it = null;

    for (it = findMemberGroups(eg); it.hasNext(); ) {
        members.add(it.next());
    }
    for (it = findMemberEntities(eg); it.hasNext(); ) {
        members.add(it.next());
    }

    return members.iterator();
}
python
def encode_payload(cls, payload):
    '''Encode a Python object as JSON and convert it to bytes.'''
    try:
        return json.dumps(payload).encode()
    except TypeError:
        msg = f'JSON payload encoding error: {payload}'
        raise ProtocolError(cls.INTERNAL_ERROR, msg) from None
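A sketch of both code paths (`ProtocolError` and `cls.INTERNAL_ERROR` come from the surrounding RPC library and are assumed here):

    encode_payload({'id': 1, 'method': 'ping'})  # -> b'{"id": 1, "method": "ping"}'
    encode_payload({1, 2, 3})                    # a set is not JSON-serializable -> ProtocolError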
python
def get_mx(self, tree: Union[ast.Symbol, ast.ComponentRef, ast.Expression]) -> ca.MX:
    """
    We pull components and symbols from the AST on demand.
    This is to ensure that parametrized vector dimensions can be resolved. Vector
    dimensions need to be known at CasADi MX creation time.

    :param tree:
    :return:
    """
    if tree not in self.src:
        if isinstance(tree, ast.Symbol):
            s = self.get_symbol(tree)
        elif isinstance(tree, ast.ComponentRef):
            s = self.get_component(tree)
        else:
            raise Exception('Tried to look up expression before it was reached by the tree walker')
        self.src[tree] = s
    return self.src[tree]
python
def handle_template(bot_or_project, name, target=None, **options):
    """
    Copy either a bot layout template or a Trading-Bots project layout
    template into the specified directory.

    :param bot_or_project: The string 'bot' or 'project'.
    :param name: The name of the bot or project.
    :param target: The directory to which the template should be copied.
    :param options: The additional variables passed to project or bot templates
    """
    bot_or_project = bot_or_project
    paths_to_remove = []
    verbosity = int(options['verbosity'])

    validate_name(name, bot_or_project)

    # if some directory is given, make sure it's nicely expanded
    if target is None:
        top_dir = path.join(os.getcwd(), name)
        try:
            os.makedirs(top_dir)
        except FileExistsError:
            raise click.ClickException("'%s' already exists" % top_dir)
        except OSError as e:
            raise click.ClickException(e)
    else:
        top_dir = os.path.abspath(path.expanduser(target))
        if not os.path.exists(top_dir):
            raise click.ClickException("Destination directory '%s' does not "
                                       "exist, please create it first." % top_dir)

    base_name = '%s_name' % bot_or_project
    base_subdir = '%s_template' % bot_or_project
    base_directory = '%s_directory' % bot_or_project
    target_name = '%s_target' % bot_or_project
    pascal_case_name = 'pascal_case_%s_name' % bot_or_project
    pascal_case_value = stringcase.pascalcase(name)
    snake_case_name = 'snake_case_%s_name' % bot_or_project
    snake_case_value = stringcase.snakecase(name)

    context = {
        **options,
        base_name: name,
        base_directory: top_dir,
        target_name: target,
        pascal_case_name: pascal_case_value,
        snake_case_name: snake_case_value,
        'settings_files': defaults.SETTINGS,
        'version': getattr(trading_bots.__version__, '__version__'),
    }

    # Setup a stub settings environment for template rendering
    settings.configure()
    trading_bots.setup()

    template_dir = path.join(trading_bots.__path__[0], 'conf', base_subdir)
    prefix_length = len(template_dir) + 1

    for root, dirs, files in os.walk(template_dir):

        path_rest = root[prefix_length:]
        relative_dir = path_rest.replace(snake_case_name, snake_case_value)
        if relative_dir:
            target_dir = path.join(top_dir, relative_dir)
            if not path.exists(target_dir):
                os.mkdir(target_dir)

        for dirname in dirs[:]:
            if dirname.startswith('.') or dirname == '__pycache__':
                dirs.remove(dirname)

        for filename in files:
            if filename.endswith(('.pyo', '.pyc', '.py.class')):
                # Ignore some files as they cause various breakages.
                continue
            old_path = path.join(root, filename)
            new_path = path.join(top_dir, relative_dir,
                                 filename.replace(snake_case_name, snake_case_value))
            for old_suffix, new_suffix in rewrite_template_suffixes:
                if new_path.endswith(old_suffix):
                    new_path = new_path[:-len(old_suffix)] + new_suffix
                    break  # Only rewrite once

            if path.exists(new_path):
                raise click.ClickException("%s already exists, overlaying a "
                                           "project or bot into an existing "
                                           "directory won't replace conflicting "
                                           "files" % new_path)

            # Only render the Python files, as we don't want to
            # accidentally render Trading-Bots templates files
            if new_path.endswith(extensions):
                with open(old_path, 'r', encoding='utf-8') as template_file:
                    content = template_file.read()
                template = Template(content, keep_trailing_newline=True)
                content = template.render(**context)
                with open(new_path, 'w', encoding='utf-8') as new_file:
                    new_file.write(content)
            else:
                shutil.copyfile(old_path, new_path)

            if verbosity >= 2:
                click.echo("Creating %s\n" % new_path)
            try:
                shutil.copymode(old_path, new_path)
                make_writeable(new_path)
            except OSError:
                click.echo(
                    "Notice: Couldn't set permission bits on %s. You're "
                    "probably using an uncommon filesystem setup. No "
                    "problem." % new_path)

    if paths_to_remove:
        if verbosity >= 2:
            click.echo("Cleaning up temporary files.\n")
        for path_to_remove in paths_to_remove:
            if path.isfile(path_to_remove):
                os.remove(path_to_remove)
            else:
                shutil.rmtree(path_to_remove)
java
public static base_responses add(nitro_service client, cachepolicylabel resources[]) throws Exception {
    base_responses result = null;
    if (resources != null && resources.length > 0) {
        cachepolicylabel addresources[] = new cachepolicylabel[resources.length];
        for (int i = 0; i < resources.length; i++) {
            addresources[i] = new cachepolicylabel();
            addresources[i].labelname = resources[i].labelname;
            addresources[i].evaluates = resources[i].evaluates;
        }
        result = add_bulk_request(client, addresources);
    }
    return result;
}
java
protected void release(ByteArray byteArray) {
    if (byteArray instanceof PooledByteArray) {
        PooledByteArray pooledArray = (PooledByteArray) byteArray;
        if (pooledArray.release()) {
            this.byteArrayPool.release(byteArray.getBytes());
        }
    }
}
java
public static int Maximum(ImageSource fastBitmap, int startX, int startY, int width, int height) {
    int max = 0;
    if (fastBitmap.isGrayscale()) {
        for (int i = startX; i < height; i++) {
            for (int j = startY; j < width; j++) {
                int gray = fastBitmap.getRGB(j, i);
                if (gray > max) {
                    max = gray;
                }
            }
        }
    } else {
        for (int i = startX; i < height; i++) {
            for (int j = startY; j < width; j++) {
                int gray = fastBitmap.getG(j, i);
                if (gray > max) {
                    max = gray;
                }
            }
        }
    }
    return max;
}
python
def replace_uid(old_uwnetid, new_uwnetid, no_custom_fields=True):
    """
    Return a list of BridgeUser objects without custom fields
    """
    url = author_uid_url(old_uwnetid)
    if not no_custom_fields:
        url += ("?%s" % CUSTOM_FIELD)
    # The "@uw.edu" domain below is reconstructed: the source contained an
    # email-obfuscation artifact ("%[email protected]") at this spot.
    resp = patch_resource(url,
                          '{"user":{"uid":"%s@uw.edu"}}' % new_uwnetid)
    return _process_json_resp_data(resp,
                                   no_custom_fields=no_custom_fields)
java
public ListShardsResult withShards(Shard... shards) {
    if (this.shards == null) {
        setShards(new com.amazonaws.internal.SdkInternalList<Shard>(shards.length));
    }
    for (Shard ele : shards) {
        this.shards.add(ele);
    }
    return this;
}
java
public static List<File> changeAllFilenameSuffix(final File file, final String oldSuffix,
        final String newSuffix, final boolean delete)
        throws IOException, FileDoesNotExistException, FileIsADirectoryException {
    boolean success;
    List<File> notDeletedFiles = null;
    final String filePath = file.getAbsolutePath();
    final String suffix[] = { oldSuffix };
    final List<File> files = FileSearchExtensions.findFiles(filePath, suffix);
    final int fileCount = files.size();
    for (int i = 0; i < fileCount; i++) {
        final File currentFile = files.get(i);
        success = RenameFileExtensions.changeFilenameSuffix(currentFile, newSuffix, delete);
        if (!success) {
            if (null != notDeletedFiles) {
                notDeletedFiles.add(currentFile);
            } else {
                notDeletedFiles = new ArrayList<>();
                notDeletedFiles.add(currentFile);
            }
        }
    }
    return notDeletedFiles;
}
python
def replace_payment_transaction_by_id(cls, payment_transaction_id, payment_transaction, **kwargs):
    """Replace PaymentTransaction

    Replace all attributes of PaymentTransaction
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.replace_payment_transaction_by_id(payment_transaction_id, payment_transaction, async=True)
    >>> result = thread.get()

    :param async bool
    :param str payment_transaction_id: ID of paymentTransaction to replace (required)
    :param PaymentTransaction payment_transaction: Attributes of paymentTransaction to replace (required)
    :return: PaymentTransaction
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._replace_payment_transaction_by_id_with_http_info(payment_transaction_id, payment_transaction, **kwargs)
    else:
        (data) = cls._replace_payment_transaction_by_id_with_http_info(payment_transaction_id, payment_transaction, **kwargs)
        return data
python
def iterRun(self, sqlTail='', raw=False):
    """Compile filters, run the query and return an iterator. This is much more
    efficient for large data sets, but you get the results one element at a time.
    One thing to keep in mind is that this function keeps the cursor open, which
    means that the sqlite database is locked (no updates/inserts etc...) until all
    the elements have been fetched. For batch updates to the database, preload the
    results into a list using get, then do your updates. You can use sqlTail to add
    things such as order by.
    If raw, returns the raw tuple data (not wrapped into a raba object)"""
    sql, sqlValues = self.getSQLQuery()
    cur = self.con.execute('%s %s' % (sql, sqlTail), sqlValues)
    for v in cur:
        if not raw:
            yield RabaPupa(self.rabaClass, v[0])
        else:
            yield v
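A hedged usage sketch; `raba_query` and `process` are illustrative stand-ins for whatever object carries this method and its consumer:

    for pupa in raba_query.iterRun(sqlTail='ORDER BY id'):
        process(pupa)  # the cursor (and DB lock) stays open until exhaustion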
java
private static Matrix readMatlabSparse(File matrixFile, Type matrixType,
                                       boolean transposeOnRead) throws IOException {
    Matrix matrix = new GrowingSparseMatrix();
    BufferedReader br = new BufferedReader(new FileReader(matrixFile));
    for (String line = null; (line = br.readLine()) != null; ) {
        String[] rowColVal = line.split("\\s+");
        int row = Integer.parseInt(rowColVal[0]) - 1;
        int col = Integer.parseInt(rowColVal[1]) - 1;
        double value = Double.parseDouble(rowColVal[2]);
        if (transposeOnRead)
            matrix.set(col, row, value);
        else
            matrix.set(row, col, value);
    }
    br.close();
    return matrix;
}
python
def compile_cxxfile(module_name, cxxfile, output_binary=None, **kwargs):
    '''c++ file -> native module
    Return the filename of the produced shared library
    Raises CompileError on failure
    '''
    builddir = mkdtemp()
    buildtmp = mkdtemp()

    extension_args = make_extension(python=True, **kwargs)

    extension = PythranExtension(module_name, [cxxfile], **extension_args)

    try:
        setup(name=module_name,
              ext_modules=[extension],
              cmdclass={"build_ext": PythranBuildExt},
              # fake CLI call
              script_name='setup.py',
              script_args=['--verbose'
                           if logger.isEnabledFor(logging.INFO)
                           else '--quiet',
                           'build_ext',
                           '--build-lib', builddir,
                           '--build-temp', buildtmp])
    except SystemExit as e:
        raise CompileError(str(e))

    def copy(src_file, dest_file):
        # not using shutil.copy because it fails to copy stat across devices
        with open(src_file, 'rb') as src:
            with open(dest_file, 'wb') as dest:
                dest.write(src.read())

    ext = sysconfig.get_config_var('SO')
    # Copy all generated files including the module name prefix (.pdb, ...)
    for f in glob.glob(os.path.join(builddir, module_name + "*")):
        if f.endswith(ext):
            if not output_binary:
                output_binary = os.path.join(os.getcwd(), module_name + ext)
            copy(f, output_binary)
        else:
            if not output_binary:
                output_directory = os.getcwd()
            else:
                output_directory = os.path.dirname(output_binary)
            copy(f, os.path.join(output_directory, os.path.basename(f)))
    shutil.rmtree(builddir)
    shutil.rmtree(buildtmp)

    logger.info("Generated module: " + module_name)
    logger.info("Output: " + output_binary)

    return output_binary
java
public ErrorPage getErrorPageTraverseRootCause(Throwable th) {
    while (th != null && th instanceof ServletException) {
        // defect 155880 - check rootCause != null
        Throwable rootCause = ((ServletException) th).getRootCause();
        if (rootCause == null) {
            break;
        }
        ErrorPage er = getErrorPageByExceptionType(th);
        if (er != null)
            return er;
        th = rootCause;
    }
    if (th != null)
        return getErrorPageByExceptionType(th);
    return null;
}
python
def isRef(self, doc, attr):
    """Determine whether an attribute is of type Ref. In case we have
    DTD(s) then this is simple, otherwise we use a heuristic: name
    Ref (upper or lowercase).
    """
    if doc is None:
        doc__o = None
    else:
        doc__o = doc._o
    if attr is None:
        attr__o = None
    else:
        attr__o = attr._o
    ret = libxml2mod.xmlIsRef(doc__o, self._o, attr__o)
    return ret
java
public void registerProvider(Object provider) {
    if (provider != null) {
        Collection<Object> providerList = new ArrayList<>(1);
        providerList.add(provider);
        registerProviders(providerList);
    }
}
java
public void setCalendar(Calendar calendar) {
    if (calendar == null) {
        dateEditor.setDate(null);
    } else {
        dateEditor.setDate(calendar.getTime());
    }
}
python
def _getVirtualScreenBitmap(self):
    """ Returns a PIL bitmap (BGR channel order) of all monitors

    Arranged like the Virtual Screen
    """
    # Collect information about the virtual screen & monitors
    min_x, min_y, screen_width, screen_height = self._getVirtualScreenRect()
    monitors = self._getMonitorInfo()

    # Initialize new black image the size of the virtual screen
    virt_screen = Image.new("RGB", (screen_width, screen_height))

    # Capture images of each of the monitors and overlay on the virtual screen
    for monitor_id in range(0, len(monitors)):
        img = self._captureScreen(monitors[monitor_id]["name"])
        # Capture virtscreen coordinates of monitor
        x1, y1, x2, y2 = monitors[monitor_id]["rect"]
        # Convert to image-local coordinates
        x = x1 - min_x
        y = y1 - min_y
        # Paste on the virtual screen
        virt_screen.paste(img, (x, y))
    return virt_screen
java
@SuppressWarnings("rawtypes") public MonetaryAmountFactory getMonetaryAmountFactory() { MonetaryAmountFactory factory = get(MonetaryAmountFactory.class); if (factory == null) { return Monetary.getDefaultAmountFactory(); } return factory; }
java
public void setGatewayGroups(java.util.Collection<GatewayGroupSummary> gatewayGroups) {
    if (gatewayGroups == null) {
        this.gatewayGroups = null;
        return;
    }

    this.gatewayGroups = new java.util.ArrayList<GatewayGroupSummary>(gatewayGroups);
}
python
def flatten_unique(l: Iterable) -> List:
    """ Return a list of UNIQUE non-list items in l """
    rval = OrderedDict()
    for e in l:
        if not isinstance(e, str) and isinstance(e, Iterable):
            for ev in flatten_unique(e):
                rval[ev] = None
        else:
            rval[e] = None
    return list(rval.keys())
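A quick check of the behaviour; first-occurrence order is preserved because an OrderedDict backs the de-duplication, and strings are deliberately kept whole:

    flatten_unique([1, [2, 1, [3]], 'ab', 2])  # -> [1, 2, 3, 'ab']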
python
def connection_made(self, transport):
    """Do the websocket handshake.

    According to https://tools.ietf.org/html/rfc6455
    """
    randomness = os.urandom(16)
    key = base64encode(randomness).decode('utf-8').strip()
    self.transport = transport
    message = "GET / HTTP/1.1\r\n"
    message += "Host: " + self.host + ':' + str(self.port) + '\r\n'
    message += "User-Agent: Python/3.5 websockets/3.4\r\n"
    message += "Upgrade: Websocket\r\n"
    message += "Connection: Upgrade\r\n"
    message += "Sec-WebSocket-Key: " + key + "\r\n"
    message += "Sec-WebSocket-Version: 13\r\n"
    message += "\r\n"
    _LOGGER.debug('Websocket handshake: %s', message)
    self.transport.write(message.encode())
python
def _get_magnitude_scaling(self, C, mag):
    """
    Implements the magnitude scaling function F(M) presented in equation 4
    """
    if mag < self.CONSTANTS["mh"]:
        return C["e1"] + C["b1"] * (mag - self.CONSTANTS["mref"]) +\
            C["b2"] * ((mag - self.CONSTANTS["mref"]) ** 2.)
    else:
        d_m = self.CONSTANTS["mh"] - self.CONSTANTS["mref"]
        return C["e1"] + C["b3"] * (mag - self.CONSTANTS["mh"]) +\
            (C["b1"] * d_m) + C["b2"] * (d_m ** 2.)
python
def get(self, uri, params={}):
    '''A generic method to make GET requests'''
    logging.debug("Requesting URL: " + str(urlparse.urljoin(self.BASE_URL, uri)))
    return requests.get(urlparse.urljoin(self.BASE_URL, uri),
                        params=params, verify=False, auth=self.auth)
python
def _format_obj(self, item=None):
    """ Determines the type of the object and maps it to the correct
    formatter
    """
    # Order here matters, odd behavior with tuples
    if item is None:
        return getattr(self, 'number')(item)
    elif isinstance(item, self.str_):
        #: String
        return item + " "
    elif isinstance(item, bytes):
        #: Bytes
        return getattr(self, 'bytes')(item)
    elif isinstance(item, self.numeric_):
        #: Float, int, etc.
        return getattr(self, 'number')(item)
    elif isinstance(item, self.dict_):
        #: Dict
        return getattr(self, 'dict')(item)
    elif isinstance(item, self.list_):
        #: List
        return getattr(self, 'list')(item)
    elif isinstance(item, tuple):
        #: Tuple
        return getattr(self, 'tuple')(item)
    elif isinstance(item, types.GeneratorType):
        #: Generator
        return getattr(self, 'generator')(item)
    elif isinstance(item, self.set_):
        #: Set
        return getattr(self, 'set')(item)
    elif isinstance(item, deque):
        #: Deque
        return getattr(self, 'deque')(item)
    elif isinstance(item, Sequence):
        #: Sequence
        return getattr(self, 'sequence')(item)
    #: Any other object
    return getattr(self, 'object')(item)
python
def setStr(self, name, n, value):
    """
    setStr(CHeaderMap self, std::string name, limix::muint_t n, std::string value)

    Parameters
    ----------
    name: std::string
    n: limix::muint_t
    value: std::string
    """
    return _core.CHeaderMap_setStr(self, name, n, value)
python
def _call_method(self, method_name, *args, **kwargs):
    """Call the corresponding method using RIBCL, RIS or REDFISH

    Make the decision to invoke the corresponding method using RIBCL,
    RIS or REDFISH way. In case of none, throw out ``NotImplementedError``
    """
    if self.use_redfish_only:
        if method_name in SUPPORTED_REDFISH_METHODS:
            the_operation_object = self.redfish
        else:
            raise NotImplementedError()
    else:
        the_operation_object = self.ribcl
        if 'Gen10' in self.model:
            if method_name in SUPPORTED_REDFISH_METHODS:
                the_operation_object = self.redfish
            else:
                if (self.is_ribcl_enabled is not None
                        and not self.is_ribcl_enabled):
                    raise NotImplementedError()
        elif ('Gen9' in self.model) and (method_name in SUPPORTED_RIS_METHODS):
            the_operation_object = self.ris

    method = getattr(the_operation_object, method_name)

    LOG.debug(self._("Using %(class)s for method %(method)s."),
              {'class': type(the_operation_object).__name__,
               'method': method_name})

    return method(*args, **kwargs)
java
public MapWritable getValueMapWritable(String label) {
    HadoopObject o = getHadoopObject(VALUE, label, ObjectUtil.MAP, "Map");
    if (o == null) {
        return null;
    }
    return (MapWritable) o.getObject();
}
python
def dump_bulk(cls, parent=None, keep_ids=True):
    """Dumps a tree branch to a python data structure."""
    serializable_cls = cls._get_serializable_model()
    if (
        parent
        and serializable_cls != cls
        and parent.__class__ != serializable_cls
    ):
        parent = serializable_cls.objects.get(pk=parent.pk)

    # a list of nodes: not really a queryset, but it works
    objs = serializable_cls.get_tree(parent)

    ret, lnk = [], {}
    for node, pyobj in zip(objs, serializers.serialize('python', objs)):
        depth = node.get_depth()
        # django's serializer stores the attributes in 'fields'
        fields = pyobj['fields']
        del fields['parent']

        # non-sorted trees have this
        if 'sib_order' in fields:
            del fields['sib_order']

        if 'id' in fields:
            del fields['id']

        newobj = {'data': fields}
        if keep_ids:
            newobj['id'] = pyobj['pk']

        if (not parent and depth == 1) or\
                (parent and depth == parent.get_depth()):
            ret.append(newobj)
        else:
            parentobj = lnk[node.parent_id]
            if 'children' not in parentobj:
                parentobj['children'] = []
            parentobj['children'].append(newobj)
        lnk[node.pk] = newobj
    return ret
python
def _show_final_overflow_message(self, row_overflow, col_overflow):
    """Displays overflow message after import in statusbar"""
    if row_overflow and col_overflow:
        overflow_cause = _("rows and columns")
    elif row_overflow:
        overflow_cause = _("rows")
    elif col_overflow:
        overflow_cause = _("columns")
    else:
        raise AssertionError(_("Import cell overflow missing"))

    statustext = \
        _("The imported data did not fit into the grid {cause}. "
          "It has been truncated. Use a larger grid for full import.").\
        format(cause=overflow_cause)
    post_command_event(self.main_window, self.StatusBarMsg, text=statustext)
java
@Override
public Long zremrangeByRank(final byte[] key, final long start, final long stop) {
    checkIsInMultiOrPipeline();
    client.zremrangeByRank(key, start, stop);
    return client.getIntegerReply();
}
java
public static String getSystemProperty(final String string) throws PrivilegedActionException {
    return AccessController.doPrivileged(new PrivilegedAction<String>() {
        @Override
        public String run() {
            return System.getProperty(string);
        }
    });
}
python
def close(self):
    """
    Closes all the iterators.

    This is particularly important if the iterators are files.
    """
    if hasattr(self, 'iterators'):
        for it in self.iterators:
            if hasattr(it, 'close'):
                it.close()
python
def compute_correction_factors(data, true_conductivity, elem_file, elec_file):
    """Compute correction factors for 2D rhizotron geometries, following
    Weigand and Kemna, 2017, Biogeosciences

    https://doi.org/10.5194/bg-14-921-2017

    Parameters
    ----------
    data : :py:class:`pandas.DataFrame`
        measured data
    true_conductivity : float
        Conductivity in S/m
    elem_file : string
        path to CRTomo FE mesh file (elem.dat)
    elec_file : string
        path to CRTomo FE electrode file (elec.dat)

    Returns
    -------
    correction_factors : Nx5 :py:class:`numpy.ndarray`
        measurement configurations and correction factors
        (a, b, m, n, correction_factor)
    """
    settings = {
        'rho': 100,
        'pha': 0,
        'elem': 'elem.dat',
        'elec': 'elec.dat',
        '2D': True,
        'sink_node': 100,
    }
    K = geometric_factors.compute_K_numerical(data, settings=settings)

    data = geometric_factors.apply_K(data, K)
    data = fixK.fix_sign_with_K(data)

    frequency = 100

    data_onef = data.query('frequency == {}'.format(frequency))
    rho_measured = data_onef['r'] * data_onef['k']

    rho_true = 1 / true_conductivity * 1e4
    correction_factors = rho_true / rho_measured

    collection = np.hstack((
        data_onef[['a', 'b', 'm', 'n']].values,
        np.abs(correction_factors)[:, np.newaxis]
    ))

    return collection
java
private static String asString(ArrayList<RePairSymbolRecord> symbolizedString) {
    StringBuffer res = new StringBuffer();
    RePairSymbolRecord s = symbolizedString.get(0);
    // since digrams are starting from the left symbol, symbol 0 is never NULL
    do {
        res.append(s.getPayload().toString()).append(" ");
        s = s.getNext();
    } while (null != s);
    return res.toString();
}
java
public List<CeQueueDto> selectByMainComponentUuid(DbSession session, String projectUuid) {
    return mapper(session).selectByMainComponentUuid(projectUuid);
}
python
def create_repo(self, name, description='', homepage='', private=False,
                has_issues=True, has_wiki=True, has_downloads=True,
                team_id=0, auto_init=False, gitignore_template=''):
    """Create a repository for this organization if the authenticated user
    is a member.

    :param str name: (required), name of the repository
    :param str description: (optional)
    :param str homepage: (optional)
    :param bool private: (optional), If ``True``, create a private
        repository. API default: ``False``
    :param bool has_issues: (optional), If ``True``, enable issues for
        this repository. API default: ``True``
    :param bool has_wiki: (optional), If ``True``, enable the wiki for
        this repository. API default: ``True``
    :param bool has_downloads: (optional), If ``True``, enable downloads
        for this repository. API default: ``True``
    :param int team_id: (optional), id of the team that will be granted
        access to this repository
    :param bool auto_init: (optional), auto initialize the repository.
    :param str gitignore_template: (optional), name of the template; this
        is ignored if auto_init = False.
    :returns: :class:`Repository <github3.repos.Repository>`

    .. warning: ``name`` should be no longer than 100 characters
    """
    url = self._build_url('repos', base_url=self._api)
    data = {'name': name, 'description': description,
            'homepage': homepage, 'private': private,
            'has_issues': has_issues, 'has_wiki': has_wiki,
            'has_downloads': has_downloads, 'auto_init': auto_init,
            'gitignore_template': gitignore_template}

    if team_id > 0:
        data.update({'team_id': team_id})

    json = self._json(self._post(url, data), 201)
    return Repository(json, self) if json else None
python
def line_iterator(readable_file, size=None):
    # type: (IO[bytes], Optional[int]) -> Iterator[bytes]
    """Iterate over the lines of a file.

    Implementation reads each char individually, which is not very
    efficient.

    Yields:
        str: a single line in the file.
    """
    read = readable_file.read
    line = []
    byte = b"1"
    if size is None or size < 0:
        while byte:
            byte = read(1)
            line.append(byte)
            if byte in b"\n":
                yield b"".join(line)
                del line[:]
    else:
        while byte and size:
            byte = read(1)
            size -= len(byte)
            line.append(byte)
            if byte in b"\n" or not size:
                yield b"".join(line)
                del line[:]
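A minimal demonstration with an in-memory stream; note the final, newline-less line is still yielded because `b"" in b"\n"` is true at EOF:

    import io
    list(line_iterator(io.BytesIO(b'a\nbb\nccc')))
    # -> [b'a\n', b'bb\n', b'ccc']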
java
private int ratioRemove(int[] h) {
    computeRatio();
    int minIndex = Integer.MAX_VALUE;
    double minValue = Double.MAX_VALUE;
    for (int i = 0; i < nbHash; i++) {
        if (ratio[h[i]] < minValue) {
            minValue = ratio[h[i]];
            minIndex = h[i];
        }
    }
    return minIndex;
}
python
def hicup_alignment_chart(self):
    """ Generate the HiCUP Aligned reads plot """

    # Specify the order of the different possible categories
    keys = OrderedDict()
    keys['Unique_Alignments_Read'] = {'color': '#2f7ed8', 'name': 'Unique Alignments'}
    keys['Multiple_Alignments_Read'] = {'color': '#492970', 'name': 'Multiple Alignments'}
    keys['Failed_To_Align_Read'] = {'color': '#0d233a', 'name': 'Failed To Align'}
    keys['Too_Short_To_Map_Read'] = {'color': '#f28f43', 'name': 'Too short to map'}

    # Construct a data structure for the plot - duplicate the samples for read 1 and read 2
    data = {}
    for s_name in self.hicup_data:
        data['{} Read 1'.format(s_name)] = {}
        data['{} Read 2'.format(s_name)] = {}
        data['{} Read 1'.format(s_name)]['Unique_Alignments_Read'] = self.hicup_data[s_name]['Unique_Alignments_Read_1']
        data['{} Read 2'.format(s_name)]['Unique_Alignments_Read'] = self.hicup_data[s_name]['Unique_Alignments_Read_2']
        data['{} Read 1'.format(s_name)]['Multiple_Alignments_Read'] = self.hicup_data[s_name]['Multiple_Alignments_Read_1']
        data['{} Read 2'.format(s_name)]['Multiple_Alignments_Read'] = self.hicup_data[s_name]['Multiple_Alignments_Read_2']
        data['{} Read 1'.format(s_name)]['Failed_To_Align_Read'] = self.hicup_data[s_name]['Failed_To_Align_Read_1']
        data['{} Read 2'.format(s_name)]['Failed_To_Align_Read'] = self.hicup_data[s_name]['Failed_To_Align_Read_2']
        data['{} Read 1'.format(s_name)]['Too_Short_To_Map_Read'] = self.hicup_data[s_name]['Too_Short_To_Map_Read_1']
        data['{} Read 2'.format(s_name)]['Too_Short_To_Map_Read'] = self.hicup_data[s_name]['Too_Short_To_Map_Read_2']

    # Config for the plot
    config = {
        'id': 'hicup_mapping_stats_plot',
        'title': 'HiCUP: Mapping Statistics',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads'
    }

    return bargraph.plot(data, keys, config)
python
def is_valid_requeue_limit(requeue_limit):
    """Checks if the given requeue limit is valid.

    A valid requeue limit is always greater than or equal to -1.
    """
    if not isinstance(requeue_limit, (int, long)):
        return False
    if requeue_limit <= -2:
        return False
    return True
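Quick sanity checks (the `long` check means this record is Python 2 code; under Python 3 only `int` would be needed):

    is_valid_requeue_limit(-1)    # True (-1 is the smallest valid value)
    is_valid_requeue_limit(-2)    # False
    is_valid_requeue_limit('5')   # False (not an int)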
java
protected Boolean parseInstanceStatus(InstanceStatus status) {
    if (status == null) {
        return null;
    }
    return status == InstanceStatus.UP;
}
java
public Observable<Page<NetworkSecurityGroupInner>> listAsync() {
    return listWithServiceResponseAsync()
        .map(new Func1<ServiceResponse<Page<NetworkSecurityGroupInner>>, Page<NetworkSecurityGroupInner>>() {
            @Override
            public Page<NetworkSecurityGroupInner> call(ServiceResponse<Page<NetworkSecurityGroupInner>> response) {
                return response.body();
            }
        });
}
java
public NotificationSettings getProjectNotificationSettings(int projectId) throws GitLabApiException {
    Response response = get(Response.Status.OK, null, "projects", projectId, "notification_settings");
    return (response.readEntity(NotificationSettings.class));
}
python
def get_pay_giftcard(self, rule_id):
    """
    Query a rule for distributing gift cards after payment.

    Details: https://mp.weixin.qq.com/wiki?id=mp1466494654_K9rNz

    :param rule_id: ID of the pay-for-membership rule
    :return: the rule for distributing gift cards after payment
    :rtype: dict
    """
    return self._post(
        'card/paygiftcard/getbyid',
        data={
            'rule_id': rule_id,
        },
        result_processor=lambda x: x['rule_info'],
    )
java
@Override
public View generateView(Context ctx) {
    VH viewHolder = getViewHolder(createView(ctx, null));

    // as we already know the type of our ViewHolder cast it to our type
    bindView(viewHolder, Collections.EMPTY_LIST);

    // return the bound view
    return viewHolder.itemView;
}
python
def image(self, height=1, module_width=1, add_quiet_zone=True):
    """Get the barcode as PIL.Image.

    By default the image is one pixel high and the number of modules pixels wide,
    with 10 empty modules added to each side to act as the quiet zone.
    The size can be modified by setting height and module_width, but if used in
    a web page it might be a good idea to do the scaling on client side.

    :param height: Height of the image in number of pixels.
    :param module_width: A multiplier for the width.
    :param add_quiet_zone: Whether to add 10 empty modules to each side of the barcode.

    :rtype: PIL.Image
    :return: A monochromatic image containing the barcode as black bars on white background.
    """
    if Image is None:
        raise Code128.MissingDependencyError("PIL module is required to use image method.")

    modules = list(self.modules)

    if add_quiet_zone:
        # Add ten space modules to each side of the barcode.
        modules = [1] * self.quiet_zone + modules + [1] * self.quiet_zone

    width = len(modules)

    img = Image.new(mode='1', size=(width, 1))
    img.putdata(modules)

    if height == 1 and module_width == 1:
        return img
    else:
        new_size = (width * module_width, height)
        return img.resize(new_size, resample=Image.NEAREST)
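A hedged usage sketch; the `Code128` constructor call is a hypothetical assumption about the surrounding class, and Pillow must be installed:

    barcode = Code128('Hello')                      # hypothetical constructor
    img = barcode.image(height=30, module_width=2)  # 30 px tall, 2 px per module
    img.save('code128.png')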
python
def get_var(self, name):
    """Return an nd array from model library"""
    # How many dimensions.
    rank = self.get_var_rank(name)
    # The shape array is fixed size
    shape = np.empty((MAXDIMS, ), dtype='int32', order='F')
    shape = self.get_var_shape(name)
    # there should be nothing here...
    assert sum(shape[rank:]) == 0
    # variable type name
    type_ = self.get_var_type(name)

    is_numpytype = type_ in TYPEMAP

    if is_numpytype:
        # Store the data in this type
        arraytype = ndpointer(dtype=TYPEMAP[type_],
                              ndim=rank,
                              shape=shape,
                              flags='F')
    # '' or b''
    elif not type_:
        raise ValueError('type not found for variable {}'.format(name))
    else:
        arraytype = self.make_compound_ctype(name)
    # Create a pointer to the array type
    data = arraytype()

    # The functions get_var_type/_shape/_rank are already wrapped with
    # python function converter, get_var isn't.
    c_name = create_string_buffer(name)
    get_var = self.library.get_var
    get_var.argtypes = [c_char_p, POINTER(arraytype)]
    get_var.restype = None
    # Get the array
    get_var(c_name, byref(data))
    if not data:
        logger.info("NULL pointer returned")
        return None

    if is_numpytype:
        # for now always a pointer, see python-subgrid for advanced examples
        array = np.ctypeslib.as_array(data)
    else:
        array = structs2pandas(data.contents)

    return array
java
@Override
public final PArray optArray(final String key, final PArray defaultValue) {
    PArray result = optArray(key);
    return result == null ? defaultValue : result;
}
java
protected void NCName() {
    m_ops.setOp(m_ops.getOp(OpMap.MAPINDEX_LENGTH), m_queueMark - 1);
    m_ops.setOp(OpMap.MAPINDEX_LENGTH, m_ops.getOp(OpMap.MAPINDEX_LENGTH) + 1);

    nextToken();
}
java
@Override
public Map<String, FieldTypes> get(final Collection<String> fieldNames, Collection<String> indexNames) {
    // Shortcut - if we don't select any fields we don't have to do any database query
    if (fieldNames.isEmpty()) {
        return Collections.emptyMap();
    }

    // We have to transform the field type database entries to make them usable for the user.
    //
    // [
    //   {
    //     "index_name": "graylog_0",
    //     "fields": [
    //       {"field_name": "message", "physical_type": "text"},
    //       {"field_name": "source", "physical_type": "keyword"}
    //     ]
    //   },
    //   {
    //     "index_name": "graylog_1",
    //     "fields": [
    //       {"field_name": "message", "physical_type": "text"},
    //     ]
    //   }
    // ]
    //
    // gets transformed into
    //
    // {
    //   "message": {
    //     "field_name": "message",
    //     "types": [
    //       {
    //         "type": "string",
    //         "properties": ["full-text-search"],
    //         "index_names": ["graylog_0", "graylog_1"]
    //       ]
    //   },
    //   "source": {
    //     "field_name": "source",
    //     "types": [
    //       {
    //         "type": "string",
    //         "properties": ["enumerable"],
    //         "index_names": ["graylog_0"]
    //       ]
    //   }
    // }

    // field-name -> {physical-type -> [index-name, ...]}
    final Map<String, SetMultimap<String, String>> fields = new HashMap<>();

    // Convert the data from the database to be indexed by field name and physical type
    getTypesStream(fieldNames, indexNames).forEach(types -> {
        final String indexName = types.indexName();

        types.fields().forEach(fieldType -> {
            final String fieldName = fieldType.fieldName();
            final String physicalType = fieldType.physicalType();

            if (fieldNames.contains(fieldName)) {
                if (indexNames.isEmpty() || indexNames.contains(indexName)) {
                    if (!fields.containsKey(fieldName)) {
                        fields.put(fieldName, HashMultimap.create());
                    }
                    fields.get(fieldName).put(physicalType, indexName);
                }
            }
        });
    });

    final ImmutableMap.Builder<String, FieldTypes> result = ImmutableMap.builder();

    for (Map.Entry<String, SetMultimap<String, String>> fieldNameEntry : fields.entrySet()) {
        final String fieldName = fieldNameEntry.getKey();
        final Map<String, Collection<String>> physicalTypes = fieldNameEntry.getValue().asMap();

        // Use the field type mapper to do the conversion between the Elasticsearch type and our logical type
        final Set<FieldTypes.Type> types = physicalTypes.entrySet().stream()
                .map(entry -> {
                    final String physicalType = entry.getKey();
                    final Set<String> indices = ImmutableSet.copyOf(entry.getValue());
                    return typeMapper.mapType(physicalType).map(t -> t.withIndexNames(indices));
                })
                .filter(Optional::isPresent)
                .map(Optional::get)
                .collect(Collectors.toSet());

        result.put(fieldName, FieldTypes.create(fieldName, types));
    }

    return result.build();
}
java
public static byte toByte(TypeOfAddress toa) {
    byte b = 0;

    // pack TON into bits 0-3 and NPI into bits 4-6
    if (toa.getTon() != null) {
        b |= (toa.getTon().toInt() << 0);
    }
    if (toa.getNpi() != null) {
        b |= (toa.getNpi().toInt() << 4);
    }
    // the high bit is always set
    b |= (1 << 7);

    return b;
}
java
public static <T> T queryBean(String sql, Class<T> beanType, Object[] params) throws YankSQLException {
    return queryBean(YankPoolManager.DEFAULT_POOL_NAME, sql, beanType, params);
}
python
def _clone_post_init(self, obj=None, **kwargs):
    """
    obj must be another Plottable instance. obj is used by Clone to properly
    transfer all attributes onto this object.
    """
    # Initialize the extra attributes
    if obj is None or obj is self:
        # We must be asrootpy-ing a ROOT object
        # or freshly init-ing a rootpy object
        for attr, value in Plottable.EXTRA_ATTRS.items():
            # Use the default value
            setattr(self, attr, value)
    else:
        for attr, value in Plottable.EXTRA_ATTRS.items():
            setattr(self, attr, getattr(obj, attr))

    # Create aliases from deprecated to current attributes
    for depattr, newattr in Plottable.EXTRA_ATTRS_DEPRECATED.items():
        setattr(Plottable, depattr,
                property(fget=Plottable._get_attr_depr(depattr, newattr),
                         fset=Plottable._set_attr_depr(depattr, newattr)))

    if obj is None or obj is self:
        # We must be asrootpy-ing a ROOT object
        # or freshly init-ing a rootpy object
        # Initialize style attrs to style of TObject
        if isinstance(self, ROOT.TAttLine):
            self._linecolor = Color(ROOT.TAttLine.GetLineColor(self))
            self._linestyle = LineStyle(ROOT.TAttLine.GetLineStyle(self))
            self._linewidth = ROOT.TAttLine.GetLineWidth(self)
        else:  # HistStack
            self._linecolor = Color(Plottable.DEFAULT_DECOR['linecolor'])
            self._linestyle = LineStyle(Plottable.DEFAULT_DECOR['linestyle'])
            self._linewidth = Plottable.DEFAULT_DECOR['linewidth']

        if isinstance(self, ROOT.TAttFill):
            self._fillcolor = Color(ROOT.TAttFill.GetFillColor(self))
            self._fillstyle = FillStyle(ROOT.TAttFill.GetFillStyle(self))
        else:  # HistStack
            self._fillcolor = Color(Plottable.DEFAULT_DECOR['fillcolor'])
            self._fillstyle = FillStyle(Plottable.DEFAULT_DECOR['fillstyle'])

        if isinstance(self, ROOT.TAttMarker):
            self._markercolor = Color(ROOT.TAttMarker.GetMarkerColor(self))
            self._markerstyle = MarkerStyle(ROOT.TAttMarker.GetMarkerStyle(self))
            self._markersize = ROOT.TAttMarker.GetMarkerSize(self)
        else:  # HistStack
            self._markercolor = Color(Plottable.DEFAULT_DECOR['markercolor'])
            self._markerstyle = MarkerStyle(Plottable.DEFAULT_DECOR['markerstyle'])
            self._markersize = Plottable.DEFAULT_DECOR['markersize']

    if obj is None:
        # Populate defaults if we are not asrootpy-ing existing object
        decor = dict(**Plottable.DEFAULT_DECOR)
        decor.update(Plottable.EXTRA_ATTRS)
        if 'color' in kwargs:
            decor.pop('linecolor', None)
            decor.pop('fillcolor', None)
            decor.pop('markercolor', None)
        decor.update(kwargs)
        self.decorate(**decor)
    else:
        # Initialize style attrs to style of the other object
        if isinstance(obj, ROOT.TAttLine):
            self.SetLineColor(obj.GetLineColor())
            self.SetLineStyle(obj.GetLineStyle())
            self.SetLineWidth(obj.GetLineWidth())
        if isinstance(obj, ROOT.TAttFill):
            self.SetFillColor(obj.GetFillColor())
            self.SetFillStyle(obj.GetFillStyle())
        if isinstance(obj, ROOT.TAttMarker):
            self.SetMarkerColor(obj.GetMarkerColor())
            self.SetMarkerStyle(obj.GetMarkerStyle())
            self.SetMarkerSize(obj.GetMarkerSize())
        if kwargs:
            self.decorate(**kwargs)
python
def send_array(
        socket, A=None, metadata=None, flags=0,
        copy=False, track=False, compress=None,
        chunksize=50 * 1000 * 1000
):
    """send a numpy array with metadata over zmq

    message is mostly multipart:
    metadata | array part 1 | array part 2, etc

    only metadata:
    metadata

    the chunksize roughly determines the size of the parts being sent
    if the chunksize is too big, you get an error like:
    zmq.error.Again: Resource temporarily unavailable
    """
    # create a metadata dictionary for the message
    md = {}
    # always add a timestamp
    md['timestamp'] = datetime.datetime.now().isoformat()
    # copy extra metadata
    if metadata:
        md.update(metadata)
    # if we don't have an array
    if A is None:
        # send only json
        md['parts'] = 0
        socket.send_json(md, flags)
        # and we're done
        return
    # support single values (empty shape)
    if isinstance(A, float) or isinstance(A, int):
        A = np.asarray(A)
    # add array metadata
    md['dtype'] = str(A.dtype)
    md['shape'] = A.shape
    # determine number of parts
    md['parts'] = int(np.prod(A.shape) // chunksize + 1)
    try:
        # If an array has a fill value assume it's an array with missings.
        # Store the fill_value in the metadata and fill the array before sending.
        # asscalar should work for scalar, 0d array or nd array of size 1
        md['fill_value'] = np.asscalar(A.fill_value)
        A = A.filled()
    except AttributeError:
        # no masked array, nothing to do
        pass
    # send json, followed by array (in x parts)
    socket.send_json(md, flags | zmq.SNDMORE)
    # although the check is not strictly necessary, we try to maintain fast
    # pointer transfer when there is only 1 part
    if md['parts'] == 1:
        msg = memoryview(np.ascontiguousarray(A))
        socket.send(msg, flags, copy=copy, track=track)
    else:
        # split array at first dimension and send parts
        for i, a in enumerate(np.array_split(A, md['parts'])):
            # Make a copy if required and pass along the memoryview
            msg = memoryview(np.ascontiguousarray(a))
            flags_ = flags
            if i != md['parts'] - 1:
                flags_ |= zmq.SNDMORE
            socket.send(msg, flags_, copy=copy, track=track)
    return
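A hedged usage sketch over a PUSH socket; the endpoint and metadata names are illustrative only:

    import zmq
    import numpy as np

    ctx = zmq.Context()
    sock = ctx.socket(zmq.PUSH)
    sock.connect('tcp://127.0.0.1:5555')
    send_array(sock, np.arange(10), metadata={'name': 'demo'})  # metadata + 1 part
    send_array(sock, metadata={'name': 'no-array'})             # metadata only, parts == 0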
java
public static void registerMbean(MBeanServer server, ModelMBean mbean, ObjectName name) {
    try {
        synchronized (LOCK) {
            if (server.isRegistered(name))
                JmxUtils.unregisterMbean(server, name);
            server.registerMBean(mbean, name);
        }
    } catch (Exception e) {
        logger.error("Error registering mbean:", e);
    }
}
python
def find_bled112_devices(cls):
    """Look for BLED112 dongles on this computer and start an instance on each one"""
    found_devs = []
    ports = serial.tools.list_ports.comports()
    for port in ports:
        if not hasattr(port, 'pid') or not hasattr(port, 'vid'):
            continue
        # Check if the device matches the BLED112's PID/VID combination
        if port.pid == 1 and port.vid == 9304:
            found_devs.append(port.device)
    return found_devs
python
def dados_qrcode(cfe):
    """Compiles the data that makes up the CF-e-SAT QRCode, as described in
    the official technical documentation **Guia para Geração do QRCode pelo
    Aplicativo Comercial**, from an ``ElementTree`` instance representing the
    CF-e-SAT XML tree.

    :param cfe: Instance of :py:mod:`xml.etree.ElementTree.ElementTree`.

    :return: String with the data payload to be used when generating the QRCode.
    :rtype: str

    For example, to generate the QRCode image [#qrcode]_:

    .. sourcecode:: python

        import xml.etree.ElementTree as ET
        import qrcode

        with open('CFe_1.xml', 'r') as fp:
            tree = ET.parse(fp)

        imagem = qrcode.make(dados_qrcode(tree))

    .. [#qrcode] https://pypi.python.org/pypi/qrcode
    """
    infCFe = cfe.getroot().find('./infCFe')
    cnpjcpf_consumidor = infCFe.findtext('dest/CNPJ') or \
        infCFe.findtext('dest/CPF') or ''
    return '|'.join([
        infCFe.attrib['Id'][3:],  # strip the "CFe" prefix
        '{}{}'.format(
            infCFe.findtext('ide/dEmi'),
            infCFe.findtext('ide/hEmi')),
        infCFe.findtext('total/vCFe'),
        cnpjcpf_consumidor,
        infCFe.findtext('ide/assinaturaQRCODE'),
    ])
python
def bfs(self, graph, start):
    """
    Performs BFS operation for eliminating useless loop transitions

    Args:
        graph (PDA): the PDA object
        start (PDA state): The PDA initial state

    Returns:
        list: A cleaned, smaller list of DFA states
    """
    newstatediag = {}
    # maintain a queue of states
    queue = []
    visited = []
    # push the initial state into the queue
    queue.append(start)
    while queue:
        # get the first state from the queue
        state = queue.pop(0)
        # mark it as visited
        visited.append(state.id)
        # enumerate all adjacent states and push the unvisited ones
        # into the queue
        for key in state.trans:
            if state.trans[key] != []:
                if key not in visited:
                    for nextstate in graph:
                        if graph[nextstate].id == key:
                            queue.append(graph[nextstate])
                            break
    i = 0
    for state in graph:
        if graph[state].id in visited:
            newstatediag[i] = graph[state]
            i = i + 1
    return newstatediag
python
def getRanking(self, profile, sampleFileName=None):
    """
    Returns a list of lists that orders all candidates in tiers from best to
    worst when we use MCMC approximation to compute Bayesian utilities for an
    election profile.

    :ivar Profile profile: A Profile object that represents an election profile.
    :ivar str sampleFileName: An optional argument for the name of the input
        file containing sample data. If a file name is given, this method will
        use the samples in the file instead of generating samples itself.
    """
    if sampleFileName != None:
        candScoresMap = self.getCandScoresMapFromSamplesFile(profile, sampleFileName)
    else:
        candScoresMap = self.getCandScoresMap(profile)

    # We generate a map that associates each score with the candidates that
    # have that score.
    reverseCandScoresMap = dict()
    for key, value in candScoresMap.items():
        if value not in reverseCandScoresMap.keys():
            reverseCandScoresMap[value] = [key]
        else:
            reverseCandScoresMap[value].append(key)

    # We sort the scores by either decreasing order or increasing order.
    if self.maximizeCandScore == True:
        sortedCandScores = sorted(reverseCandScoresMap.keys(), reverse=True)
    else:
        sortedCandScores = sorted(reverseCandScoresMap.keys())

    # We put the candidates into our ranking based on the order in which
    # their score appears
    ranking = []
    for candScore in sortedCandScores:
        for cand in reverseCandScoresMap[candScore]:
            ranking.append(cand)

    return ranking
python
def connect_put_namespaced_pod_proxy(self, name, namespace, **kwargs):  # noqa: E501
    """connect_put_namespaced_pod_proxy  # noqa: E501

    connect PUT requests to proxy of Pod  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_put_namespaced_pod_proxy(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the PodProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: Path is the URL path to use for the current proxy request to pod.
    :return: str
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.connect_put_namespaced_pod_proxy_with_http_info(name, namespace, **kwargs)  # noqa: E501
    else:
        (data) = self.connect_put_namespaced_pod_proxy_with_http_info(name, namespace, **kwargs)  # noqa: E501
        return data
java
static synchronized String getTimeZoneDisplay(TimeZone tz, boolean daylight, int style, Locale locale) {
    Object key = new TimeZoneDisplayKey(tz, daylight, style, locale);
    String value = (String) cTimeZoneDisplayCache.get(key);
    if (value == null) {
        // This is a very slow call, so cache the results.
        value = tz.getDisplayName(daylight, style, locale);
        cTimeZoneDisplayCache.put(key, value);
    }
    return value;
}
python
def hexdump(buf, num_bytes, offset=0, width=32):
    """Perform a hexdump of the buffer.

    Returns the hexdump as a canonically-formatted string.
    """
    ind = offset
    end = offset + num_bytes
    lines = []
    while ind < end:
        chunk = buf[ind:ind + width]
        actual_width = len(chunk)
        hexfmt = '{:02X}'
        blocksize = 4
        blocks = [hexfmt * blocksize for _ in range(actual_width // blocksize)]

        # Need to get any partial lines
        num_left = actual_width % blocksize  # noqa: S001 Fix false alarm
        if num_left:
            blocks += [hexfmt * num_left + '--' * (blocksize - num_left)]
        blocks += ['--' * blocksize] * (width // blocksize - len(blocks))
        hexoutput = ' '.join(blocks)
        printable = tuple(chunk)
        lines.append(' '.join((hexoutput.format(*printable),
                               str(ind).ljust(len(str(end))),
                               str(ind - offset).ljust(len(str(end))),
                               ''.join(chr(c) if 31 < c < 128 else '.'
                                       for c in chunk))))
        ind += width
    return '\n'.join(lines)
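A small worked example (width shrunk so the line fits; bytes 0-7 are non-printable, hence the dots):

    print(hexdump(bytes(range(8)), 8, width=8))
    # 00010203 04050607 0 0 ........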
java
public void setFloat(String key, float value) {
    checkKeyIsUniform(key);
    checkFloatNotNaNOrInfinity("value", value);
    NativeShaderData.setFloat(getNative(), key, value);
}
java
public Future<PutItemResult> putItemAsync(final PutItemRequest putItemRequest)
        throws AmazonServiceException, AmazonClientException {
    return executorService.submit(new Callable<PutItemResult>() {
        public PutItemResult call() throws Exception {
            return putItem(putItemRequest);
        }
    });
}
python
def _contiguous_offsets(self, offsets):
    """
    Sorts the input list of integer offsets,
    ensures that values are contiguous.
    """
    offsets.sort()
    for i in range(len(offsets) - 1):
        assert offsets[i] + 1 == offsets[i + 1], \
            "Offsets not contiguous: %s" % (offsets,)
    return offsets
java
public void destroy(boolean removeFromQueue) {
    if (mState == State.IDLE) {
        return;
    }
    if (removeFromQueue) {
        mThreadPoolExecutor.remove(this);
    }
    if (mState == State.DECODED) {
        mMemoryCache.put(getCacheKey(), mBitmap);
    }
    mBitmap = null;
    mDrawingOptions.inBitmap = null;
    // since tiles are pooled and reused, make sure to reset the cache key
    // or you'll render the wrong tile from cache
    mCacheKey = null;
    mState = State.IDLE;
    mListener.onTileDestroyed(this);
}
java
@Override
public void setup(AbstractInvokable parent) {
    @SuppressWarnings("unchecked")
    final FlatMapFunction<IT, OT> mapper =
        BatchTask.instantiateUserCode(this.config, userCodeClassLoader, FlatMapFunction.class);
    this.mapper = mapper;
    FunctionUtils.setFunctionRuntimeContext(mapper, getUdfRuntimeContext());
}
python
async def bluetooth(dev: Device, target, value):
    """Get or set bluetooth settings."""
    if target and value:
        await dev.set_bluetooth_settings(target, value)

    print_settings(await dev.get_bluetooth_settings())
python
def _phi2deriv(self, R, z, phi=0., t=0.):
    """
    NAME:
       _phi2deriv
    PURPOSE:
       evaluate the second azimuthal derivative for this potential
    INPUT:
       R - Galactocentric cylindrical radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       the second azimuthal derivative
    HISTORY:
       2016-06-15 - Written - Bovy (UofT)
    """
    if not self.isNonAxi:
        phi = 0.
    x, y, z = bovy_coords.cyl_to_rect(R, phi, z)
    if not self._aligned:
        raise NotImplementedError(
            "2nd potential derivatives of TwoPowerTriaxialPotential not implemented for rotated coordinate frames (non-trivial zvec and pa)")
    Fx = self._force_xyz(x, y, z, 0)
    Fy = self._force_xyz(x, y, z, 1)
    phixx = self._2ndderiv_xyz(x, y, z, 0, 0)
    phixy = self._2ndderiv_xyz(x, y, z, 0, 1)
    phiyy = self._2ndderiv_xyz(x, y, z, 1, 1)
    return R**2. * (numpy.sin(phi)**2. * phixx + numpy.cos(phi)**2. * phiyy
                    - 2. * numpy.cos(phi) * numpy.sin(phi) * phixy) \
        + R * (numpy.cos(phi) * Fx + numpy.sin(phi) * Fy)
java
public static Lock toLock(Entity entity) {
    return new Lock(entity.getKey().getName(),
            (String) entity.getProperty(TRANSACTION_PROPERTY),
            (Date) entity.getProperty(TIMESTAMP_PROPERTY));
}
java
private List<SimulatorEvent> processTaskAttemptCompletionEvent(TaskAttemptCompletionEvent event) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Processing task attempt completion event" + event);
    }

    long now = event.getTimeStamp();
    TaskStatus finalStatus = event.getStatus();
    TaskAttemptID taskID = finalStatus.getTaskID();
    boolean killedEarlier = orphanTaskCompletions.remove(taskID);
    if (!killedEarlier) {
        finishRunningTask(finalStatus, now);
    }
    return SimulatorEngine.EMPTY_EVENTS;
}
java
protected void addProperties(Element element, BeanDefinitionBuilder builder) {
    NamedNodeMap attributes = element.getAttributes();
    for (int i = 0; i < attributes.getLength(); i++) {
        Node node = attributes.item(i);
        String attrName = getNodeName(node);
        attrName = "class".equals(attrName) ? "clazz" : attrName;
        builder.addPropertyValue(attrName, node.getNodeValue());
    }
}
python
def delFromTimeVary(self, *params):
    '''
    Removes any number of parameters from time_vary for this instance.

    Parameters
    ----------
    params : string
        Any number of strings naming attributes to be removed from time_vary

    Returns
    -------
    None
    '''
    for param in params:
        if param in self.time_vary:
            self.time_vary.remove(param)
java
@Override
public StateConnection onCloseRead() {
    ConnectionProtocol request = request();

    if (request != null) {
        request.onCloseRead();
    }

    _sequenceClose.set(_sequenceRead.get());

    if (_sequenceFlush.get() < _sequenceClose.get()) {
        _isClosePending.set(true);

        if (_sequenceFlush.get() < _sequenceClose.get()) {
            return StateConnection.CLOSE_READ_S;
        } else {
            _isClosePending.set(false);
            return StateConnection.CLOSE;
        }
    } else {
        return StateConnection.CLOSE;
    }
}
java
@Deprecated
public static FileSystem getProxiedFileSystem(@NonNull final String userNameToProxyAs,
        Properties properties, URI fsURI) throws IOException {
    return getProxiedFileSystem(userNameToProxyAs, properties, fsURI, new Configuration());
}
java
public V retrieveOrCreate(K key, Factory<V> factory) {
    // Clean up stale entries on every put.
    // This should avoid a slow memory leak of reference objects.
    this.cleanUpStaleEntries();

    return retrieveOrCreate(key, factory, new FutureRef<V>());
}
java
private void emitWindowResult(W window) throws Exception {
    BaseRow aggResult = windowFunction.getWindowAggregationResult(window);
    if (sendRetraction) {
        previousState.setCurrentNamespace(window);
        BaseRow previousAggResult = previousState.value();

        // has emitted result for the window
        if (previousAggResult != null) {
            // current agg is not equal to the previous emitted, should emit retract
            if (!equaliser.equalsWithoutHeader(aggResult, previousAggResult)) {
                reuseOutput.replace((BaseRow) getCurrentKey(), previousAggResult);
                BaseRowUtil.setRetract(reuseOutput);
                // send retraction
                collector.collect(reuseOutput);
                // send accumulate
                reuseOutput.replace((BaseRow) getCurrentKey(), aggResult);
                BaseRowUtil.setAccumulate(reuseOutput);
                collector.collect(reuseOutput);
                // update previousState
                previousState.update(aggResult);
            }
            // if the previous agg equals to the current agg, no need to send retract and accumulate
        }
        // the first fire for the window, only send accumulate
        else {
            // send accumulate
            reuseOutput.replace((BaseRow) getCurrentKey(), aggResult);
            BaseRowUtil.setAccumulate(reuseOutput);
            collector.collect(reuseOutput);
            // update previousState
            previousState.update(aggResult);
        }
    } else {
        reuseOutput.replace((BaseRow) getCurrentKey(), aggResult);
        // no need to set header
        collector.collect(reuseOutput);
    }
}
java
public boolean hasValue(String property, String value) {
    boolean hasValue = false;
    if (has()) {
        Map<String, Object> fieldValues = buildFieldValues(property, value);
        TResult result = getDao().queryForFieldValues(fieldValues);
        try {
            hasValue = result.getCount() > 0;
        } finally {
            result.close();
        }
    }
    return hasValue;
}
java
public static String readable(Visitable visitable, ExecutionContext context) {
    // return visit(visitable, new ReadableVisitor()).getString();
    return visit(visitable, new JcrSql2Writer(context)).getString();
}
python
def credit(self, amount, debit_account, description,
           debit_memo="", credit_memo="", datetime=None):
    """
    Post a credit of 'amount' and a debit of -amount against this account
    and debit_account respectively.

    note amount must be non-negative.
    """
    assert amount >= 0
    return self.post(-amount, debit_account, description,
                     self_memo=credit_memo, other_memo=debit_memo,
                     datetime=datetime)
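A hedged usage sketch of the double-entry posting (`cash`, `expenses` and the behaviour of the underlying `post` method are assumptions about the surrounding ledger code):

    cash.credit(100, debit_account=expenses, description='Office supplies')
    # posts -100 against cash and, via post(), the offsetting +100 against expenses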
python
def optimize(self, loss, num_async_replicas=1, use_tpu=False):
    """Return a training op minimizing loss."""
    lr = learning_rate.learning_rate_schedule(self.hparams)
    if num_async_replicas > 1:
        log_info("Dividing learning rate by num_async_replicas: %d",
                 num_async_replicas)
    lr /= math.sqrt(float(num_async_replicas))
    train_op = optimize.optimize(loss, lr, self.hparams, use_tpu=use_tpu)
    return train_op
java
public Map<String, String> getUriVariablesForQueryWithPost(BullhornEntityInfo entityInfo,
        Set<String> fieldSet, QueryParams params) {
    Map<String, String> uriVariables = params.getParameterMap();
    this.addCommonUriVariables(fieldSet, entityInfo, uriVariables);
    return uriVariables;
}
python
def spin(self, use_thread=False):
    '''call callback for all data forever (until C-c)

    :param use_thread: use thread for spin (do not block)
    '''
    if use_thread:
        if self._thread is not None:
            # raising a bare string is invalid in Python 3; use an exception type
            raise RuntimeError('spin called twice')
        self._thread = threading.Thread(target=self._spin_internal)
        self._thread.setDaemon(True)
        self._thread.start()
    else:
        self._spin_internal()
java
public void readEofPacket() throws SQLException, IOException {
    Buffer buffer = reader.getPacket(true);
    switch (buffer.getByteAt(0)) {
        case EOF:
            buffer.skipByte();
            this.hasWarnings = buffer.readShort() > 0;
            this.serverStatus = buffer.readShort();
            break;
        case ERROR:
            ErrorPacket ep = new ErrorPacket(buffer);
            throw new SQLException("Could not connect: " + ep.getMessage(),
                    ep.getSqlState(), ep.getErrorNumber());
        default:
            throw new SQLException("Unexpected packet type " + buffer.getByteAt(0)
                    + " instead of EOF");
    }
}