language: stringclasses (2 values)
func_code_string: stringlengths (63 to 466k)
java
public ChannelBuffer formatQueryV1(final TSQuery query, final List<DataPoints[]> results, final List<Annotation> globals) { throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, "The requested API endpoint has not been implemented", this.getClass().getCanonicalName() + " has not implemented formatQueryV1"); }
java
@Override public IRundeckProject createFrameworkProjectStrict(final String projectName, final Properties properties) { return createFrameworkProjectInt(projectName,properties,true); }
java
public OvhTask serviceName_modem_blocIp_POST(String serviceName, OvhServiceStatusEnum status) throws IOException { String qPath = "/xdsl/{serviceName}/modem/blocIp"; StringBuilder sb = path(qPath, serviceName); HashMap<String, Object>o = new HashMap<String, Object>(); addBody(o, "status", status); String resp = exec(qPath, "POST", sb.toString(), o); return convertTo(resp, OvhTask.class); }
python
def _ExtractList(self, fields, ignores=(",",), terminators=()):
    """Extract a list from the given fields."""
    extracted = []
    i = 0
    for i, field in enumerate(fields):
        # Space-separated comma; ignore, but this is not a finished list.
        # Similar for any other specified ignores (eg, equals sign).
        if field in ignores:
            continue
        # However, some fields are specifically meant to terminate iteration.
        if field in terminators:
            break
        extracted.append(field.strip("".join(ignores)))
        # Check for continuation; this will either be a trailing comma or the
        # next field after this one being a comma. The lookahead here is a bit
        # nasty.
        if not (field.endswith(",") or
                set(fields[i + 1:i + 2]).intersection(ignores)):
            break
    return extracted, fields[i + 1:]
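A minimal standalone sketch of the lookahead idiom above (the sample fields are invented for illustration): slicing `fields[i + 1:i + 2]` one past the end never raises IndexError, it simply yields an empty list, so the continuation check degrades safely at the end of input.

# Hypothetical demo of the continuation lookahead used in _ExtractList.
fields = ["eth0,", "eth1", ",", "eth2"]
ignores, terminators = (",",), (";",)

extracted = []
i = 0
for i, field in enumerate(fields):
    if field in ignores:
        continue
    if field in terminators:
        break
    extracted.append(field.strip("".join(ignores)))
    # Past-the-end slice returns [] here, so the loop stops cleanly.
    if not (field.endswith(",") or set(fields[i + 1:i + 2]).intersection(ignores)):
        break

print(extracted)       # ['eth0', 'eth1', 'eth2']
print(fields[i + 1:])  # []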
python
def mime_type(filename):
    """
    Guess mime type for the given file name

    Note: this implementation uses the python_magic package, which is not
    thread-safe; as a workaround, a global lock is used so it can work in a
    threaded environment

    :param filename: file name to guess
    :return: str
    """
    # TODO: write lock-free mime_type function
    try:
        __mime_lock.acquire()
        extension = filename.split(".")
        extension = extension[len(extension) - 1]
        if extension == "woff2":
            return "application/font-woff2"
        if extension == "css":
            return "text/css"
        m = magic.from_file(filename, mime=True)
        # compatibility fix: some versions return bytes, others return str
        m = m.decode() if isinstance(m, bytes) else m
        if m == "text/plain":
            guessed_type = mimetypes.guess_type(filename)[0]  # for js-detection
            if guessed_type:
                return guessed_type
        return m
    finally:
        __mime_lock.release()
java
public void addStaticElVarValue(final String name, final String value) { check(!staticElValues.containsKey(name), "Duplicate el variable %s value declaration: %s (original: %s)", name, value, staticElValues.get(name)); check(!dynamicElValues.contains(name), "El variable %s can't be registered as static, because dynamic declaration already defined", name); staticElValues.put(name, value); }
python
def checkUpload(self, file_obj, full_path='/', overwrite=False):
    """Check whether it is possible to upload a file.

    >>> s = nd.checkUpload('~/flower.png', '/Picture/flower.png')

    :param file_obj: A file-like object to check whether possible to upload.
        You can pass a string as a file_obj or a real file object.
    :param str full_path: The full path to upload the file to, *including the
        file name*. If the destination directory does not yet exist, it will
        be created.
    :param overwrite: Whether to overwrite an existing file at the given path.
        (Default ``False``.)

    :return: ``True`` if possible to upload or ``False`` if impossible to upload.
    """
    try:
        file_obj = file_obj.name
    except AttributeError:
        pass  # file_obj is already a path string; use it as-is

    file_size = os.stat(file_obj).st_size
    now = datetime.datetime.now().isoformat()
    data = {'uploadsize': file_size,
            'overwrite': 'T' if overwrite else 'F',
            'getlastmodified': now,
            'dstresource': full_path,
            'userid': self.user_id,
            'useridx': self.useridx,
            }

    s, metadata = self.POST('checkUpload', data)
    if not s:
        print(metadata)
    return s
java
public static Controller.StreamCut decode(final String scope, final String stream, Map<Long, Long> streamCut) { return Controller.StreamCut.newBuilder().setStreamInfo(createStreamInfo(scope, stream)).putAllCut(streamCut).build(); }
java
@SuppressWarnings("nls") protected DataSource datasourceFromConfig(JdbcOptionsBean config) { Properties props = new Properties(); props.putAll(config.getDsProperties()); setConfigProperty(props, "jdbcUrl", config.getJdbcUrl()); setConfigProperty(props, "username", config.getUsername()); setConfigProperty(props, "password", config.getPassword()); setConfigProperty(props, "connectionTimeout", config.getConnectionTimeout()); setConfigProperty(props, "idleTimeout", config.getIdleTimeout()); setConfigProperty(props, "maxPoolSize", config.getMaximumPoolSize()); setConfigProperty(props, "maxLifetime", config.getMaxLifetime()); setConfigProperty(props, "minIdle", config.getMinimumIdle()); setConfigProperty(props, "poolName", config.getPoolName()); setConfigProperty(props, "autoCommit", config.isAutoCommit()); HikariConfig hikariConfig = new HikariConfig(props); return new HikariDataSource(hikariConfig); }
python
def kde(self, name, npoints=_npoints, **kwargs):
    """ Calculate kernel density estimator for parameter """
    data = self.get(name, **kwargs)
    return kde(data, npoints)
python
def reply_webapi(self, text, attachments=None, as_user=True, in_thread=None):
    """
    Send a reply to the sender using the Web API

    (This function supports formatted messages when using a bot integration)

    If the message was sent in a thread, answer in a thread by default.
    """
    if in_thread is None:
        in_thread = 'thread_ts' in self.body

    if in_thread:
        self.send_webapi(text, attachments=attachments, as_user=as_user, thread_ts=self.thread_ts)
    else:
        text = self.gen_reply(text)
        self.send_webapi(text, attachments=attachments, as_user=as_user)
java
@XmlElementDecl(namespace = "http://www.ibm.com/websphere/wim", name = "businessCategory") public JAXBElement<String> createBusinessCategory(String value) { return new JAXBElement<String>(_BusinessCategory_QNAME, String.class, null, value); }
python
def merge_link_object(serializer, data, instance):
    """Add a 'links' attribute to the data that maps field names to URLs.

    NOTE: This is the format that Ember Data supports, but alternative
    implementations are possible to support other formats.
    """
    link_object = {}

    if not getattr(instance, 'pk', None):
        # If instance doesn't have a `pk` field, we'll assume it doesn't
        # have a canonical resource URL to hang a link off of.
        # This generally only affects Ephemeral Objects.
        return data

    link_fields = serializer.get_link_fields()
    for name, field in six.iteritems(link_fields):
        # For included fields, omit link if there's no data.
        if name in data and not data[name]:
            continue

        link = getattr(field, 'link', None)
        if link is None:
            base_url = ''
            if settings.ENABLE_HOST_RELATIVE_LINKS:
                # if the resource isn't registered, this will default back to
                # using resource-relative urls for links.
                base_url = DynamicRouter.get_canonical_path(
                    serializer.get_resource_key(),
                    instance.pk
                ) or ''
            link = '%s%s/' % (base_url, name)
        # Default to DREST-generated relation endpoints.
        elif callable(link):
            link = link(name, field, data, instance)

        link_object[name] = link

    if link_object:
        data['links'] = link_object
    return data
java
public Pairtree getPairtree(final String aBucket, final String aBucketPath) throws PairtreeException { if (myAccessKey.isPresent() && mySecretKey.isPresent()) { final String accessKey = myAccessKey.get(); final String secretKey = mySecretKey.get(); if (myRegion.isPresent()) { return new S3Pairtree(myVertx, aBucket, aBucketPath, accessKey, secretKey, myRegion.get()); } else { return new S3Pairtree(myVertx, aBucket, aBucketPath, accessKey, secretKey); } } else { throw new PairtreeException(MessageCodes.PT_021); } }
java
public void useAttachmentServiceWithWebClient() throws Exception { final String serviceURI = "http://localhost:" + port + "/services/attachments/multipart"; JSONProvider provider = new JSONProvider(); provider.setIgnoreNamespaces(true); provider.setInTransformElements( Collections.singletonMap("Book", "{http://books}Book")); WebClient client = WebClient.create(serviceURI, Collections.singletonList(provider)); client.type("multipart/mixed").accept("multipart/mixed"); MultipartBody body = createMultipartBody(); System.out.println(); System.out.println("Posting Book attachments with a WebClient"); MultipartBody bodyResponse = client.post(body, MultipartBody.class); verifyMultipartResponse(bodyResponse); }
java
@Override public Optional<String> getMessage(final String code) { try { return Optional.of(messageSourceAccessor.getMessage(code)); } catch(NoSuchMessageException e) { return Optional.empty(); } }
java
public void attribute(String attribute, String expectedValue) { String value = checkAttribute(attribute, expectedValue, 0, 0); String reason = NO_ELEMENT_FOUND; if (value == null && getElement().is().present()) { reason = "Attribute doesn't exist"; } assertNotNull(reason, value); assertEquals("Attribute Mismatch", expectedValue, value); }
python
def read(self, n):
    """Read n bytes.

    Returns exactly n bytes of data unless the underlying raw IO
    stream reaches EOF.
    """
    buf = self._read_buf
    pos = self._read_pos
    end = pos + n
    if end <= len(buf):
        # Fast path: the data to read is fully buffered.
        self._read_pos += n
        return self._update_pos(buf[pos:end])
    # Slow path: read from the stream until enough bytes are read,
    # or until an EOF occurs or until read() would block.
    wanted = max(self.buffer_size, n)
    while len(buf) < end:
        chunk = self.raw.read(wanted)
        if not chunk:
            break
        buf += chunk
    self._read_buf = buf[end:]  # Save the extra data in the buffer.
    self._read_pos = 0
    return self._update_pos(buf[pos:end])
java
@Override public Long next() { if (!hasNext()) { throw new NoSuchElementException(toString() + " ended"); } current = next; next = next + increment; return current(); }
java
public static List<PropertyDescriptor> getPropertyDescriptorsWithGetters(final Class<?> clazz) { final List<PropertyDescriptor> relevantDescriptors = new ArrayList<PropertyDescriptor>(); final PropertyDescriptor[] propertyDescriptors = getPropertyDescriptors(clazz); if (propertyDescriptors != null) { for (final PropertyDescriptor propertyDescriptor : propertyDescriptors) { final Method getter = propertyDescriptor.getReadMethod(); final boolean getterExists = getter != null; if (getterExists) { final boolean getterFromObject = getter.getDeclaringClass() == Object.class; final boolean getterWithoutParams = getter.getParameterTypes().length == 0; if (!getterFromObject && getterWithoutParams) { relevantDescriptors.add(propertyDescriptor); } } } } return relevantDescriptors; }
java
public void setLocalStripedActive() throws ClientException, IOException { if (gSession.serverAddressList == null) { throw new ClientException(ClientException.CALL_PASSIVE_FIRST); } try { gLocalServer.setStripedActive(gSession.serverAddressList); } catch (UnknownHostException e) { throw new ClientException(ClientException.UNKNOWN_HOST); } }
java
public static TransformedInputRow of(final InputRow row) { if (row instanceof TransformedInputRow) { // re-use existing transformed input row. return (TransformedInputRow) row; } else { return new TransformedInputRow(row, row.getId()); } }
python
def reload(self, reload_timeout, save_config):
    """Reload the device."""
    PROCEED = re.compile(re.escape("Proceed with reload? [confirm]"))
    CONTINUE = re.compile(re.escape("Do you wish to continue?[confirm(y/n)]"))
    DONE = re.compile(re.escape("[Done]"))
    CONFIGURATION_COMPLETED = re.compile("SYSTEM CONFIGURATION COMPLETED")
    CONFIGURATION_IN_PROCESS = re.compile("SYSTEM CONFIGURATION IN PROCESS")
    # CONSOLE = re.compile("ios con[0|1]/RS?P[0-1]/CPU0 is now available")
    CONSOLE = re.compile("con[0|1]/(?:RS?P)?[0-1]/CPU0 is now available")
    CONSOLE_STBY = re.compile("con[0|1]/(?:RS?P)?[0-1]/CPU0 is in standby")
    RECONFIGURE_USERNAME_PROMPT = "[Nn][Oo] root-system username is configured"
    ROOT_USERNAME_PROMPT = "Enter root-system username\: "
    ROOT_PASSWORD_PROMPT = "Enter secret( again)?\: "
    # BOOT=disk0:asr9k-os-mbi-6.1.1/0x100305/mbiasr9k-rsp3.vm,1; \
    #     disk0:asr9k-os-mbi-5.3.4/0x100305/mbiasr9k-rsp3.vm,2;
    # Candidate Boot Image num 0 is disk0:asr9k-os-mbi-6.1.1/0x100305/mbiasr9k-rsp3.vm
    # Candidate Boot Image num 1 is disk0:asr9k-os-mbi-5.3.4/0x100305/mbiasr9k-rsp3.vm
    CANDIDATE_BOOT_IMAGE = "Candidate Boot Image num 0 is .*vm"
    NOT_COMMITTED = re.compile(re.escape("Some active software packages are not yet committed. Proceed?[confirm]"))
    RELOAD_NA = re.compile("Reload to the ROM monitor disallowed from a telnet line")

    #            0          1     2        3                         4               5
    events = [RELOAD_NA, DONE, PROCEED, CONFIGURATION_IN_PROCESS, self.rommon_re, self.press_return_re,
              #   6          7                        8                            9
              CONSOLE, CONFIGURATION_COMPLETED, RECONFIGURE_USERNAME_PROMPT, ROOT_USERNAME_PROMPT,
              #   10                    11               12       13   14               15
              ROOT_PASSWORD_PROMPT, self.username_re, TIMEOUT, EOF, self.reload_cmd, CANDIDATE_BOOT_IMAGE,
              #   16             17
              NOT_COMMITTED, CONSOLE_STBY, CONTINUE]

    transitions = [
        (RELOAD_NA, [0], -1, a_reload_na, 0),
        (CONTINUE, [0], 0, partial(a_send, "y\r"), 0),  # temp for testing
        (NOT_COMMITTED, [0], -1, a_not_committed, 10),
        (DONE, [0], 2, None, 120),
        (PROCEED, [2], 3, partial(a_send, "\r"), reload_timeout),
        # this needs to be verified
        (self.rommon_re, [0, 3], 3, partial(a_send_boot, "boot"), 600),
        (CANDIDATE_BOOT_IMAGE, [0, 3], 4, a_message_callback, 600),
        (CONSOLE, [0, 1, 3, 4], 5, None, 600),
        # This is required. Otherwise nothing more is displayed on the console
        (self.press_return_re, [5], 6, partial(a_send, "\r"), 300),
        # configure root username and password the same as used for device connection.
        (RECONFIGURE_USERNAME_PROMPT, [6, 7, 10], 8, None, 10),
        (ROOT_USERNAME_PROMPT, [8], 9, partial(a_send_username, self.device.node_info.username), 1),
        (ROOT_PASSWORD_PROMPT, [9], 9, partial(a_send_password, self.device.node_info.password), 1),
        (CONFIGURATION_IN_PROCESS, [6, 9], 10, None, 1200),
        (CONFIGURATION_COMPLETED, [10], -1, a_reconnect, 0),
        (CONSOLE_STBY, [4], -1, ConnectionStandbyConsole("Standby Console"), 0),
        (self.username_re, [7, 9], -1, a_return_and_reconnect, 0),
        (TIMEOUT, [0, 1, 2], -1, ConnectionAuthenticationError("Unable to reload"), 0),
        (EOF, [0, 1, 2, 3, 4, 5], -1, ConnectionError("Device disconnected"), 0),
        (TIMEOUT, [6], 7, partial(a_send, "\r"), 180),
        (TIMEOUT, [7], -1, ConnectionAuthenticationError("Unable to reconnect after reloading"), 0),
        (TIMEOUT, [10], -1, a_reconnect, 0),
    ]
    fsm = FSM("RELOAD", self.device, events, transitions, timeout=600)
    return fsm.run()
java
private synchronized void createReconstituteThreadPool() {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(tc, "createReconstituteThreadPool");

    if (_reconstituteThreadpool == null) {
        int maxThreadPoolSize;
        if (messageProcessor.getMessagingEngine().datastoreExists()) {
            // Data store. Proceed and calculate the number of threads to use for reconstitution.
            // Get the thread pool size from the custom property.
            maxThreadPoolSize = messageProcessor.getCustomProperties().get_max_reconstitute_threadpool_size();
            int noOfCores = CpuInfo.getAvailableProcessors();
            if (maxThreadPoolSize <= 0) {
                if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                    SibTr.info(tc, "INVALID_RECONSTITUTE_THREADPOOL_SIZE_CWSIP0068", new Object[] { maxThreadPoolSize });
                maxThreadPoolSize = noOfCores;
            }
            if (maxThreadPoolSize > noOfCores)
                SibTr.warning(tc, "INVALID_RECONSTITUTE_THREADPOOL_SIZE_CWSIP0069");
        } else {
            // File store: don't use multiple threads as it may lead to contention.
            maxThreadPoolSize = 1;
        }

        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.info(tc, "MAX_RECONSTITUTE_THREADPOOL_SIZE_CWSIP0070", new Object[] { maxThreadPoolSize });

        _reconstituteThreadpool = new ThreadPoolExecutor(maxThreadPoolSize, maxThreadPoolSize,
                Long.MAX_VALUE, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
    }

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(tc, "createReconstituteThreadPool");
}
python
async def save_form(self, form, request, **resources):
    """Save self form."""
    if not self.can_create and not self.resource:
        raise muffin.HTTPForbidden()

    if not self.can_edit and self.resource:
        raise muffin.HTTPForbidden()

    resource = self.resource or self.populate()
    form.populate_obj(resource)
    return resource
java
@Override public void handleException(@Nullable Throwable e, boolean endApplication) { final ReportBuilder builder = new ReportBuilder(); builder.exception(e) .customData(customData); if (endApplication) { builder.endApplication(); } builder.build(reportExecutor); }
python
def sortarai(datablock, s, Zdiff, **kwargs):
    """
    sorts data block into first_Z, first_I, etc.

    Parameters
    _________
    datablock : Pandas DataFrame with Thellier-Thellier type data
    s : specimen name
    Zdiff : if True, take difference in Z values instead of vector difference
            NB: this should always be False
    **kwargs :
        version : data model.  if not 3, assume data model = 2.5

    Returns
    _______
    araiblock : [first_Z, first_I, ptrm_check, ptrm_tail, zptrm_check, GammaChecks]
    field : lab field (in tesla)
    """
    if 'version' in list(kwargs.keys()) and kwargs['version'] == 3:
        dec_key, inc_key = 'dir_dec', 'dir_inc'
        Mkeys = ['magn_moment', 'magn_volume', 'magn_mass', 'magnitude']
        meth_key = 'method_codes'
        temp_key, dc_key = 'treat_temp', 'treat_dc_field'
        dc_theta_key, dc_phi_key = 'treat_dc_field_theta', 'treat_dc_field_phi'
        # convert dataframe to list of dictionaries
        datablock = datablock.to_dict('records')
    else:
        dec_key, inc_key = 'measurement_dec', 'measurement_inc'
        Mkeys = ['measurement_magn_moment', 'measurement_magn_volume',
                 'measurement_magn_mass', 'measurement_magnitude']
        meth_key = 'magic_method_codes'
        temp_key, dc_key = 'treatment_temp', 'treatment_dc_field'
        dc_theta_key, dc_phi_key = 'treatment_dc_field_theta', 'treatment_dc_field_phi'
    first_Z, first_I, zptrm_check, ptrm_check, ptrm_tail = [], [], [], [], []
    field, phi, theta = "", "", ""
    starthere = 0
    Treat_I, Treat_Z, Treat_PZ, Treat_PI, Treat_M = [], [], [], [], []
    ISteps, ZSteps, PISteps, PZSteps, MSteps = [], [], [], [], []
    GammaChecks = []  # comparison of pTRM direction acquired and lab field
    rec = datablock[0]
    for key in Mkeys:
        if key in list(rec.keys()) and rec[key] != "":
            momkey = key
            break
    # first find all the steps
    for k in range(len(datablock)):
        rec = datablock[k]
        temp = float(rec[temp_key])
        methcodes = []
        tmp = rec[meth_key].split(":")
        for meth in tmp:
            methcodes.append(meth.strip())
        if 'LT-T-I' in methcodes and 'LP-TRM' not in methcodes and 'LP-PI-TRM' in methcodes:
            Treat_I.append(temp)
            ISteps.append(k)
            if field == "":
                field = float(rec[dc_key])
            if phi == "":
                phi = float(rec[dc_phi_key])
                theta = float(rec[dc_theta_key])
        # stick first zero field stuff into first_Z
        if 'LT-NO' in methcodes:
            Treat_Z.append(temp)
            ZSteps.append(k)
        if 'LT-T-Z' in methcodes:
            Treat_Z.append(temp)
            ZSteps.append(k)
        if 'LT-PTRM-Z' in methcodes:
            Treat_PZ.append(temp)
            PZSteps.append(k)
        if 'LT-PTRM-I' in methcodes:
            Treat_PI.append(temp)
            PISteps.append(k)
        if 'LT-PTRM-MD' in methcodes:
            Treat_M.append(temp)
            MSteps.append(k)
        if 'LT-NO' in methcodes:
            dec = float(rec[dec_key])
            inc = float(rec[inc_key])
            st = float(rec[momkey])
            first_I.append([273, 0., 0., 0., 1])
            first_Z.append([273, dec, inc, st, 1])  # NRM step
    for temp in Treat_I:  # look through infield steps and find matching Z step
        if temp in Treat_Z:  # found a match
            istep = ISteps[Treat_I.index(temp)]
            irec = datablock[istep]
            methcodes = []
            tmp = irec[meth_key].split(":")
            for meth in tmp:
                methcodes.append(meth.strip())
            # take last record as baseline to subtract
            brec = datablock[istep - 1]
            zstep = ZSteps[Treat_Z.index(temp)]
            zrec = datablock[zstep]
            # sort out first_Z records
            if "LP-PI-TRM-IZ" in methcodes:
                ZI = 0
            else:
                ZI = 1
            dec = float(zrec[dec_key])
            inc = float(zrec[inc_key])
            st = float(zrec[momkey])
            first_Z.append([temp, dec, inc, st, ZI])
            # sort out first_I records
            try:
                idec = float(irec[dec_key])
                iinc = float(irec[inc_key])
                istr = float(irec[momkey])
            except TypeError as ex:
                raise Exception('Malformed data of some sort for dec/inc/moment in measurement: {}. '
                                'You must fix this before proceeding.\n Bad record: {}'.format(
                                    irec.get('measurement', ''), irec))
            X = dir2cart([idec, iinc, istr])
            BL = dir2cart([dec, inc, st])
            I = []
            for c in range(3):
                I.append((X[c] - BL[c]))
            if I[2] != 0:
                iDir = cart2dir(I)
                if Zdiff == 0:
                    first_I.append([temp, iDir[0], iDir[1], iDir[2], ZI])
                else:
                    first_I.append([temp, 0., 0., I[2], ZI])
                gamma = angle([iDir[0], iDir[1]], [phi, theta])
            else:
                first_I.append([temp, 0., 0., 0., ZI])
                gamma = 0.0
            # put in Gamma check (infield trm versus lab field)
            if 180. - gamma < gamma:
                gamma = 180. - gamma
            GammaChecks.append([temp - 273., gamma])
    for temp in Treat_PI:  # look through infield steps and find matching Z step
        step = PISteps[Treat_PI.index(temp)]
        rec = datablock[step]
        dec = float(rec[dec_key])
        inc = float(rec[inc_key])
        st = float(rec[momkey])
        brec = datablock[step - 1]  # take last record as baseline to subtract
        pdec = float(brec[dec_key])
        pinc = float(brec[inc_key])
        pint = float(brec[momkey])
        X = dir2cart([dec, inc, st])
        prevX = dir2cart([pdec, pinc, pint])
        I = []
        for c in range(3):
            I.append(X[c] - prevX[c])
        dir1 = cart2dir(I)
        if Zdiff == 0:
            ptrm_check.append([temp, dir1[0], dir1[1], dir1[2]])
        else:
            ptrm_check.append([temp, 0., 0., I[2]])
    # in case there are zero-field pTRM checks (not the SIO way)
    for temp in Treat_PZ:
        step = PZSteps[Treat_PZ.index(temp)]
        rec = datablock[step]
        dec = float(rec[dec_key])
        inc = float(rec[inc_key])
        st = float(rec[momkey])
        brec = datablock[step - 1]
        pdec = float(brec[dec_key])
        pinc = float(brec[inc_key])
        pint = float(brec[momkey])
        X = dir2cart([dec, inc, st])
        prevX = dir2cart([pdec, pinc, pint])
        I = []
        for c in range(3):
            I.append(X[c] - prevX[c])
        dir2 = cart2dir(I)
        zptrm_check.append([temp, dir2[0], dir2[1], dir2[2]])
    # get pTRM tail checks together
    for temp in Treat_M:
        # tail check step - just do a difference in magnitude!
        step = MSteps[Treat_M.index(temp)]
        rec = datablock[step]
        st = float(rec[momkey])
        if temp in Treat_Z:
            step = ZSteps[Treat_Z.index(temp)]
            brec = datablock[step]
            pint = float(brec[momkey])
            # X=dir2cart([dec,inc,st])
            # prevX=dir2cart([pdec,pinc,pint])
            # I=[]
            # for c in range(3):I.append(X[c]-prevX[c])
            # d=cart2dir(I)
            # ptrm_tail.append([temp,d[0],d[1],d[2]])
            # difference - if negative, negative tail!
            ptrm_tail.append([temp, 0, 0, st - pint])
        else:
            print(s, ' has a tail check with no first zero field step - check input file! for step', temp - 273.)
    #
    # final check
    #
    if len(first_Z) != len(first_I):
        print(len(first_Z), len(first_I))
        print(" Something wrong with this specimen! Better fix it or delete it ")
        input(" press return to acknowledge message")
    araiblock = (first_Z, first_I, ptrm_check, ptrm_tail, zptrm_check, GammaChecks)
    return araiblock, field
python
def _trim_adapters(fastq_files, out_dir, data):
    """
    for small insert sizes, the read length can be longer than the insert
    resulting in the reverse complement of the 3' adapter being sequenced.
    this takes adapter sequences and trims only the reverse complement
    of the adapter

    MYSEQUENCEAAAARETPADA -> MYSEQUENCEAAAA (no polyA trim)
    """
    to_trim = _get_sequences_to_trim(data["config"], SUPPORTED_ADAPTERS)
    if dd.get_trim_reads(data) == "fastp":
        out_files, report_file = _fastp_trim(fastq_files, to_trim, out_dir, data)
    else:
        out_files, report_file = _atropos_trim(fastq_files, to_trim, out_dir, data)
    # quality_format = _get_quality_format(data["config"])
    # out_files = replace_directory(append_stem(fastq_files, "_%s.trimmed" % name), out_dir)
    # log_file = "%s_log_cutadapt.txt" % splitext_plus(out_files[0])[0]
    # out_files = _cutadapt_trim(fastq_files, quality_format, to_trim, out_files, log_file, data)
    # if file_exists(log_file):
    #     content = open(log_file).read().replace(fastq_files[0], name)
    #     if len(fastq_files) > 1:
    #         content = content.replace(fastq_files[1], name)
    #     open(log_file, 'w').write(content)
    return out_files
java
public static Manifest getManifest(Location jarLocation) throws IOException { URI uri = jarLocation.toURI(); // Small optimization if the location is local if ("file".equals(uri.getScheme())) { JarFile jarFile = new JarFile(new File(uri)); try { return jarFile.getManifest(); } finally { jarFile.close(); } } // Otherwise, need to search it with JarInputStream JarInputStream is = new JarInputStream(new BufferedInputStream(jarLocation.getInputStream())); try { // This only looks at the first entry, which if is created with jar util, then it'll be there. Manifest manifest = is.getManifest(); if (manifest != null) { return manifest; } // Otherwise, slow path. Need to goes through the entries JarEntry jarEntry = is.getNextJarEntry(); while (jarEntry != null) { if (JarFile.MANIFEST_NAME.equals(jarEntry.getName())) { return new Manifest(is); } jarEntry = is.getNextJarEntry(); } } finally { is.close(); } return null; }
python
def FlagCxx11Features(filename, clean_lines, linenum, error):
    """Flag those c++11 features that we only allow in certain places.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]

    # Flag unapproved C++11 headers.
    include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
    if include and include.group(1) in ('cfenv',
                                        'condition_variable',
                                        'fenv.h',
                                        'future',
                                        'mutex',
                                        'thread',
                                        'chrono',
                                        'ratio',
                                        'regex',
                                        'system_error',
                                       ):
        error(filename, linenum, 'build/c++11', 5,
              ('<%s> is an unapproved C++11 header.') % include.group(1))

    # The only place where we need to worry about C++11 keywords and library
    # features in preprocessor directives is in macro definitions.
    if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line):
        return

    # These are classes and free functions. The classes are always
    # mentioned as std::*, but we only catch the free functions if
    # they're not found by ADL. They're alphabetical by header.
    for top_name in (
        # type_traits
        'alignment_of',
        'aligned_union',
        ):
        if Search(r'\bstd::%s\b' % top_name, line):
            error(filename, linenum, 'build/c++11', 5,
                  ('std::%s is an unapproved C++11 class or function. Send c-style '
                   'an example of where it would make your code more readable, and '
                   'they may let you use it.') % top_name)
python
def _create_app(self, color_depth, term='xterm'):
    """
    Create CommandLineInterface for this client.
    Called when the client wants to attach the UI to the server.
    """
    output = Vt100_Output(_SocketStdout(self._send_packet),
                          lambda: self.size, term=term, write_binary=False)

    self.client_state = self.pymux.add_client(
        input=self._pipeinput, output=output, connection=self, color_depth=color_depth)

    print('Start running app...')
    future = self.client_state.app.run_async()
    print('Start running app got future...', future)

    @future.add_done_callback
    def done(_):
        print('APP DONE.........')
        print(future.result())
        self._close_connection()
python
def _bucket_key(self):
    """
    Returns hash bucket key for the redis key
    """
    return "{}.size.{}".format(
        self.prefix,
        (self._hashed_key // 1000) if self._hashed_key > 1000 else self._hashed_key)
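A standalone sketch of the bucketing scheme above (the prefix and key values are invented for illustration): hashed keys above 1000 are collapsed into buckets of 1000, smaller ones keep a bucket of their own.

# Hypothetical demo of the hash-bucket key layout used above.
def bucket_key(prefix, hashed_key):
    return "{}.size.{}".format(
        prefix, (hashed_key // 1000) if hashed_key > 1000 else hashed_key)

print(bucket_key("cache", 42))      # cache.size.42
print(bucket_key("cache", 56789))   # cache.size.56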
java
public static ObjectInputStream newObjectInputStream(Path self, final ClassLoader classLoader) throws IOException { return IOGroovyMethods.newObjectInputStream(Files.newInputStream(self), classLoader); }
java
public void marshall(DeleteAliasRequest deleteAliasRequest, ProtocolMarshaller protocolMarshaller) { if (deleteAliasRequest == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(deleteAliasRequest.getOrganizationId(), ORGANIZATIONID_BINDING); protocolMarshaller.marshall(deleteAliasRequest.getEntityId(), ENTITYID_BINDING); protocolMarshaller.marshall(deleteAliasRequest.getAlias(), ALIAS_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } }
python
def action(method=None, **kwargs):
    """
    Decorator that turns a function or controller method into a kervi action.
    It is possible to call the action in other kervi processes or modules.

    @action
    def my_action(p):
        ...

    call it via Actions["my_action"](10)

    @action(action_id="action_1", name="This is my action")
    def my_action(p):
        ...

    call it via Actions["action_1"](10)

    :Keyword Arguments:
        * *action_id* (``str``) --
            The action_id is the id you use when you call the action.
            By default the action takes the name of the function,
            but you can override it with action_id.
        * *name* (``str``) -- Name to show in UI if the action is linked to a panel.
    """
    def action_wrap(f):
        action_id = kwargs.get("action_id", f.__name__)
        name = kwargs.get("name", action_id)
        if not _is_method(f):  # not "." in f.__qualname__:
            action = Action(f, action_id, name)
            Actions.add(action)
            return action
        else:
            qual_name = getattr(f, "__qualname__", None)
            owner_class = kwargs.get("controller_class", None)
            if owner_class:
                qual_name = owner_class + "." + f.__name__
            if qual_name:
                Actions.add_unbound(qual_name, action_id, name)
                setattr(f, "set_interrupt", _SetInterrupt(action_id))
            else:
                print("using upython? if yes you need to pass the name of the "
                      "controller class via the controller_class parameter.")
            return f

    if method:
        return action_wrap(method)
    else:
        return action_wrap
java
public OperationStatusResponseInner beginDeleteInstances(String resourceGroupName, String vmScaleSetName, List<String> instanceIds) { return beginDeleteInstancesWithServiceResponseAsync(resourceGroupName, vmScaleSetName, instanceIds).toBlocking().single().body(); }
java
protected boolean checkForSignIn() { Authentication authentication = SecurityContextHolder.getContext().getAuthentication(); if ((authentication != null) && authentication.isAuthenticated()) { LOG.debug("Security context contains CAS authentication"); return true; } return false; }
python
def create_slim_mapping(self, subset=None, subset_nodes=None, relations=None, disable_checks=False):
    """
    Create a dictionary that maps between all nodes in an ontology to a subset

    Arguments
    ---------
    ont : `Ontology`
        Complete ontology to be mapped. Assumed pre-filtered for relationship types
    subset : str
        Name of subset to map to, e.g. goslim_generic
    nodes : list
        If no named subset provided, subset is passed in as list of node ids
    relations : list
        List of relations to filter on
    disable_checks : bool
        Unless this is set, a check will prevent a mapping being generated with
        non-standard relations. The motivation here is that the ontology graph
        may include relations that it is inappropriate to propagate gene
        products over, e.g. transports, has-part

    Return
    ------
    dict
        maps all nodes in ont to one or more non-redundant nodes in subset

    Raises
    ------
    ValueError
        if the subset is empty
    """
    if subset is not None:
        subset_nodes = self.extract_subset(subset)
        logger.info("Extracting subset: {} -> {}".format(subset, subset_nodes))

    if subset_nodes is None or len(subset_nodes) == 0:
        raise ValueError("subset nodes is blank")
    subset_nodes = set(subset_nodes)
    logger.debug("SUBSET: {}".format(subset_nodes))

    # Use a sub-ontology for mapping
    subont = self
    if relations is not None:
        subont = self.subontology(relations=relations)

    if not disable_checks:
        for r in subont.relations_used():
            if r != 'subClassOf' and r != 'BFO:0000050' and r != 'subPropertyOf':
                raise ValueError("Not safe to propagate over a graph with edge type: {}".format(r))

    m = {}
    for n in subont.nodes():
        ancs = subont.ancestors(n, reflexive=True)
        ancs_in_subset = subset_nodes.intersection(ancs)
        m[n] = list(subont.filter_redundant(ancs_in_subset))
    return m
java
private static String[] readSMARTSPattern(String filename) throws Exception { InputStream ins = StandardSubstructureSets.class.getClassLoader().getResourceAsStream(filename); BufferedReader reader = new BufferedReader(new InputStreamReader(ins)); List<String> tmp = new ArrayList<String>(); String line; while ((line = reader.readLine()) != null) { if (line.startsWith("#") || line.trim().length() == 0) continue; String[] toks = line.split(":"); StringBuffer s = new StringBuffer(); for (int i = 1; i < toks.length - 1; i++) s.append(toks[i] + ":"); s.append(toks[toks.length - 1]); tmp.add(s.toString().trim()); } return tmp.toArray(new String[]{}); }
python
def create_input_for_numbered_sequences(headerDir, sourceDir, containers, maxElements):
    """Creates additional source- and header-files for the numbered sequence MPL-containers."""
    # Create additional container-list without "map".
    containersWithoutMap = containers[:]
    try:
        containersWithoutMap.remove('map')
    except ValueError:
        # We can safely ignore if "map" is not contained in 'containers'!
        pass
    # Create header/source-files.
    create_more_container_files(headerDir, ".hpp", maxElements, containers, containersWithoutMap)
    create_more_container_files(sourceDir, ".cpp", maxElements, containers, containersWithoutMap)
python
def vad_filter_features(vad_labels, features, filter_frames="trim_silence"):
    """Trim the spectrogram to remove silent head/tails from the speech sample.

    Keep all remaining frames, or either speech or non-speech only.

    @param: filter_frames: the value is either 'silence_only' (only keep the
        silent parts), 'speech_only' (keep the speech, remove everything else),
        'trim_silence' (trim silent heads and tails), or 'no_filter' (no filter
        is applied)
    """
    if not features.size:
        raise ValueError("vad_filter_features(): data sample is empty, "
                         "no features extraction is possible")

    vad_labels = numpy.asarray(vad_labels, dtype=numpy.int8)
    features = numpy.asarray(features, dtype=numpy.float64)
    features = numpy.reshape(features, (vad_labels.shape[0], -1))

    # logger.info("RatioVectorExtractor, vad_labels shape: %s", str(vad_labels.shape))
    # print("RatioVectorExtractor, features max: %f and min: %f" % (numpy.max(features), numpy.min(features)))

    # first, take the whole thing, in case there are problems later
    filtered_features = features

    # if VAD detection worked on this sample
    if vad_labels is not None and filter_frames != "no_filter":
        # make sure the size of VAD labels and spectrogram length match
        if len(vad_labels) == len(features):
            # take only speech frames, as in VAD speech frames are 1 and silence are 0
            speech, = numpy.nonzero(vad_labels)
            silences = None
            if filter_frames == "silence_only":
                # take only silent frames - those for which VAD gave zeros
                silences, = numpy.nonzero(vad_labels == 0)

            if len(speech):
                nzstart = speech[0]  # index of the first non-zero
                nzend = speech[-1]  # index of the last non-zero

                if filter_frames == "silence_only":  # extract only silent frames
                    # take only silent frames in-between the speech
                    silences = silences[silences > nzstart]
                    silences = silences[silences < nzend]
                    filtered_features = features[silences, :]
                elif filter_frames == "speech_only":
                    filtered_features = features[speech, :]
                else:  # when we take all
                    filtered_features = features[nzstart:nzend + 1, :]  # numpy slicing is a non-closed interval [)
        else:
            logger.error("vad_filter_features(): VAD labels should be the same length as energy bands")

    logger.info("vad_filter_features(): filtered_features shape: %s", str(filtered_features.shape))

    return filtered_features
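A tiny numpy sketch of the trim_silence branch above (the labels and features are invented for illustration): nonzero() finds the speech frames, and the first and last of them bound a slice that drops silent heads and tails.

# Hypothetical demo of the nonzero-based silence trimming used above.
import numpy

vad_labels = numpy.array([0, 0, 1, 0, 1, 1, 0, 0], dtype=numpy.int8)
features = numpy.arange(8.0).reshape(8, 1)  # one fake feature per frame

speech, = numpy.nonzero(vad_labels)
trimmed = features[speech[0]:speech[-1] + 1, :]  # keep frames 2..5 inclusive
print(trimmed.ravel())  # [2. 3. 4. 5.]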
java
public JsAPISignature createJsAPISignature(String url){ if(jsAPITicket == null || jsAPITicket.expired()) { getJsAPITicket(); } long timestamp = System.currentTimeMillis() / 1000; String nonce = RandomStringGenerator.getRandomStringByLength(16); String ticket = jsAPITicket.getTicket(); try { String signature = SHA1.getSHA1("jsapi_ticket=" + ticket + "&noncestr=" + nonce + "&timestamp=" + timestamp + "&url=" + url); JsAPISignature jsAPISignature = new JsAPISignature(); jsAPISignature.setAppId(wxClient.getClientId()); jsAPISignature.setNonce(nonce); jsAPISignature.setTimestamp(timestamp); jsAPISignature.setSignature(signature); jsAPISignature.setUrl(url); return jsAPISignature; } catch (AesException e) { logger.error("createJsAPISignature failed", e); throw new WxRuntimeException(999, e.getMessage()); } }
python
def connect(self):
    """Connect to beanstalkd server."""
    self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self._socket.settimeout(self._connect_timeout)
    SocketError.wrap(self._socket.connect, (self.host, self.port))
    self._socket.settimeout(None)
    self._socket_file = self._socket.makefile('rb')
java
@Override public int compareTo(Object obj) { if (obj instanceof URI) { return this.toString().compareTo(obj.toString()); } else { return -1; } }
java
public static void writeAll(GatheringByteChannel ch, ByteBuffer[] bbs, int offset, int length) throws IOException { long all = 0; for (int ii=0;ii<length;ii++) { all += bbs[offset+ii].remaining(); } int count = 0; while (all > 0) { long rc = ch.write(bbs, offset, length); if (rc == 0) { count++; } else { all -= rc; } if (count > 100) { throw new IOException("Couldn't write all."); } } }
java
@Override public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof IPubSub) { System.err.println("beanName:" + beanName + " bean:" + bean); Publisher.listen((IPubSub) bean, this.getRedis()); } return bean; }
python
def is_unwrapped(f):
    """If `f` was imported and then unwrapped, this function might return True.

    .. |is_unwrapped| replace:: :py:func:`is_unwrapped`
    """
    try:
        g = look_up(object_name(f))
        return g != f and unwrap(g) == f
    except (AttributeError, TypeError, ImportError):
        return False
java
public NotificationWithSubscribers withSubscribers(Subscriber... subscribers) { if (this.subscribers == null) { setSubscribers(new java.util.ArrayList<Subscriber>(subscribers.length)); } for (Subscriber ele : subscribers) { this.subscribers.add(ele); } return this; }
python
def do_printActivity(self, args):
    """Print scaling activity details"""
    parser = CommandArgumentParser("printActivity")
    parser.add_argument(dest='index', type=int, help='index of the activity to print')
    args = vars(parser.parse_args(args))

    index = args['index']
    activity = self.activities[index]
    pprint(activity)
java
protected void configureSecurity(final T builder, final String name) { final GrpcChannelProperties properties = getPropertiesFor(name); final Security security = properties.getSecurity(); if (properties.getNegotiationType() != NegotiationType.TLS // non-default || isNonNullAndNonBlank(security.getAuthorityOverride()) || isNonNullAndNonBlank(security.getCertificateChainPath()) || isNonNullAndNonBlank(security.getPrivateKeyPath()) || isNonNullAndNonBlank(security.getTrustCertCollectionPath())) { throw new IllegalStateException( "Security is configured but this implementation does not support security!"); } }
python
def _create_genome_regions(data):
    """Create whole genome contigs we want to process, only non-alts.

    Skips problem contigs like HLAs for downstream analysis.
    """
    work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "coverage", dd.get_sample_name(data)))
    variant_regions = os.path.join(work_dir, "target-genome.bed")
    with file_transaction(data, variant_regions) as tx_variant_regions:
        with open(tx_variant_regions, "w") as out_handle:
            for c in shared.get_noalt_contigs(data):
                out_handle.write("%s\t%s\t%s\n" % (c.name, 0, c.size))
    return variant_regions
java
@Override public void clearCache() { entityCache.clearCache(CommerceUserSegmentCriterionImpl.class); finderCache.clearCache(FINDER_CLASS_NAME_ENTITY); finderCache.clearCache(FINDER_CLASS_NAME_LIST_WITH_PAGINATION); finderCache.clearCache(FINDER_CLASS_NAME_LIST_WITHOUT_PAGINATION); }
python
def _stacklevel_above_module(mod_name):
    """
    Return the stack level (with 1 = caller of this function) of the first
    caller that is not defined in the specified module (e.g. "pywbem.cim_obj").

    The returned stack level can be used directly by the caller of this
    function as an argument for the stacklevel parameter of warnings.warn().
    """
    stacklevel = 2  # start with caller of our caller
    frame = inspect.stack()[stacklevel][0]  # stack() level is 0-based

    while True:
        if frame.f_globals.get('__name__', None) != mod_name:
            break
        stacklevel += 1
        frame = frame.f_back
    del frame

    return stacklevel
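A hedged sketch of why the computed stacklevel matters: warnings.warn() attributes the warning to the frame `stacklevel` levels up, so a library can blame its caller instead of itself. The module and function names below are invented for illustration.

# Minimal stdlib demo of the stacklevel mechanism exploited above.
import warnings

def library_helper():
    # stacklevel=2 attributes the warning to the caller of library_helper,
    # not to this line inside the library.
    warnings.warn("library_helper is deprecated", stacklevel=2)

def user_code():
    library_helper()  # the warning is reported against this line

user_code()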
python
def get_dicts_generator(word_min_freq=4,
                        char_min_freq=2,
                        word_ignore_case=False,
                        char_ignore_case=False):
    """Get word and character dictionaries from sentences.

    :param word_min_freq: The minimum frequency of a word.
    :param char_min_freq: The minimum frequency of a character.
    :param word_ignore_case: Word will be transformed to lower case before saving to dictionary.
    :param char_ignore_case: Character will be transformed to lower case before saving to dictionary.
    :return gen: A closure that accepts sentences and returns the dictionaries.
    """
    word_count, char_count = {}, {}

    def get_dicts(sentence=None, return_dict=False):
        """Update and return dictionaries for each sentence.

        :param sentence: A list of strings representing the sentence.
        :param return_dict: Returns the dictionaries if it is True.
        :return word_dict, char_dict, max_word_len:
        """
        if sentence is not None:
            for word in sentence:
                if not word:
                    continue
                if word_ignore_case:
                    word_key = word.lower()
                else:
                    word_key = word
                word_count[word_key] = word_count.get(word_key, 0) + 1
                for char in word:
                    if char_ignore_case:
                        char_key = char.lower()
                    else:
                        char_key = char
                    char_count[char_key] = char_count.get(char_key, 0) + 1
        if not return_dict:
            return None
        word_dict, char_dict = {'': 0, '<UNK>': 1}, {'': 0, '<UNK>': 1}
        max_word_len = 0
        for word, count in word_count.items():
            if count >= word_min_freq:
                word_dict[word] = len(word_dict)
                max_word_len = max(max_word_len, len(word))
        for char, count in char_count.items():
            if count >= char_min_freq:
                char_dict[char] = len(char_dict)
        return word_dict, char_dict, max_word_len

    return get_dicts
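A short usage sketch of the closure above (sentences invented for illustration; assumes get_dicts_generator as defined is in scope): counts accumulate across calls, and the dictionaries are only materialized when return_dict=True.

# Hypothetical usage of the closure-based dictionary builder.
get_dicts = get_dicts_generator(word_min_freq=1, char_min_freq=1)
get_dicts(["hello", "world"])   # accumulate counts, returns None
get_dicts(["hello", "again"])
word_dict, char_dict, max_word_len = get_dicts(return_dict=True)
print(max_word_len)  # 5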
java
public String getPrototypeName() { String name = getClass().getName(); if (name.startsWith(ORG_GEOMAJAS)) { name = name.substring(ORG_GEOMAJAS.length()); } name = name.replace(".dto.", ".impl."); return name.substring(0, name.length() - 4) + "Impl"; }
java
public FixedDelayBuilder<P> delay(String... delays) { for (String d : delays) { if (d != null) { delayProps.add(d); } } return this; }
python
def _merge_cfgnodes(self, cfgnode_0, cfgnode_1):
    """
    Merge two adjacent CFGNodes into one.

    :param CFGNode cfgnode_0: The first CFGNode.
    :param CFGNode cfgnode_1: The second CFGNode.
    :return: None
    """
    assert cfgnode_0.addr + cfgnode_0.size == cfgnode_1.addr
    addr0, addr1 = cfgnode_0.addr, cfgnode_1.addr
    new_node = cfgnode_0.merge(cfgnode_1)

    # Update the graph and the nodes dict accordingly
    if addr1 in self._nodes_by_addr:
        self._nodes_by_addr[addr1].remove(cfgnode_1)
        if not self._nodes_by_addr[addr1]:
            del self._nodes_by_addr[addr1]
    del self._nodes[cfgnode_1.block_id]

    self._nodes_by_addr[addr0].remove(cfgnode_0)
    if not self._nodes_by_addr[addr0]:
        del self._nodes_by_addr[addr0]
    del self._nodes[cfgnode_0.block_id]

    in_edges = list(self.graph.in_edges(cfgnode_0, data=True))
    out_edges = list(self.graph.out_edges(cfgnode_1, data=True))

    self.graph.remove_node(cfgnode_0)
    self.graph.remove_node(cfgnode_1)

    self.graph.add_node(new_node)
    for src, _, data in in_edges:
        self.graph.add_edge(src, new_node, **data)
    for _, dst, data in out_edges:
        self.graph.add_edge(new_node, dst, **data)

    # Put the new node into node dicts
    self._nodes[new_node.block_id] = new_node
    self._nodes_by_addr[addr0].append(new_node)
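A standalone sketch of the edge-rewiring pattern above using networkx directly (node names invented for illustration): the in-edges of the first node and the out-edges of the second are captured before removal, then re-attached to the merged node with their edge data preserved.

# Hypothetical demo of merging two adjacent graph nodes.
import networkx as nx

g = nx.DiGraph()
g.add_edge('pred', 'a', kind='fallthrough')
g.add_edge('a', 'b', kind='fallthrough')
g.add_edge('b', 'succ', kind='jump')

in_edges = list(g.in_edges('a', data=True))
out_edges = list(g.out_edges('b', data=True))
g.remove_node('a')
g.remove_node('b')
g.add_node('a+b')
for src, _, data in in_edges:
    g.add_edge(src, 'a+b', **data)   # keep the original edge attributes
for _, dst, data in out_edges:
    g.add_edge('a+b', dst, **data)

print(sorted(g.edges()))  # [('a+b', 'succ'), ('pred', 'a+b')]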
java
@SuppressWarnings("unchecked") public <T> Http2StreamChannelBootstrap attr(AttributeKey<T> key, T value) { if (key == null) { throw new NullPointerException("key"); } if (value == null) { synchronized (attrs) { attrs.remove(key); } } else { synchronized (attrs) { attrs.put(key, value); } } return this; }
python
def get_available_palettes(chosen_palette):
    ''' Given a chosen palette, returns tuple of those available,
        or None when not found.

        Because palette support of a particular level is almost always
        a superset of lower levels, this should return all available
        palettes.

        Returns:
            tuple or None: the available palettes, or None if not found.
    '''
    result = None
    try:
        result = ALL_PALETTES[:ALL_PALETTES.index(chosen_palette) + 1]
    except ValueError:
        pass
    return result
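A minimal sketch of the slice idiom above with an invented palette list: index() locates the chosen palette and the slice keeps it plus everything below it, mirroring "support for a level implies support for lower levels".

# Hypothetical demo; ALL_PALETTES here is invented for illustration.
ALL_PALETTES = ('basic', 'extended', 'truecolor')

def available(chosen):
    try:
        return ALL_PALETTES[:ALL_PALETTES.index(chosen) + 1]
    except ValueError:
        return None

print(available('extended'))  # ('basic', 'extended')
print(available('bogus'))     # None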
python
def is_locked(self, request: AxesHttpRequest, credentials: dict = None) -> bool:
    """
    Checks if the request or given credentials are locked.
    """
    if settings.AXES_LOCK_OUT_AT_FAILURE:
        return self.get_failures(request, credentials) >= settings.AXES_FAILURE_LIMIT
    return False
python
def optimizer_step(self, batch_info, device, model, rollout):
    """ Single optimization step for a model """
    batch_info.optimizer.zero_grad()
    batch_result = self.calculate_gradient(batch_info=batch_info, device=device, model=model, rollout=rollout)
    clip_gradients(batch_result, model, self.max_grad_norm)
    batch_info.optimizer.step(closure=None)
    self.post_optimization_step(batch_info, device, model, rollout)
    return batch_result
java
@Deprecated public static Class<?> getRawType(Type type) { if (type instanceof Class) { return (Class<?>) type; } else if (type instanceof ParameterizedType) { return getRawType(((ParameterizedType) type).getRawType()); } else if (type instanceof TypeVariable<?>) { return getRawType(((TypeVariable<?>) type).getBounds()[0]); } throw new IllegalStateException("unknown type: " + type); }
python
def get_composition(source, *fxns):
    """Compose several extractors together, on a source."""
    val = source
    for fxn in fxns:
        val = fxn(val)
    return val
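A usage sketch for the composition helper above (the extractor chain is invented for illustration; assumes get_composition as defined is in scope): the functions apply left to right over the source.

# Hypothetical usage of the pipeline-style composition.
text = "  42  "
result = get_composition(text, str.strip, int, lambda n: n * 2)
print(result)  # 84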
java
@GET @Path("/validate/{token}") @Produces(MediaType.APPLICATION_JSON) public Response validateAccount(@PathParam("token") String token) { try{ if (authServerLogic.validateAccount(token)) { return Response.ok().build(); } else { return Response.status(Status.NOT_FOUND).build(); } }catch (ServerDAO.DAOException e) { return fromDAOExpection(e); } }
python
def compare_datetimes(d1, d2):
    """ Compares two datetimes safely, whether they are timezone-naive or timezone-aware.

    If either datetime is naive it is converted to an aware datetime assuming UTC.

    Args:
      d1: first datetime.
      d2: second datetime.

    Returns:
      -1 if d1 < d2, 0 if they are the same, or +1 if d1 > d2.
    """
    if d1.tzinfo is None or d1.tzinfo.utcoffset(d1) is None:
        d1 = d1.replace(tzinfo=pytz.UTC)
    if d2.tzinfo is None or d2.tzinfo.utcoffset(d2) is None:
        d2 = d2.replace(tzinfo=pytz.UTC)

    if d1 < d2:
        return -1
    elif d1 > d2:
        return 1
    return 0
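A usage sketch (assumes compare_datetimes as defined above and pytz are available; the dates are invented for illustration): a naive datetime is normalized to UTC first, so the comparison succeeds where a direct < between naive and aware values would raise TypeError.

# Hypothetical usage of the safe naive/aware comparison.
import datetime
import pytz

naive = datetime.datetime(2020, 1, 1, 12, 0, 0)
aware = datetime.datetime(2020, 1, 1, 12, 0, 0, tzinfo=pytz.UTC)

print(compare_datetimes(naive, aware))                          # 0
print(compare_datetimes(naive, aware + datetime.timedelta(1)))  # -1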
python
def get_included_resources(request, serializer=None):
    """ Build a list of included resources. """
    include_resources_param = request.query_params.get('include') if request else None
    if include_resources_param:
        return include_resources_param.split(',')
    else:
        return get_default_included_resources_from_serializer(serializer)
java
public Subscription getSubscriptionFromRequest(HttpServletRequest request) { String tokenHeader = request.getHeader("Authorization"); if (tokenHeader == null || tokenHeader.indexOf("Token ") != 0) { LOG.info("Empty authorizationheader, or header does not start with 'Token ': " + tokenHeader); return null; } String tokenValue = tokenHeader.substring(tokenHeader.indexOf(" ")).trim(); LOG.info("Token value: " + tokenValue); if (tokenValue != null) { return subscriptionService.findSubscriptionByAuthorizationToken(tokenValue); } return null; }
java
public static String getResponseCacheHeaderSettings(final String contentType) { String parameter = MessageFormat.format(RESPONSE_CACHE_HEADER_SETTINGS, contentType); return get().getString(parameter); }
python
def close(self):
    """
    Closes the object.
    """
    if self._is_open:
        self.mode = None
        if self.handle:
            self.handle.close()
            self.handle = None
        self.filename = None
        self._is_open = False
        self.status = None
java
public void update(String server, Transaction t, boolean deleteMissingChildren) { on(server).update(t, deleteMissingChildren); }
java
public AbstractSheet<TRow, TColumn, TCell> setColumns(List<TColumn> columns) { this.columns = columns; return this; }
java
@SafeVarargs public static <T> void split(T[] src, T[]... parts) { int srcPos = 0; for (T[] dest : parts) { System.arraycopy(src, srcPos, dest, 0, dest.length); srcPos += dest.length; } }
python
def transaction_retry(max_retries=1):
    """Decorator for methods doing database operations.

    If the database operation fails, it will retry the operation
    at most ``max_retries`` times.
    """
    def _outer(fun):

        @wraps(fun)
        def _inner(*args, **kwargs):
            _max_retries = kwargs.pop('exception_retry_count', max_retries)
            for retries in count(0):
                try:
                    return fun(*args, **kwargs)
                except Exception:  # pragma: no cover
                    # Depending on the database backend used we can experience
                    # various exceptions. E.g. psycopg2 raises an exception
                    # if some operation breaks the transaction, so saving
                    # the task result won't be possible until we rollback
                    # the transaction.
                    if retries >= _max_retries:
                        raise
                    try:
                        rollback_unless_managed()
                    except Exception:
                        pass

        return _inner

    return _outer
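A usage sketch (assumes transaction_retry and its dependencies as defined above are in scope; the function body is elided): the decorated function is retried up to max_retries times, and the per-call 'exception_retry_count' keyword, which the wrapper pops before calling through, overrides the decorator default.

# Hypothetical usage of the retry decorator.
@transaction_retry(max_retries=2)
def store_result(result):
    ...  # database write that may break the transaction

store_result("ok")                           # retried up to 2 times
store_result("ok", exception_retry_count=5)  # retried up to 5 times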
python
def get_context_data(self, **kwargs):
    """Returns the template context, including the workflow class.

    This method should be overridden in subclasses to provide additional
    context data to the template.
    """
    context = super(WorkflowView, self).get_context_data(**kwargs)
    workflow = self.get_workflow()
    workflow.verify_integrity()
    context[self.context_object_name] = workflow
    next = self.request.GET.get(workflow.redirect_param_name)
    context['REDIRECT_URL'] = next
    context['layout'] = self.get_layout()
    # For consistency with Workflow class
    context['modal'] = 'modal' in context['layout']
    if ADD_TO_FIELD_HEADER in self.request.META:
        context['add_to_field'] = self.request.META[ADD_TO_FIELD_HEADER]
    return context
python
def list_launch_configurations(region=None, key=None, keyid=None, profile=None):
    '''
    List all Launch Configurations.

    CLI example::

        salt myminion boto_asg.list_launch_configurations
    '''
    ret = get_all_launch_configurations(region, key, keyid, profile)
    return [r.name for r in ret]
python
def cmd_output_add(self, args):
    '''add new output'''
    device = args[0]
    print("Adding output %s" % device)
    try:
        conn = mavutil.mavlink_connection(device, input=False, source_system=self.settings.source_system)
        conn.mav.srcComponent = self.settings.source_component
    except Exception:
        print("Failed to connect to %s" % device)
        return
    self.mpstate.mav_outputs.append(conn)
    try:
        mp_util.child_fd_list_add(conn.port.fileno())
    except Exception:
        pass
python
def detector(detector_type):
    """ Returns a detector of the specified type. """
    if detector_type == 'point_cloud_box':
        return PointCloudBoxDetector()
    elif detector_type == 'rgbd_foreground_mask_query':
        return RgbdForegroundMaskQueryImageDetector()
    elif detector_type == 'rgbd_foreground_mask':
        return RgbdForegroundMaskDetector()
    raise ValueError('Detector type %s not understood' % (detector_type))
python
def mutant_charts_for_feature(example_protos, feature_name, serving_bundles,
                              viz_params):
    """Returns JSON formatted for rendering all charts for a feature.

    Args:
      example_protos: The example protos to mutate.
      feature_name: The string feature name to mutate.
      serving_bundles: One `ServingBundle` object per model, that contains the
        information to make the serving request.
      viz_params: A `VizParams` object that contains the UI state of the request.

    Raises:
      InvalidUserInputError if `viz_params.feature_index_pattern` requests out of
      range indices for `feature_name` within `example_protos`.

    Returns:
      A JSON-able dict for rendering a single mutant chart, parsed in
      `tf-inference-dashboard.html`.
      {
        'chartType': 'numeric', # oneof('numeric', 'categorical')
        'data': [A list of data]  # parseable by vz-line-chart or vz-bar-chart
      }
    """

    def chart_for_index(index_to_mutate):
        mutant_features, mutant_examples = make_mutant_tuples(
            example_protos, original_feature, index_to_mutate, viz_params)

        charts = []
        for serving_bundle in serving_bundles:
            inference_result_proto = run_inference(mutant_examples, serving_bundle)
            charts.append(make_json_formatted_for_single_chart(
                mutant_features, inference_result_proto, index_to_mutate))
        return charts

    try:
        original_feature = parse_original_feature_from_example(
            example_protos[0], feature_name)
    except ValueError as e:
        return {
            'chartType': 'categorical',
            'data': []
        }

    indices_to_mutate = viz_params.feature_indices or range(
        original_feature.length)
    chart_type = ('categorical' if original_feature.feature_type == 'bytes_list'
                  else 'numeric')

    try:
        return {
            'chartType': chart_type,
            'data': [
                chart_for_index(index_to_mutate)
                for index_to_mutate in indices_to_mutate
            ]
        }
    except IndexError as e:
        raise common_utils.InvalidUserInputError(e)
python
def updateSeriesAttributes(request):
    '''
    This function handles the filtering of available series classes and
    seriesteachers when a series is chosen on the Substitute Teacher
    reporting form.
    '''
    if request.method == 'POST' and request.POST.get('event'):
        series_option = request.POST.get('event') or None
        seriesClasses = EventOccurrence.objects.filter(event__id=series_option)
        seriesTeachers = SeriesTeacher.objects.filter(event__id=series_option)
    else:
        # Only return attributes for valid requests
        return JsonResponse({})

    outClasses = {}
    for option in seriesClasses:
        outClasses[str(option.id)] = option.__str__()

    outTeachers = {}
    for option in seriesTeachers:
        outTeachers[str(option.id)] = option.__str__()

    return JsonResponse({
        'id_occurrences': outClasses,
        'id_replacedStaffMember': outTeachers,
    })
java
private final int m() { int n = 0; int i = k0; while (true) { if (i > j) return n; if (!cons(i)) break; i++; } i++; while (true) { while (true) { if (i > j) return n; if (cons(i)) break; i++; } i++; n++; while (true) { if (i > j) return n; if (!cons(i)) break; i++; } i++; } }
java
public void eInit(XtendTypeDeclaration container, String name, String modifier, IJvmTypeProvider context) { setTypeResolutionContext(context); if (this.sarlField == null) { this.container = container; this.sarlField = SarlFactory.eINSTANCE.createSarlField(); this.sarlField.setAnnotationInfo(XtendFactory.eINSTANCE.createXtendMember()); this.sarlField.setName(name); if (Strings.equal(modifier, "var") || Strings.equal(modifier, "val")) { this.sarlField.getModifiers().add(modifier); } else { throw new IllegalStateException("Invalid modifier"); } container.getMembers().add(this.sarlField); } }
python
def search_raw(self, query, indices=None, doc_types=None, headers=None, **query_params):
    """Execute a search against one or more indices to get the search hits.

    `query` must be a Search object, a Query object, or a custom
    dictionary of search parameters using the query DSL to be passed
    directly.
    """
    from .query import Search, Query

    if isinstance(query, Query):
        query = query.search()
    if isinstance(query, Search):
        query = query.serialize()
    body = self._encode_query(query)
    path = self._make_path(indices, doc_types, "_search")
    return self._send_request('GET', path, body, params=query_params, headers=headers)
python
def _check_submodule_status(root, submodules):
    """check submodule status

    Has three return values:

    'missing' - submodules are absent
    'unclean' - submodules have unstaged changes
    'clean' - all submodules are up to date
    """
    if hasattr(sys, "frozen"):
        # frozen via py2exe or similar, don't bother
        return 'clean'

    if not os.path.exists(os.path.join(root, '.git')):
        # not in git, assume clean
        return 'clean'

    for submodule in submodules:
        if not os.path.exists(submodule):
            return 'missing'

    # Popen can't handle unicode cwd on Windows Python 2
    if sys.platform == 'win32' and sys.version_info[0] < 3 \
            and not isinstance(root, bytes):
        root = root.encode(sys.getfilesystemencoding() or 'ascii')
    # check with git submodule status
    proc = subprocess.Popen('git submodule status',
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            shell=True,
                            cwd=root)
    status, _ = proc.communicate()
    status = status.decode("ascii", "replace")

    for line in status.splitlines():
        if line.startswith('-'):
            return 'missing'
        elif line.startswith('+'):
            return 'unclean'

    return 'clean'
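A sketch of the status-line parsing above with invented sample output: git submodule status prefixes each line with '-' (not initialized), '+' (checked-out commit differs from the recorded one), or a space (clean), which is exactly what the loop keys on.

# Hypothetical demo of classifying 'git submodule status' output.
sample = (
    "-a1b2c3 ext/missing\n"
    "+d4e5f6 ext/modified (v1.0-2-gd4e5f6)\n"
    " 789abc ext/clean (v2.1)\n"
)

def classify(status):
    for line in status.splitlines():
        if line.startswith('-'):
            return 'missing'
        elif line.startswith('+'):
            return 'unclean'
    return 'clean'

print(classify(sample))  # missing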
java
public static ExpectationFailed of(Throwable cause) { if (_localizedErrorMsg()) { return of(cause, defaultMessage(EXPECTATION_FAILED)); } else { touchPayload().cause(cause); return _INSTANCE; } }
python
def voight_painting(h):
    """Paint haplotypes, assigning a unique integer to each shared haplotype
    prefix.

    Parameters
    ----------
    h : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array.

    Returns
    -------
    painting : ndarray, int, shape (n_variants, n_haplotypes)
        Painting array.
    indices : ndarray, int, shape (n_haplotypes,)
        Haplotype indices after sorting by prefix.
    """

    # check inputs
    # N.B., ensure int8 so we can use cython optimisation
    h = HaplotypeArray(np.asarray(h), copy=False)
    if h.max() > 1:
        raise NotImplementedError('only biallelic variants are supported')
    if h.min() < 0:
        raise NotImplementedError('missing calls are not supported')

    # sort by prefix
    indices = h.prefix_argsort()
    h = np.take(h, indices, axis=1)

    # paint
    painting = paint_shared_prefixes(memoryview_safe(np.asarray(h)))

    return painting, indices
python
def _list_superclasses(class_def):
    """Return a list of the superclasses of the given class"""
    superclasses = class_def.get('superClasses', [])
    if superclasses:
        # Make sure to duplicate the list
        return list(superclasses)

    sup = class_def.get('superClass', None)
    if sup:
        return [sup]
    else:
        return []
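A minimal sketch of the normalization above (the class definitions are invented for illustration): both the plural 'superClasses' shape and the legacy singular 'superClass' shape collapse to a plain list, and the plural case is copied so the caller never aliases the input.

# Hypothetical demo of normalizing the two superclass shapes.
def list_superclasses(class_def):
    superclasses = class_def.get('superClasses', [])
    if superclasses:
        return list(superclasses)  # copy, don't alias the input
    sup = class_def.get('superClass', None)
    return [sup] if sup else []

print(list_superclasses({'superClasses': ['V', 'E']}))  # ['V', 'E']
print(list_superclasses({'superClass': 'V'}))           # ['V']
print(list_superclasses({}))                            # []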
java
public List<File> scan(Resource resource) { Scanner scanner = buildContext.newScanner(new File(resource.getDirectory()), true); setupScanner(scanner, resource); scanner.scan(); List<File> files = new ArrayList<File>(); for (String file : scanner.getIncludedFiles()) { files.add(new File(resource.getDirectory(), file)); } return files; }
java
public static CProduct[] findByGroupId_PrevAndNext(long CProductId, long groupId, OrderByComparator<CProduct> orderByComparator) throws com.liferay.commerce.product.exception.NoSuchCProductException { return getPersistence() .findByGroupId_PrevAndNext(CProductId, groupId, orderByComparator); }
python
def set_target(self, target: EventDispatcherBase) -> None: """ This method should be called by the event dispatcher that dispatches this event to set its target property. Args: target (EventDispatcherBase): The event dispatcher that will dispatch this event. Raises: PermissionError: If the target property of the event has already been set. TypeError: If `target` is not an `EventDispatcherBase` instance. """ if self._target is not None: raise PermissionError("The target property already has a valid value.") if not isinstance(target, EventDispatcherBase): raise TypeError("Invalid target type: {}".format(target)) self._target = target
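A sketch of the dispatch-side contract; `MyDispatcher` and `Event` are hypothetical names introduced for illustration, and only `EventDispatcherBase` comes from the snippet itself:

class MyDispatcher(EventDispatcherBase):  # concrete subclass, assumed to exist
    def dispatch_event(self, event):
        event.set_target(self)  # first call succeeds
        # ... notify registered listeners here ...

dispatcher = MyDispatcher()
event = Event('change')           # hypothetical event class
dispatcher.dispatch_event(event)
event.set_target(dispatcher)      # second call raises PermissionError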
java
public boolean eq(LongIntSortedVector other) { // This is slow, but correct. LongIntSortedVector v1 = LongIntSortedVector.getWithNoZeroValues(this); LongIntSortedVector v2 = LongIntSortedVector.getWithNoZeroValues(other); if (v2.size() != v1.size()) { return false; } for (LongIntEntry ve : v1) { if (!Primitives.equals(ve.get(), v2.get(ve.index()))) { return false; } } for (LongIntEntry ve : v2) { if (!Primitives.equals(ve.get(), v1.get(ve.index()))) { return false; } } return true; }
java
public final String getLikelyEndContextMismatchCause(SanitizedContentKind contentKind) { Preconditions.checkArgument(!isValidEndContextForContentKind(contentKind)); if (contentKind == SanitizedContentKind.ATTRIBUTES) { // Special error message for ATTRIBUTES since it has some specific logic. return "an unterminated attribute value, or ending with an unquoted attribute"; } switch (state) { case HTML_TAG_NAME: case HTML_TAG: case HTML_ATTRIBUTE_NAME: case HTML_NORMAL_ATTR_VALUE: return "an unterminated HTML tag or attribute"; case CSS: return "an unclosed style block or attribute"; case JS: case JS_LINE_COMMENT: // Line comments are terminated by end of input. return "an unclosed script block or attribute"; case CSS_COMMENT: case HTML_COMMENT: case JS_BLOCK_COMMENT: return "an unterminated comment"; case CSS_DQ_STRING: case CSS_SQ_STRING: case JS_DQ_STRING: case JS_SQ_STRING: return "an unterminated string literal"; case URI: case CSS_URI: case CSS_DQ_URI: case CSS_SQ_URI: return "an unterminated or empty URI"; case JS_REGEX: return "an unterminated regular expression"; default: if (templateNestDepth != 0) { return "an unterminated <template> element"; } else { return "unknown to compiler"; } } }
java
public <T> T doAs(PrivilegedAction<T> action) { return Subject.doAs(null, action); }
java
public Javalin start() {
        Util.logJavalinBanner(this.config.showJavalinBanner);
        JettyUtil.disableJettyLogger();
        long startupTimer = System.currentTimeMillis();
        if (server.getStarted()) {
            throw new IllegalStateException("Cannot call start() again on a started server.");
        }
        server.setStarted(true);
        Util.printHelpfulMessageIfLoggerIsMissing();
        eventManager.fireEvent(JavalinEvent.SERVER_STARTING);
        try {
            log.info("Starting Javalin ...");
            server.start(servlet, wsServlet);
            log.info("Javalin started in " + (System.currentTimeMillis() - startupTimer) + "ms \\o/");
            eventManager.fireEvent(JavalinEvent.SERVER_STARTED);
        } catch (Exception e) {
            log.error("Failed to start Javalin");
            eventManager.fireEvent(JavalinEvent.SERVER_START_FAILED);
            if (Boolean.TRUE.equals(server.server().getAttribute("is-default-server"))) {
                stop(); // stop if the server is the default server; otherwise the caller is responsible for stopping
            }
            if (e.getMessage() != null && e.getMessage().contains("Failed to bind to")) {
                throw new RuntimeException("Port already in use. Make sure no other process is using port " + server.getServerPort() + " and try again.", e);
            } else if (e.getMessage() != null && e.getMessage().contains("Permission denied")) {
                throw new RuntimeException("Ports 1-1023 require elevated privileges (the process must be started by an admin).", e);
            }
            throw new RuntimeException(e);
        }
        return this;
    }
python
def orthonormal_initializer(output_size, input_size, debug=False):
    """adopted from Timothy Dozat https://github.com/tdozat/Parser/blob/master/lib/linalg.py

    Parameters
    ----------
    output_size : int
    input_size : int
    debug : bool
        Whether to skip the orthonormal refinement and return a plain
        random matrix instead

    Returns
    -------
    Q : np.ndarray
        The orthonormal weight matrix of output_size x input_size
    """
    print((output_size, input_size))
    if debug:
        Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)
        return np.transpose(Q.astype(np.float32))

    I = np.eye(output_size)
    lr = .1
    eps = .05 / (output_size + input_size)
    success = False
    tries = 0
    while not success and tries < 10:
        Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)
        for i in range(100):
            QTQmI = Q.T.dot(Q) - I
            loss = np.sum(QTQmI ** 2 / 2)
            Q2 = Q ** 2
            Q -= lr * Q.dot(QTQmI) / (
                np.abs(Q2 + Q2.sum(axis=0, keepdims=True) + Q2.sum(axis=1, keepdims=True) - 1) + eps)
            if np.max(Q) > 1e6 or loss > 1e6 or not np.isfinite(loss):
                tries += 1
                lr /= 2
                break
        else:
            # Only mark success when the refinement loop runs to completion;
            # the original set `success = True` unconditionally, so a diverging
            # run (which hits the break above) still ended the retry loop.
            success = True
    if success:
        print(('Orthogonal pretrainer loss: %.2e' % loss))
    else:
        print('Orthogonal pretrainer failed, using non-orthogonal random matrix')
        Q = np.random.randn(input_size, output_size) / np.sqrt(output_size)
    return np.transpose(Q.astype(np.float32))
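A quick shape and orthonormality check on the returned matrix; since the function returns the transpose of Q, the rows of the result should be approximately orthonormal whenever the refinement converged:

import numpy as np

W = orthonormal_initializer(output_size=4, input_size=6)
assert W.shape == (4, 6) and W.dtype == np.float32
# W.dot(W.T) equals Q.T.dot(Q), which the loop drives towards the identity.
print(np.round(W.dot(W.T), 2))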
python
def getRaw(self, instance, **kwargs):
    """Returns the raw field value (possibly wrapped in a BaseUnit)
    """
    value = ObjectField.get(self, instance, **kwargs)
    # getattr(instance, "Remarks") returns a BaseUnit
    if callable(value):
        value = value()
    return value
python
def append(self, new_points_len):
    '''Allocate memory for a new run and return a reference to that memory
    wrapped in an array of size ``(new_points_len, self.dim)``.

    :param new_points_len:

        Integer; the number of points to be stored in the target memory.

    '''
    new_points_len = int(new_points_len)
    assert new_points_len >= 1, "Must at least append one point!"

    # find out start and stop index of the new memory
    try:
        new_points_start = self._slice_for_run_nr[-1][-1]
    except IndexError:
        new_points_start = 0
    new_points_stop = new_points_start + new_points_len

    # store slice for new_points
    self._slice_for_run_nr.append((new_points_start, new_points_stop))

    if self.memleft < new_points_len:
        # need to allocate new memory
        self.memleft = 0
        # careful: do not use self._points because this may include unused memory
        self._points = _np.vstack((self[:], _np.empty((new_points_len, self.dim))))
    else:
        # have enough memory
        self.memleft -= new_points_len

    # return reference to the new points
    return self._points[new_points_start:new_points_stop]
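A usage sketch, assuming the enclosing class is a history buffer constructed with a point dimension; the `History` name and its constructor signature are assumptions:

import numpy as np

history = History(2)           # hypothetical: a buffer of 2-D points
run = history.append(3)        # reserve room for 3 new points
run[:] = np.random.rand(3, 2)  # write through the returned view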
java
public static PluginSpec readPluginSpecFile(URL pluginSpec) throws IOException { return (PluginSpec) mapper.reader(PluginSpec.class).readValue(pluginSpec); }
java
public int deleteByTableName(String tableName) throws SQLException { DeleteBuilder<Extensions, Void> db = deleteBuilder(); db.where().eq(Extensions.COLUMN_TABLE_NAME, tableName); int deleted = db.delete(); return deleted; }
python
def lcopt_bw2_forwast_setup(use_autodownload=True, forwast_path=None, db_name=FORWAST_PROJECT_NAME, overwrite=False): """ Utility function to set up brightway2 to work correctly with lcopt using the FORWAST database instead of ecoinvent By default it'll try and download the forwast database as a .bw2package file from lca-net If you've downloaded the forwast .bw2package file already you can set use_autodownload=False and forwast_path to point to the downloaded file To overwrite an existing version, set overwrite=True """ if use_autodownload: forwast_filepath = forwast_autodownload(FORWAST_URL) elif forwast_path is not None: forwast_filepath = forwast_path else: raise ValueError('Need a path if not using autodownload') if storage.project_type == 'single': db_name = storage.single_project_name if bw2_project_exists(db_name): bw2.projects.set_current(db_name) else: bw2.projects.set_current(db_name) bw2.bw2setup() else: if db_name in bw2.projects: if overwrite: bw2.projects.delete_project(name=db_name, delete_dir=True) else: print('Looks like bw2 is already set up for the FORWAST database - if you want to overwrite the existing version run lcopt.utils.lcopt_bw2_forwast_setup in a python shell using overwrite = True') return False # no need to keep running bw2setup - we can just copy a blank project which has been set up before if not bw2_project_exists(DEFAULT_BIOSPHERE_PROJECT): lcopt_biosphere_setup() bw2.projects.set_current(DEFAULT_BIOSPHERE_PROJECT) bw2.create_core_migrations() bw2.projects.copy_project(db_name, switch=True) bw2.BW2Package.import_file(forwast_filepath) return True
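Typical invocations, following the docstring; the local path is illustrative:

# Default: auto-download the FORWAST .bw2package from lca-net.
lcopt_bw2_forwast_setup()

# Use an already-downloaded package file instead (path is hypothetical).
lcopt_bw2_forwast_setup(use_autodownload=False,
                        forwast_path='downloads/forwast.bw2package')

# Rebuild an existing project from scratch.
lcopt_bw2_forwast_setup(overwrite=True)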
python
def dict_find_keys(dict_, val_list): r""" Args: dict_ (dict): val_list (list): Returns: dict: found_dict CommandLine: python -m utool.util_dict --test-dict_find_keys Example: >>> # ENABLE_DOCTEST >>> from utool.util_dict import * # NOQA >>> import utool as ut >>> dict_ = {'default': 1, 'hierarchical': 5, 'linear': 0, 'kdtree': 1, ... 'composite': 3, 'autotuned': 255, 'saved': 254, 'kmeans': 2, ... 'lsh': 6, 'kdtree_single': 4} >>> val_list = [1] >>> found_dict = dict_find_keys(dict_, val_list) >>> result = ut.repr2(ut.map_vals(sorted, found_dict)) >>> print(result) {1: ['default', 'kdtree']} """ found_dict = { search_val: [key for key, val in six.iteritems(dict_) if val == search_val] for search_val in val_list } return found_dict