Dataset schema: language (string, 2 classes) · func_code_string (string, 63 to 466k characters)
python
def _check_cores_output_sizes(self):
    """Checks the output_sizes of the cores of the DeepRNN module.

    Raises:
      ValueError: if the outputs of the cores cannot be concatenated along
        their first dimension.
    """
    for core_sizes in zip(*tuple(_get_flat_core_sizes(self._cores))):
        first_core_list = core_sizes[0][1:]
        for i, core_list in enumerate(core_sizes[1:]):
            if core_list[1:] != first_core_list:
                raise ValueError(
                    "The outputs of the provided cores are not able "
                    "to be concatenated along the first feature "
                    "dimension. Core 0 has shape %s, whereas Core %d "
                    "has shape %s - these must only differ in the first "
                    "dimension" % (core_sizes[0], i + 1, core_list))
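A minimal standalone sketch of the compatibility rule the check above enforces. `_get_flat_core_sizes` and the DeepRNN cores belong to the surrounding library, so hypothetical flat output shapes stand in here:

# Hypothetical flat output shapes for two cores (illustrative, not from the library).
core_output_sizes = [[(4, 3)], [(5, 3)]]  # differ only in the first dimension: OK

for core_sizes in zip(*core_output_sizes):
    first = core_sizes[0][1:]
    for i, shape in enumerate(core_sizes[1:]):
        # Mirrors the ValueError condition: trailing dimensions must match.
        assert shape[1:] == first, "Core 0 has shape %s, Core %d has shape %s" % (
            core_sizes[0], i + 1, shape)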
java
public ListAssert<Object> isArray() {
    Node node = assertType(ARRAY);
    return new JsonListAssert((List<?>) node.getValue(), path.asPrefix(), configuration)
        .as("Different value found in node \"%s\"", path);
}
java
@Override
public Object getValue(ELContext context, Object base, Object property) {
    if (context == null) {
        throw new NullPointerException("context is null");
    }
    Object result = null;
    if (isResolvable(base)) {
        int index = toIndex(null, property);
        List<?> list = (List<?>) base;
        result = index < 0 || index >= list.size() ? null : list.get(index);
        context.setPropertyResolved(true);
    }
    return result;
}
java
private void checkDirectory(File d, String dtype) {
    if (!d.isAbsolute()) {
        throw new IllegalArgumentException(dtype + " directory must be an absolute path (" + d.getPath() + ")");
    }
    if (!d.exists()) {
        throw new IllegalArgumentException(dtype + " directory does not exist (" + d.getPath() + ")");
    }
    if (!d.isDirectory()) {
        throw new IllegalArgumentException(dtype + " directory value is not a directory (" + d.getPath() + ")");
    }
}
java
@Override
public HttpRequest convertRequest(Envelope request) {
    ByteBuf requestBuffer = Unpooled.buffer(request.getSerializedSize());
    try {
        OutputStream outputStream = new ByteBufOutputStream(requestBuffer);
        request.writeTo(outputStream);
        outputStream.flush();
    } catch (IOException e) {
        // deliberately ignored, as the underlying operation doesn't involve I/O
    }

    String host = ((InetSocketAddress) channel().remoteAddress()).getAddress().getHostAddress();
    String uriPath = String.format("%s%s/%s", path, request.getService(), request.getMethod());

    FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1,
        HttpMethod.POST, new QueryStringEncoder(uriPath).toString(), requestBuffer);
    httpRequest.headers().set(HttpHeaders.Names.HOST, host);
    httpRequest.headers().set(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
    httpRequest.headers().set(HttpHeaders.Names.CONTENT_LENGTH, requestBuffer.readableBytes());
    httpRequest.headers().set(HttpHeaders.Names.CONTENT_TYPE, QuartzProtocol.CONTENT_TYPE);
    return httpRequest;
}
python
def _select_features_to_plot(self, X):
    """
    Select features to plot.

    feature_index is always used as the filter and if feature_names
    is supplied, a new feature_index is computed from those names.
    """
    if self.feature_index:
        if self.feature_names:
            # Warn rather than raise: as the message states, feature_names
            # is simply ignored when feature_index is also supplied.
            warnings.warn(
                'Both feature_index and feature_names are specified. '
                'feature_names is ignored',
                YellowbrickWarning,
            )
        if (min(self.feature_index) < 0
                or max(self.feature_index) >= X.shape[1]):
            raise YellowbrickValueError('Feature index is out of range')
    elif self.feature_names:
        self.feature_index = []
        features_list = self.features_.tolist()
        for feature_name in self.feature_names:
            try:
                self.feature_index.append(features_list.index(feature_name))
            except ValueError:
                raise YellowbrickValueError(
                    '{} not in labels'.format(feature_name))
java
private static boolean hasValidSubscriptionType(RosterPacket.Item item) {
    switch (item.getItemType()) {
        case none:
        case from:
        case to:
        case both:
            return true;
        default:
            return false;
    }
}
java
@Deprecated
protected void handleComputeFields(int julianDay) {
    int era, year;
    int[] fields = new int[3];
    // fields[0] = eyear, fields[1] = month, fields[2] = day
    jdToCE(julianDay, getJDEpochOffset(), fields);

    if (isAmeteAlemEra()) {
        era = AMETE_ALEM;
        year = fields[0] + AMETE_MIHRET_DELTA;
    } else {
        if (fields[0] > 0) {
            era = AMETE_MIHRET;
            year = fields[0];
        } else {
            era = AMETE_ALEM;
            year = fields[0] + AMETE_MIHRET_DELTA;
        }
    }

    internalSet(EXTENDED_YEAR, fields[0]);
    internalSet(ERA, era);
    internalSet(YEAR, year);
    internalSet(MONTH, fields[1]);
    internalSet(DAY_OF_MONTH, fields[2]);
    internalSet(DAY_OF_YEAR, (30 * fields[1]) + fields[2]);
}
python
def get_assessment_part(self):
    """If there's an AssessmentSection, ask it first for the part.

    This will take advantage of the fact that the AssessmentSection
    may have already cached the Part in question.
    """
    if self._magic_parent_id is None:
        assessment_part_id = Id(self.my_osid_object._my_map['assessmentPartId'])
    else:
        assessment_part_id = self._magic_parent_id
    if self._assessment_section is not None:
        return self._assessment_section._get_assessment_part(assessment_part_id)
    # else:
    apls = get_assessment_part_lookup_session(runtime=self.my_osid_object._runtime,
                                              proxy=self.my_osid_object._proxy,
                                              section=self._assessment_section)
    apls.use_federated_bank_view()
    apls.use_unsequestered_assessment_part_view()
    return apls.get_assessment_part(assessment_part_id)
java
@Override
public CPInstance fetchByG_ST_First(long groupId, int status,
        OrderByComparator<CPInstance> orderByComparator) {
    List<CPInstance> list = findByG_ST(groupId, status, 0, 1, orderByComparator);

    if (!list.isEmpty()) {
        return list.get(0);
    }

    return null;
}
java
public static SSLSocketFactory getSslSocketFactory() {
    if (!sslDisabled) {
        return SSLSocketFactory.getSocketFactory();
    }
    try {
        final X509Certificate[] _AcceptedIssuers = new X509Certificate[] {};
        final SSLContext ctx = SSLContext.getInstance("TLS");
        final X509TrustManager tm = new X509TrustManager() {

            @Override
            public X509Certificate[] getAcceptedIssuers() {
                return _AcceptedIssuers;
            }

            @Override
            public void checkServerTrusted(final X509Certificate[] chain, final String authType)
                    throws CertificateException {
            }

            @Override
            public void checkClientTrusted(final X509Certificate[] chain, final String authType)
                    throws CertificateException {
            }
        };
        ctx.init(null, new TrustManager[] { tm }, new SecureRandom());
        return new SSLSocketFactory(ctx, SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
    } catch (final Throwable t) {
        throw new RuntimeException(t);
    }
}
java
void writeConfigPropsDeclare(Definition def, Writer out, int indent) throws IOException {
    if (getConfigProps(def) == null)
        return;

    for (int i = 0; i < getConfigProps(def).size(); i++) {
        writeWithIndent(out, indent, "/** " + getConfigProps(def).get(i).getName() + " */\n");

        if (def.isUseAnnotation()) {
            writeIndent(out, indent);
            out.write("@ConfigProperty(defaultValue = \"" + getConfigProps(def).get(i).getValue() + "\")");
            if (getConfigProps(def).get(i).isRequired()) {
                out.write(" @NotNull");
            }
            writeEol(out);
        }
        writeWithIndent(out, indent, "private " + getConfigProps(def).get(i).getType() + " "
            + getConfigProps(def).get(i).getName() + ";\n\n");
    }
}
python
def _set_status(self, status, result=None):
    """Update operation status.

    :param str status: New status
    :param cdumay_result.Result result: Execution result
    """
    logger.info(
        "{}.SetStatus: {}[{}] status update '{}' -> '{}'".format(
            self.__class__.__name__, self.__class__.path, self.uuid,
            self.status, status
        ),
        extra=dict(
            kmsg=Message(
                self.uuid, entrypoint=self.__class__.path, params=self.params
            ).dump()
        )
    )
    return self.set_status(status, result)
python
def generate_wf_state_log(self):
    """Logs the state of the workflow and the content of task_data."""
    output = '\n- - - - - -\n'
    output += "WORKFLOW: %s ( %s )" % (self.current.workflow_name.upper(),
                                       self.current.workflow.name)
    output += "\nTASK: %s ( %s )\n" % (self.current.task_name, self.current.task_type)
    output += "DATA:"
    for k, v in self.current.task_data.items():
        if v:
            output += "\n\t%s: %s" % (k, v)
    output += "\nCURRENT:"
    output += "\n\tACTIVITY: %s" % self.current.activity
    output += "\n\tPOOL: %s" % self.current.pool
    output += "\n\tIN EXTERNAL: %s" % self.wf_state['in_external']
    output += "\n\tLANE: %s" % self.current.lane_name
    output += "\n\tTOKEN: %s" % self.current.token
    sys._zops_wf_state_log = output
    return output
python
def simplify(self, options=None):
    """Returns a dict describing a simple snapshot of this change, and
    its children if any.
    """
    simple = {
        "class": type(self).__name__,
        "is_change": self.is_change(),
        "description": self.get_description(),
        "label": self.label,
    }

    if options:
        simple["is_ignored"] = self.is_ignored(options)

    if isinstance(self, Addition):
        simple["is_addition"] = True

    if isinstance(self, Removal):
        simple["is_removal"] = True

    if self.entry:
        simple["entry"] = self.entry

    return simple
python
def _pusher_connect_handler(self, data):
    """Event handler for the connection_established event.

    Binds the shortlink_scanned event.
    """
    self.channel = self.pusher.subscribe(self.pos_callback_chan)
    for listener in self.pusher_connected_listeners:
        listener(data)
python
def _run_raw(self, cmd, ignore_errors=False):
    """Runs command directly, skipping tmux interface"""
    # TODO: capture stdout/stderr for feature parity with aws_backend
    result = os.system(cmd)
    if result != 0:
        self.log(f"command ({cmd}) failed.")
        if not ignore_errors:
            # Only abort when the caller has not asked to ignore errors.
            assert False, "_run_raw failed"
java
public ISREInstall getDefaultSRE() {
    final Object[] objects = this.sresList.getCheckedElements();
    if (objects.length == 0) {
        return null;
    }
    return (ISREInstall) objects[0];
}
python
def _handle_processing_error(err, errstream, client):
    """Handle ProcessingError exceptions."""
    errors = sorted(err.events, key=operator.attrgetter("index"))
    failed = [e.event for e in errors]
    silent = all(isinstance(e.error, OutOfOrderError) for e in errors)
    if errstream:
        _deliver_errored_events(errstream, failed)
        must_raise = False
    else:
        must_raise = True
    for _, event, error, tb in errors:
        if isinstance(error, OutOfOrderError):
            # Not really an error: do not log this to Sentry
            continue
        try:
            six.reraise(*tb)
        except Exception as err:
            if client:
                client.captureException()
            msg = "{}{}: {}".format(type(err).__name__, err.args,
                                    json.dumps(event, indent=4))
            rlogger.error(msg, exc_info=tb)
    if must_raise:
        raise
python
def install_packages():
    """
    Install a set of baseline packages and configure where necessary
    """
    if env.verbosity:
        print env.host, "INSTALLING & CONFIGURING NODE PACKAGES:"
    # Get a list of installed packages
    p = run("dpkg -l | awk '/ii/ {print $2}'").split('\n')
    # Remove apparmor - TODO we may enable this later
    if env.overwrite or not server_state('apparmor-disabled') and 'apparmor' in p:
        with settings(warn_only=True):
            sudo('/etc/init.d/apparmor stop')
            sudo('update-rc.d -f apparmor remove')
        set_server_state('apparmor-disabled')
    # The principle we will use is to only install configurations and packages
    # if they do not already exist (ie not manually installed or other method)
    env.installed_packages[env.host] = []
    role = env.role_lookup[env.host_string]
    packages = get_packages()
    for package in packages:
        if not package in p:
            install_package(package)
            if env.verbosity:
                print ' * installed', package
            env.installed_packages[env.host].append(package)
    if env.overwrite or env.installed_packages[env.host]:
        # always store the latest complete list
        set_server_state('packages_installed', packages)
        env.installed_packages[env.host] = packages
    if env.overwrite and 'apache2' in env.installed_packages[env.host]:
        # some sensible defaults - might move to putting this config in a template
        sudo("rm -f /etc/apache2/sites-enabled/000-default")
        sed('/etc/apache2/apache2.conf', before='KeepAlive On', after='KeepAlive Off',
            use_sudo=True, backup='')
        sed('/etc/apache2/apache2.conf', before='StartServers 2', after='StartServers 1',
            use_sudo=True, backup='')
        sed('/etc/apache2/apache2.conf', before='MaxClients 150', after='MaxClients 100',
            use_sudo=True, backup='')
        for module in env.APACHE_DISABLE_MODULES:
            sudo('rm -f /etc/apache2/mods-enabled/%s*' % module)
    # Install base python packages
    # We'll use easy_install at this stage since it doesn't download if the package
    # is current whereas pip always downloads.
    # Once both these packages mature we'll move to using the standard Ubuntu packages
    if (env.overwrite or not server_state('pip-venv-wrapper-installed')) and 'python-setuptools' in packages:
        sudo("easy_install virtualenv")
        sudo("easy_install pip")
        sudo("easy_install virtualenvwrapper")
        if env.verbosity:
            print " * easy installed pip, virtualenv, virtualenvwrapper"
        set_server_state('pip-venv-wrapper-installed')
    if not contains("/home/%s/.profile" % env.user, "source /usr/local/bin/virtualenvwrapper.sh"):
        append("/home/%s/.profile" % env.user, "export WORKON_HOME=$HOME/env")
        append("/home/%s/.profile" % env.user, "source /usr/local/bin/virtualenvwrapper.sh")
    # cleanup after easy_install
    sudo("rm -rf build")
python
def _distribution_info(self):
    """Creates the distribution name and the expected extension for the
    CSPICE package and returns it.

    :return (distribution, extension) tuple where distribution is the best
        guess from the strings available within the platform_urls list of
        strings, and extension is either "zip" or "tar.Z" depending on
        whether we are dealing with a Windows platform or else.
    :rtype: tuple (str, str)
    :raises: KeyError if the (system, machine) tuple does not correspond
        to any of the supported SpiceyPy environments.
    """
    print('Gathering information...')
    system = platform.system()

    # Cygwin system is CYGWIN-NT-xxx.
    system = 'cygwin' if 'CYGWIN' in system else system

    processor = platform.processor()
    machine = '64bit' if sys.maxsize > 2 ** 32 else '32bit'

    print('SYSTEM:   ', system)
    print('PROCESSOR:', processor)
    print('MACHINE:  ', machine)

    return self._dists[(system, machine)]
java
public Property getProperty(String... name) {
    if (name == null || name.length == 0)
        return null;
    for (Property p : properties) {
        if (Objects.equals(name[0], p.name)) {
            if (name.length == 1)
                return p;
            else
                return p.getProperty(Arrays.copyOfRange(name, 1, name.length));
        }
    }
    return null;
}
java
public static String documentToPrettyString(Document document) {
    StringWriter stringWriter = new StringWriter();
    OutputFormat prettyPrintFormat = OutputFormat.createPrettyPrint();
    XMLWriter xmlWriter = new XMLWriter(stringWriter, prettyPrintFormat);
    try {
        xmlWriter.write(document);
    } catch (IOException e) {
        // Ignore, shouldn't happen: StringWriter does no real I/O.
    }
    return stringWriter.toString();
}
python
def get_last_lineno(node):
    """Recursively find the last line number of the ast node."""
    max_lineno = 0

    if hasattr(node, "lineno"):
        max_lineno = node.lineno

    for _, field in ast.iter_fields(node):
        if isinstance(field, list):
            for value in field:
                if isinstance(value, ast.AST):
                    max_lineno = max(max_lineno, get_last_lineno(value))
        elif isinstance(field, ast.AST):
            max_lineno = max(max_lineno, get_last_lineno(field))

    return max_lineno
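A quick usage sketch for the helper above, using only the standard library; the sample source string is illustrative:

import ast

source = "def f(x):\n    y = x + 1\n    return y\n"
tree = ast.parse(source)
# The deepest node with a line number is the return statement on line 3.
print(get_last_lineno(tree))  # -> 3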
java
public Collection<Book> getAvailableBooks() {
    synchronized (librarian) {
        Collection<Book> books = new LinkedList<Book>();
        for (Entry<String, Integer> entry : isbns_to_quantities.entrySet()) {
            if (entry.getValue() > 0) {
                books.add(getBook(entry.getKey()));
            }
        }
        return books;
    }
}
java
public static String padRight(CharSequence self, Number numberOfChars, CharSequence padding) {
    String s = self.toString();
    int numChars = numberOfChars.intValue();
    if (numChars <= s.length()) {
        return s;
    } else {
        return s + getPadding(padding.toString(), numChars - s.length());
    }
}
java
public void setField(String name, MLArray value, int index) {
    keys.add(name);
    currentIndex = index;

    if (mlStructArray.isEmpty() || mlStructArray.size() <= index) {
        mlStructArray.add(index, new LinkedHashMap<String, MLArray>());
    }
    mlStructArray.get(index).put(name, value);
}
java
public LocalTime withSecond(int second) {
    if (this.second == second) {
        return this;
    }
    SECOND_OF_MINUTE.checkValidValue(second);
    return create(hour, minute, second, nano);
}
python
def properties_dict_for(self, index):
    """Return a dictionary with properties as keys and lists of indices as
    values, so the indices for each contained constraint are collected in
    one dictionary.

    Example: let properties be 'one': [1, 2, 3, 4], 'two': [3, 5, 6]

    >>> properties_dict_for([2, 3, 5])
    {'one': [2, 3], 'two': [3, 5]}
    """
    props = self.properties_for(index)
    prop_index = extract_properties_to_index(index, props)
    return prop_index
java
protected boolean hasOnlyProblemResources() {
    return m_model.getGroups().get(m_groupIndex).getResources().size() == m_model.countResourcesInGroup(
        new CmsPublishDataModel.HasProblems(),
        m_model.getGroups().get(m_groupIndex).getResources());
}
java
public List<T> apply(List<T> selectedCandidates, Random rng) {
    List<T> population = selectedCandidates;
    for (EvolutionaryOperator<T> operator : pipeline) {
        population = operator.apply(population, rng);
    }
    return population;
}
python
def _store_parameters(self):
    """Store startup params and config in datadir/.mlaunch_startup."""
    datapath = self.dir

    out_dict = {
        'protocol_version': 2,
        'mtools_version': __version__,
        'parsed_args': self.args,
        'unknown_args': self.unknown_args,
        'startup_info': self.startup_info
    }

    if not os.path.exists(datapath):
        os.makedirs(datapath)
    try:
        json.dump(out_dict,
                  open(os.path.join(datapath, '.mlaunch_startup'), 'w'),
                  indent=-1)
    except Exception as ex:
        print("ERROR STORING Parameters:", ex)
python
def quantile_for_list_of_values(self, **kwargs):
    """Returns Manager containing quantiles along an axis for numeric columns.

    Returns:
        DataManager containing quantiles of original DataManager along an axis.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().quantile_for_list_of_values(**kwargs)
    axis = kwargs.get("axis", 0)
    q = kwargs.get("q")
    numeric_only = kwargs.get("numeric_only", True)
    assert isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list))

    if numeric_only:
        new_columns = self.numeric_columns()
    else:
        new_columns = [
            col
            for col, dtype in zip(self.columns, self.dtypes)
            if (is_numeric_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype))
        ]
    if axis:
        # If along rows, then drop the nonnumeric columns, record the index, and
        # take transpose. We have to do this because if we don't, the result is all
        # in one column for some reason.
        nonnumeric = [
            col
            for col, dtype in zip(self.columns, self.dtypes)
            if not is_numeric_dtype(dtype)
        ]
        query_compiler = self.drop(columns=nonnumeric)
        new_columns = query_compiler.index
    else:
        query_compiler = self

    def quantile_builder(df, **kwargs):
        result = df.quantile(**kwargs)
        return result.T if axis == 1 else result

    func = query_compiler._prepare_method(quantile_builder, **kwargs)
    q_index = pandas.Float64Index(q)
    new_data = query_compiler._map_across_full_axis(axis, func)

    # This took a long time to debug, so here is the rundown of why this is needed.
    # Previously, we were operating on select indices, but that was broken. We were
    # not correctly setting the columns/index. Because of how we compute `to_pandas`
    # and because of the static nature of the index for `axis=1` it is easier to
    # just handle this as the transpose (see `quantile_builder` above for the
    # transpose within the partition) than it is to completely rework other
    # internal methods. Basically we are returning the transpose of the object for
    # correctness and cleanliness of the code.
    if axis == 1:
        q_index = new_columns
        new_columns = pandas.Float64Index(q)
    result = self.__constructor__(new_data, q_index, new_columns)
    return result.transpose() if axis == 1 else result
java
public static UserPreferences getProjectPreferences(IProject project, boolean forceRead) {
    try {
        UserPreferences prefs = (UserPreferences) project.getSessionProperty(SESSION_PROPERTY_USERPREFS);
        if (prefs == null || forceRead) {
            prefs = readUserPreferences(project);
            if (prefs == null) {
                prefs = getWorkspacePreferences().clone();
            }
            project.setSessionProperty(SESSION_PROPERTY_USERPREFS, prefs);
        }
        return prefs;
    } catch (CoreException e) {
        FindbugsPlugin.getDefault().logException(e, "Error getting SpotBugs preferences for project");
        return getWorkspacePreferences().clone();
    }
}
python
def _get_jacobian_hessian_strategy(self):
    """Figure out how to calculate the jacobian and hessian.

    Will return a tuple describing how best to calculate the jacobian and
    hessian, respectively. If None, it should be calculated using the
    available analytical method.

    :return: tuple of jacobian_method, hessian_method
    """
    if self.jacobian is not None and self.hessian is None:
        jacobian = None
        hessian = 'cs'
    elif self.jacobian is None and self.hessian is None:
        jacobian = 'cs'
        hessian = soBFGS(exception_strategy='damp_update')
    else:
        jacobian = None
        hessian = None
    return jacobian, hessian
python
def _key_parenleft(self, text):
    """Action for '('"""
    self.hide_completion_widget()
    if self.get_current_line_to_cursor():
        last_obj = self.get_last_obj()
        if last_obj and not last_obj.isdigit():
            self.insert_text(text)
            self.show_object_info(last_obj, call=True)
            return
    self.insert_text(text)
java
public static CommerceDiscountRel fetchByCN_CPK_Last(long classNameId, long classPK,
        OrderByComparator<CommerceDiscountRel> orderByComparator) {
    return getPersistence()
        .fetchByCN_CPK_Last(classNameId, classPK, orderByComparator);
}
java
public void messageReceived(Object message) {
    if (message instanceof Packet) {
        try {
            messageReceived(conn, (Packet) message);
        } catch (Exception e) {
            log.warn("Exception on packet receive", e);
        }
    } else {
        // raw buffer handling
        IoBuffer in = (IoBuffer) message;
        // filter based on current connection state
        RTMP rtmp = conn.getState();
        final byte connectionState = conn.getStateCode();
        log.trace("connectionState: {}", RTMP.states[connectionState]);
        // get the handshake
        OutboundHandshake handshake = (OutboundHandshake) conn.getAttribute(RTMPConnection.RTMP_HANDSHAKE);
        switch (connectionState) {
            case RTMP.STATE_CONNECT:
                log.debug("Handshake - client phase 1 - size: {}", in.remaining());
                in.get(); // 0x01
                byte handshakeType = in.get(); // usually 0x03 (rtmp)
                log.debug("Handshake - byte type: {}", handshakeType);
                // copy out 1536 bytes
                byte[] s1 = new byte[Constants.HANDSHAKE_SIZE];
                in.get(s1);
                // decode s1
                IoBuffer out = handshake.decodeServerResponse1(IoBuffer.wrap(s1));
                if (out != null) {
                    // set state to indicate we're waiting for S2
                    rtmp.setState(RTMP.STATE_HANDSHAKE);
                    conn.writeRaw(out);
                    // if we got S0S1+S2 continue processing
                    if (in.remaining() >= Constants.HANDSHAKE_SIZE) {
                        log.debug("Handshake - client phase 2 - size: {}", in.remaining());
                        if (handshake.decodeServerResponse2(in)) {
                            // conn.removeAttribute(RTMPConnection.RTMP_HANDSHAKE);
                            // conn.setStateCode(RTMP.STATE_CONNECTED);
                            // connectionOpened(conn);
                        } else {
                            log.warn("Handshake failed on S2 processing");
                            //conn.close();
                        }
                        // open regardless of server type
                        conn.removeAttribute(RTMPConnection.RTMP_HANDSHAKE);
                        conn.setStateCode(RTMP.STATE_CONNECTED);
                        connectionOpened(conn);
                    }
                } else {
                    log.warn("Handshake failed on S0S1 processing");
                    conn.close();
                }
                break;
            case RTMP.STATE_HANDSHAKE:
                log.debug("Handshake - client phase 2 - size: {}", in.remaining());
                if (handshake.decodeServerResponse2(in)) {
                    // conn.removeAttribute(RTMPConnection.RTMP_HANDSHAKE);
                    // conn.setStateCode(RTMP.STATE_CONNECTED);
                    // connectionOpened(conn);
                } else {
                    log.warn("Handshake failed on S2 processing");
                    //conn.close();
                }
                // open regardless of server type
                conn.removeAttribute(RTMPConnection.RTMP_HANDSHAKE);
                conn.setStateCode(RTMP.STATE_CONNECTED);
                connectionOpened(conn);
                break;
            default:
                throw new IllegalStateException("Invalid RTMP state: " + connectionState);
        }
    }
}
java
public String get(final Object key) {
    if (this._current.containsKey(key)) {
        return this._current.get(key);
    } else if (this._parent != null) {
        return this._parent.get(key);
    } else {
        return null;
    }
}
java
public static Object createInstance(String name, Object... args) throws Exception {
    return createInstance(name, (String) null, args);
}
java
public static <A> RegExp<A> buildConcat(List<RegExp<A>> concatTerms) {
    if (concatTerms.isEmpty())
        return new EmptyStr<A>();
    Iterator<RegExp<A>> it = concatTerms.iterator();
    RegExp<A> re = it.next();
    while (it.hasNext()) {
        re = new Concat<A>(re, it.next());
    }
    return re;
}
java
public void pushBrowserHistory(String strHistory, String browserTitle, boolean bPushToBrowser) {
    if (bPushToBrowser) {
        if (this.getBrowserManager() != null) {
            // Let browser know about the new screen
            this.getBrowserManager().pushBrowserHistory(strHistory, browserTitle);
        }
    }
}
python
def rgevolve_leadinglog(self, scale_out):
    """Compute the leading logarithmic approximation to the solution
    of the SMEFT RGEs from the initial scale to `scale_out`.

    Returns a dictionary with parameters and Wilson coefficients.

    Much faster but less precise than `rgevolve`.
    """
    self._check_initial()
    return rge.smeft_evolve_leadinglog(C_in=self.C_in,
                                       scale_high=self.scale_high,
                                       scale_in=self.scale_in,
                                       scale_out=scale_out)
python
def T2(word, rules):
    '''Split any VV sequence that is not a genuine diphthong or long vowel.
    E.g., [ta.e], [ko.et.taa]. This rule can apply within VVV+ sequences.'''
    WORD = word
    offset = 0

    for vv in vv_sequences(WORD):
        seq = vv.group(1)

        if not phon.is_diphthong(seq) and not phon.is_long(seq):
            i = vv.start(1) + 1 + offset
            WORD = WORD[:i] + '.' + WORD[i:]
            offset += 1

    rules += ' T2' if word != WORD else ''

    return WORD, rules
java
protected static void removeTralingZeros(StringBuilder sb) {
    int endIndex = sb.length();
    if (endIndex > 0) {
        --endIndex;
        int index = endIndex;
        while (sb.charAt(index) == '0') {
            --index;
        }
        if (index < endIndex)
            sb.delete(index + 1, endIndex + 1);
    }
}
java
@SuppressWarnings("PMD.GuardLogStatement")
public W acquire() throws InterruptedException {
    // Stop draining, because we obviously need this kind of buffers
    Optional.ofNullable(idleTimer.getAndSet(null)).ifPresent(timer -> timer.cancel());
    if (createdBufs.get() < maximumBufs) {
        // Haven't reached maximum, so if no buffer is queued, create one.
        W buffer = queue.poll();
        if (buffer != null) {
            buffer.lockBuffer();
            return buffer;
        }
        return createBuffer();
    }
    // Wait for buffer to become available.
    if (logger.isLoggable(Level.FINE)) {
        // If configured, log message after waiting some time.
        W buffer = queue.poll(acquireWarningLimit, TimeUnit.MILLISECONDS);
        if (buffer != null) {
            buffer.lockBuffer();
            return buffer;
        }
        logger.log(Level.FINE, new Throwable(),
            () -> Thread.currentThread().getName() + " waiting > "
                + acquireWarningLimit + "ms for buffer, while executing:");
    }
    W buffer = queue.take();
    buffer.lockBuffer();
    return buffer;
}
java
public ServiceInstanceQuery getInQueryCriterion(String key, List<String> list) {
    QueryCriterion c = new InQueryCriterion(key, list);
    addQueryCriterion(c);
    return this;
}
python
def to_xml(self):
    """Get this batch as XML"""
    assert self.connection is not None
    s = '<?xml version="1.0" encoding="UTF-8"?>\n'
    s += '<InvalidationBatch xmlns="http://cloudfront.amazonaws.com/doc/%s/">\n' % self.connection.Version
    for p in self.paths:
        s += '    <Path>%s</Path>\n' % self.escape(p)
    s += '    <CallerReference>%s</CallerReference>\n' % self.caller_reference
    s += '</InvalidationBatch>\n'
    return s
java
@POST
@Path("me/username")
@RolesAllowed({"ROLE_ADMIN", "ROLE_USER"})
public Response changeUsername(@Context HttpServletRequest request, UsernameRequest usernameRequest) {
    Long userId = (Long) request.getAttribute(OAuth2Filter.NAME_USER_ID);
    return changeUsername(userId, usernameRequest);
}
java
public static void loadCustomerProperties(Properties props) {
    for (Entry<Object, Object> entry : props.entrySet()) {
        setProperty((String) entry.getKey(), entry.getValue());
    }
}
java
@Override
public int add(DownloadRequest request) throws IllegalArgumentException {
    checkReleased("add(...) called on a released ThinDownloadManager.");
    if (request == null) {
        throw new IllegalArgumentException("DownloadRequest cannot be null");
    }
    return mRequestQueue.add(request);
}
python
def get_node(request):
    """MNCore.getCapabilities() → Node."""
    api_major_int = 2 if d1_gmn.app.views.util.is_v2_api(request) else 1
    node_pretty_xml = d1_gmn.app.node.get_pretty_xml(api_major_int)
    return django.http.HttpResponse(node_pretty_xml, d1_common.const.CONTENT_TYPE_XML)
python
def run_program(program, args=None, **subprocess_kwargs):
    """Run program in a separate process.

    NOTE: returns the process object created by `subprocess.Popen()`. This
    can be used with `proc.communicate()` for example.

    If 'shell' appears in the kwargs, it must be False, otherwise
    ProgramError will be raised.

    If only the program name is given and not the full path, a lookup will
    be performed to find the program. If the lookup fails, ProgramError will
    be raised.

    Note that stdin, stdout and stderr will be set by default to PIPE unless
    specified in subprocess_kwargs.

    :str program: The name of the program to run.
    :list args: The program arguments.
    :subprocess_kwargs: These will be passed to subprocess.Popen.
    """
    if 'shell' in subprocess_kwargs and subprocess_kwargs['shell']:
        raise ProgramError(
            "This function is only for non-shell programs, "
            "use run_shell_command() instead.")
    fullcmd = find_program(program)
    if not fullcmd:
        raise ProgramError("Program %s was not found" % program)
    # As per subprocess, we make a complete list of prog+args
    fullcmd = [fullcmd] + (args or [])
    for stream in ['stdin', 'stdout', 'stderr']:
        subprocess_kwargs.setdefault(stream, subprocess.PIPE)
    subprocess_kwargs = alter_subprocess_kwargs_by_platform(**subprocess_kwargs)
    return subprocess.Popen(fullcmd, **subprocess_kwargs)
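A hedged usage sketch of the helper above, assuming `run_program` is importable from its module and `python` is on the PATH:

# Launch a short-lived process; stdin/stdout/stderr default to PIPE,
# so communicate() can collect the output.
proc = run_program('python', ['-c', 'print("hello")'])
out, err = proc.communicate()
print(out.decode().strip())  # -> hello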
python
def profile_methods(self, method_list):
    """Record profiling data while the matching methods execute."""
    self.method_exec_info = []
    # Start the data-recording thread
    self.record_thread.stop_flag = False
    self.record_thread.start()
    for name in method_list:
        if name not in self.check_macthing_object.MATCHING_METHODS.keys():
            continue
        time.sleep(3)  # leave a blank margin in the recorded plot
        start_time = time.time()  # record the start time
        print("--->>> start '%s' matching:\n" % name)
        # Plot the keypoint-matching result for this method name
        kp_sch, kp_src, good, result = self.check_macthing_object.get_and_plot_keypoints(name)
        print("\n\n\n")
        end_time = time.time()  # record the end time
        time.sleep(3)  # leave a blank margin in the recorded plot
        # Record the data for this matching run
        ret_info = {
            "name": name,
            "start_time": start_time,
            "end_time": end_time,
            "result": result,
            "kp_sch": len(kp_sch),
            "kp_src": len(kp_src),
            "good": len(good)}
        self.method_exec_info.append(ret_info)
    self.record_thread.stop_flag = True
java
public void extractZipEntry(final ZipFile zipFile, final ZipEntry target, final File toDirectory)
        throws IOException {
    ZipExtensions.extractZipEntry(zipFile, target, toDirectory);
}
java
public Future<AuthenticationResult> acquireTokenByAuthorizationCode(
        final String authorizationCode, final URI redirectUri,
        final ClientCredential credential, final AuthenticationCallback callback) {
    this.validateAuthCodeRequestInput(authorizationCode, redirectUri, credential, null);
    return this.acquireTokenByAuthorizationCode(authorizationCode, redirectUri,
        credential, null, callback);
}
python
def setup_injection_workflow(workflow, output_dir=None,
                             inj_section_name='injections',
                             exttrig_file=None, tags=None):
    """
    This function is the gateway for setting up injection-generation jobs in
    a workflow. It should be possible for this function to support a number
    of different ways/codes that could be used for doing this, however as
    this will presumably stay as a single call to a single code (which need
    not be inspinj) there are currently no subfunctions in this module.

    Parameters
    -----------
    workflow : pycbc.workflow.core.Workflow
        The Workflow instance that the coincidence jobs will be added to.
    output_dir : path
        The directory in which injection files will be stored.
    inj_section_name : string (optional, default='injections')
        The string that corresponds to the option describing the exe location
        in the [executables] section of the .ini file and that corresponds to
        the section (and sub-sections) giving the options that will be given
        to the code at run time.
    tags : list of strings (optional, default = [])
        A list of the tagging strings that will be used for all jobs created
        by this call to the workflow. This will be used in output names.

    Returns
    --------
    inj_files : pycbc.workflow.core.FileList
        The list of injection files created by this call.
    inj_tags : list of strings
        The tag corresponding to each injection file and used to uniquely
        identify them. The FileList class contains functions to search based
        on tags.
    """
    if tags is None:
        tags = []
    logging.info("Entering injection module.")
    make_analysis_dir(output_dir)

    # Get full analysis segment for output file naming
    full_segment = workflow.analysis_time
    ifos = workflow.ifos

    # Identify which injections to do by presence of sub-sections in
    # the configuration file
    inj_tags = []
    inj_files = FileList([])

    for section in workflow.cp.get_subsections(inj_section_name):
        inj_tag = section.upper()
        curr_tags = tags + [inj_tag]

        # FIXME: Remove once fixed in pipedown
        # TEMPORARILY we require inj tags to end in "INJ"
        if not inj_tag.endswith("INJ"):
            err_msg = "Currently workflow requires injection names to end with "
            err_msg += "a inj suffix. Ie. bnslininj or bbhinj. "
            err_msg += "%s is not good." % (inj_tag.lower())
            raise ValueError(err_msg)

        # Parse for options in ini file
        injection_method = workflow.cp.get_opt_tags("workflow-injections",
                                                    "injections-method",
                                                    curr_tags)

        if injection_method in ["IN_WORKFLOW", "AT_RUNTIME"]:
            # FIXME: Add ability to specify different exes
            inj_job = LalappsInspinjExecutable(workflow.cp, inj_section_name,
                                               out_dir=output_dir, ifos='HL',
                                               tags=curr_tags)
            node = inj_job.create_node(full_segment)
            if injection_method == "AT_RUNTIME":
                workflow.execute_node(node)
            else:
                workflow.add_node(node)
            inj_file = node.output_files[0]
            inj_files.append(inj_file)
        elif injection_method == "PREGENERATED":
            injectionFilePath = workflow.cp.get_opt_tags(
                "workflow-injections", "injections-pregenerated-file",
                curr_tags)
            injectionFilePath = resolve_url(injectionFilePath)
            file_url = urlparse.urljoin('file:',
                                        urllib.pathname2url(injectionFilePath))
            inj_file = File('HL', 'PREGEN_inj_file', full_segment, file_url,
                            tags=curr_tags)
            inj_file.PFN(injectionFilePath, site='local')
            inj_files.append(inj_file)
        elif injection_method in ["IN_COH_PTF_WORKFLOW", "AT_COH_PTF_RUNTIME"]:
            inj_job = LalappsInspinjExecutable(workflow.cp, inj_section_name,
                                               out_dir=output_dir, ifos=ifos,
                                               tags=curr_tags)
            node = inj_job.create_node(full_segment, exttrig_file)
            if injection_method == "AT_COH_PTF_RUNTIME":
                workflow.execute_node(node)
            else:
                workflow.add_node(node)
            inj_file = node.output_files[0]

            if workflow.cp.has_option("workflow-injections", "em-bright-only"):
                em_filter_job = PycbcDarkVsBrightInjectionsExecutable(
                    workflow.cp, 'em_bright_filter', tags=curr_tags,
                    out_dir=output_dir, ifos=ifos)
                node = em_filter_job.create_node(inj_file, full_segment,
                                                 curr_tags)
                if injection_method == "AT_COH_PTF_RUNTIME":
                    workflow.execute_node(node)
                else:
                    workflow.add_node(node)
                inj_file = node.output_files[0]

            if workflow.cp.has_option("workflow-injections", "do-jitter-skyloc"):
                jitter_job = LigolwCBCJitterSkylocExecutable(
                    workflow.cp, 'jitter_skyloc', tags=curr_tags,
                    out_dir=output_dir, ifos=ifos)
                node = jitter_job.create_node(inj_file, full_segment, curr_tags)
                if injection_method == "AT_COH_PTF_RUNTIME":
                    workflow.execute_node(node)
                else:
                    workflow.add_node(node)
                inj_file = node.output_files[0]

            if workflow.cp.has_option("workflow-injections", "do-align-total-spin"):
                align_job = LigolwCBCAlignTotalSpinExecutable(
                    workflow.cp, 'align_total_spin', tags=curr_tags,
                    out_dir=output_dir, ifos=ifos)
                node = align_job.create_node(inj_file, full_segment, curr_tags)
                if injection_method == "AT_COH_PTF_RUNTIME":
                    workflow.execute_node(node)
                else:
                    workflow.add_node(node)
                inj_file = node.output_files[0]

            inj_files.append(inj_file)
        else:
            err = "Injection method must be one of IN_WORKFLOW, "
            err += "AT_RUNTIME or PREGENERATED. Got %s." % (injection_method)
            raise ValueError(err)

        inj_tags.append(inj_tag)

    logging.info("Leaving injection module.")
    return inj_files, inj_tags
python
def checkForDuplicateInputs(rootnames):
    """
    Check input files specified in ASN table for duplicate
    versions with multiple valid suffixes (_flt and _flc, for example).
    """
    flist = []
    duplist = []

    for fname in rootnames:
        # Look for any recognized CTE-corrected products
        f1 = fileutil.buildRootname(fname, ext=['_flc.fits'])
        f2 = fileutil.buildRootname(fname)
        flist.append(f2)
        if os.path.exists(f1) and f1 != f2:
            # More than 1 valid input found for this rootname
            duplist.append(f1)

    return flist, duplist
java
final public int PlusMinus() throws ParseException {
    switch ((jj_ntk == -1) ? jj_ntk() : jj_ntk) {
        case 7:
            jj_consume_token(7);
            {if (true) return Operator.PLUS;}
            break;
        case 8:
            jj_consume_token(8);
            {if (true) return Operator.MINUS;}
            break;
        default:
            jj_la1[6] = jj_gen;
            jj_consume_token(-1);
            throw new ParseException();
    }
    throw new Error("Missing return statement in function");
}
python
def fix_e251(self, result):
    """Remove whitespace around parameter '=' sign."""
    line_index = result['line'] - 1
    target = self.source[line_index]

    # This is necessary since pycodestyle sometimes reports columns that
    # goes past the end of the physical line. This happens in cases like,
    # foo(bar\n=None)
    c = min(result['column'] - 1, len(target) - 1)

    if target[c].strip():
        fixed = target
    else:
        fixed = target[:c].rstrip() + target[c:].lstrip()

    # There could be an escaped newline
    #
    #     def foo(a=\
    #             1)
    if fixed.endswith(('=\\\n', '=\\\r\n', '=\\\r')):
        self.source[line_index] = fixed.rstrip('\n\r \t\\')
        self.source[line_index + 1] = self.source[line_index + 1].lstrip()
        return [line_index + 1, line_index + 2]  # Line indexed at 1

    self.source[result['line'] - 1] = fixed
python
def validate_backup_window(window):
    """Validate PreferredBackupWindow for DBInstance"""
    hour = r'[01]?[0-9]|2[0-3]'
    minute = r'[0-5][0-9]'
    r = ("(?P<start_hour>%s):(?P<start_minute>%s)-"
         "(?P<end_hour>%s):(?P<end_minute>%s)") % (hour, minute, hour, minute)
    range_regex = re.compile(r)
    m = range_regex.match(window)
    if not m:
        raise ValueError("DBInstance PreferredBackupWindow must be in the "
                         "format: hh24:mi-hh24:mi")
    start_ts = (int(m.group('start_hour')) * 60) + int(m.group('start_minute'))
    end_ts = (int(m.group('end_hour')) * 60) + int(m.group('end_minute'))
    if abs(end_ts - start_ts) < 30:
        raise ValueError("DBInstance PreferredBackupWindow must be at least "
                         "30 minutes long.")
    return window
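A brief usage sketch, assuming the validator above is importable; the window strings are made-up examples:

print(validate_backup_window("03:00-03:45"))  # valid: returns "03:00-03:45"

try:
    validate_backup_window("03:00-03:15")  # only 15 minutes long
except ValueError as exc:
    print(exc)  # ...must be at least 30 minutes long.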
java
public static String getName(String fileName) {
    int dot = fileName.lastIndexOf('.');
    return dot == -1 ? fileName : fileName.substring(0, dot);
}
python
def get_git_home(path='.'):
    """Get Git path from the current context."""
    ctx = click.get_current_context(silent=True)
    if ctx and GIT_KEY in ctx.meta:
        return ctx.meta[GIT_KEY]

    from git import Repo
    return Repo(path, search_parent_directories=True).working_dir
java
@PostConstruct
public void initialize() {
    try {
        Class.forName("org.hl7.fhir.instance.model.QuestionnaireResponse");
        myValidateResponses = true;
    } catch (ClassNotFoundException e) {
        myValidateResponses = Boolean.FALSE;
    }
}
python
def prox_soft(X, step, thresh=0):
    """Soft thresholding proximal operator"""
    thresh_ = _step_gamma(step, thresh)
    return np.sign(X) * prox_plus(np.abs(X) - thresh_, step)
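A self-contained sketch of the same soft-thresholding idea in plain NumPy. `_step_gamma` and `prox_plus` belong to the surrounding library, so simplified stand-ins are defined here for illustration:

import numpy as np

def _prox_plus(x):
    # Stand-in for prox_plus: projection onto the non-negative orthant.
    return np.maximum(x, 0)

def _soft(x, thresh):
    # Soft thresholding: shrink |x| toward zero by thresh, preserving sign.
    return np.sign(x) * _prox_plus(np.abs(x) - thresh)

print(_soft(np.array([-2.0, -0.3, 0.5, 3.0]), 1.0))  # -> [-1. -0.  0.  2.]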
python
def reloadFileOfCurrentItem(self, rtiRegItem=None):
    """Finds the repo tree item that holds the file of the current item and
    reloads it. Reloading is done by removing the repo tree item and
    inserting a new one.

    The new item will be of type rtiRegItem.cls. If rtiRegItem is None (the
    default), the new rtiClass will be the same as the old one. The
    rtiRegItem.cls will be imported. If this fails the old class will be
    used, and a warning will be logged.
    """
    logger.debug("reloadFileOfCurrentItem, rtiClass={}".format(rtiRegItem))

    currentIndex = self.getRowCurrentIndex()
    if not currentIndex.isValid():
        return

    currentItem, _ = self.getCurrentItem()
    oldPath = currentItem.nodePath

    fileRtiIndex = self.model().findFileRtiIndex(currentIndex)
    isExpanded = self.isExpanded(fileRtiIndex)

    if rtiRegItem is None:
        rtiClass = None
    else:
        rtiRegItem.tryImportClass()
        rtiClass = rtiRegItem.cls

    newRtiIndex = self.model().reloadFileAtIndex(fileRtiIndex, rtiClass=rtiClass)

    try:
        # Expand and select the name with the old path
        _lastItem, lastIndex = self.expandPath(oldPath)
        self.setCurrentIndex(lastIndex)
        return lastIndex
    except Exception as ex:
        # The old path may not exist anymore. In that case select file RTI
        logger.warning("Unable to select {!r} because of: {}".format(oldPath, ex))
        self.setExpanded(newRtiIndex, isExpanded)
        self.setCurrentIndex(newRtiIndex)
        return newRtiIndex
java
public int findNode(int element) {
    int hash = m_hash.getHash(element);
    int ptr = getFirstInBucket(hash);
    while (ptr != -1) {
        int e = m_lists.getElement(ptr);
        if (m_hash.equal(e, element)) {
            return ptr;
        }
        ptr = m_lists.getNext(ptr);
    }
    return -1;
}
python
def get_dict(self, domain=None, path=None):
    """Takes as an argument an optional domain and path and returns a plain
    old Python dict of name-value pairs of cookies that meet the
    requirements."""
    dictionary = {}
    for cookie in iter(self):
        if (domain is None or cookie.domain == domain) and \
                (path is None or cookie.path == path):
            dictionary[cookie.name] = cookie.value
    return dictionary
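This method matches the cookie-jar API in `requests`, so a hedged usage sketch with that library; the cookie names and domains are illustrative:

import requests

jar = requests.cookies.RequestsCookieJar()
jar.set('token', 'abc', domain='example.com', path='/')
jar.set('theme', 'dark', domain='other.com', path='/')

# Only cookies whose domain matches the filter are returned.
print(jar.get_dict(domain='example.com'))  # -> {'token': 'abc'}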
python
def connect(dbapi_connection, connection_record):
    """Called once by SQLAlchemy for each new SQLite DB-API connection.

    Here is where we issue some PRAGMA statements to configure how we're
    going to access the SQLite database.

    @param dbapi_connection: A newly connected raw SQLite DB-API connection.
    @param connection_record: Unused by this method.
    """
    try:
        cursor = dbapi_connection.cursor()
        try:
            cursor.execute("PRAGMA foreign_keys = ON;")
            cursor.execute("PRAGMA foreign_keys;")
            if cursor.fetchone()[0] != 1:
                raise Exception()
        finally:
            cursor.close()
    except Exception:
        dbapi_connection.close()
        raise sqlite3.Error()
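The handler above has the shape of a SQLAlchemy engine/pool `connect` event; a hedged registration sketch, assuming SQLAlchemy and an illustrative SQLite path:

from sqlalchemy import create_engine, event

engine = create_engine("sqlite:///example.db")  # hypothetical database file

# Run the PRAGMA-configuring handler on every new DB-API connection.
event.listen(engine, "connect", connect)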
java
public void addHistoryEntry(Query q) {
    try {
        Query queryCopy = q.clone();
        // remove it first in order to let it appear on the beginning of the list
        state.getHistory().removeItem(queryCopy);
        state.getHistory().addItemAt(0, queryCopy);
        searchView.getControlPanel().getQueryPanel().updateShortHistory();
    } catch (CloneNotSupportedException ex) {
        log.error("Can't clone the query", ex);
    }
}
python
def collect_blocks(self):
    """Collect the blocks in a list."""
    if self.mode == 'spark':
        return self.values.tordd().sortByKey().values().collect()

    if self.mode == 'local':
        return self.values.values.flatten().tolist()
java
private static int readFully(InputStream in, ByteArrayOutputStream bout, int length)
        throws IOException {
    int read = 0;
    byte[] buffer = new byte[2048];
    while (length > 0) {
        int n = in.read(buffer, 0, length < 2048 ? length : 2048);
        if (n <= 0) {
            break;
        }
        bout.write(buffer, 0, n);
        read += n;
        length -= n;
    }
    return read;
}
python
def sudo_command(self, command, bufsize=-1):
    """Sudo a command on the SSH server.

    Delegates to :func:`~ssh.Connection.exec_command`

    :param command: the command to execute
    :type command: str
    :param bufsize: interpreted the same way as by the built-in C{file()}
        function in python
    :type bufsize: int
    :returns: the stdin, stdout, and stderr of the executing command
    :rtype: tuple(L{ChannelFile}, L{ChannelFile}, L{ChannelFile})

    :raises SSHException: if the server fails to execute the command
    """
    new_command = "sudo {0}".format(command)
    return self.exec_command(new_command, bufsize)
python
def es_query_template(path):
    """
    RETURN TEMPLATE AND PATH-TO-FILTER AS A 2-TUPLE
    :param path: THE NESTED PATH (NOT INCLUDING TABLE NAME)
    :return: (es_query, es_filters) TUPLE
    """
    if not is_text(path):
        Log.error("expecting path to be a string")

    if path != ".":
        f0 = {}
        f1 = {}
        output = wrap({
            "query": es_and([
                f0,
                {"nested": {
                    "path": path,
                    "query": f1,
                    "inner_hits": {"size": 100000}
                }}
            ]),
            "from": 0,
            "size": 0,
            "sort": []
        })
        return output, wrap([f0, f1])
    else:
        f0 = {}
        output = wrap({
            "query": es_and([f0]),
            "from": 0,
            "size": 0,
            "sort": []
        })
        return output, wrap([f0])
python
def unbind(self, queue, exchange, routing_key='', arguments={}, ticket=None, cb=None):
    '''
    Unbind a queue from an exchange. This is always synchronous.
    '''
    args = Writer()
    args.write_short(ticket or self.default_ticket).\
        write_shortstr(queue).\
        write_shortstr(exchange).\
        write_shortstr(routing_key).\
        write_table(arguments)
    self.send_frame(MethodFrame(self.channel_id, 50, 50, args))

    self._unbind_cb.append(cb)
    self.channel.add_synchronous_cb(self._recv_unbind_ok)
python
def add_event_listener(self, event, function):
    """Add an event listener for a particular event.

    Depending on the event there may or may not be parameters passed to
    function. Most escape streams also allow for an empty set of parameters
    (with a default value). Providing these default values and accepting
    variable arguments is the responsibility of function.

    More than one listener may be added for a single event. Each listener
    will be called.

    * **event** The event to listen for.
    * **function** The callable to invoke.
    """
    if event not in self.listeners:
        self.listeners[event] = []

    self.listeners[event].append(function)
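A tiny usage sketch for the listener registry above; `screen`, the event name, and the handler are all illustrative:

def on_bell():
    print("bell!")

# Assuming `screen` is an instance of the class defining add_event_listener.
screen.add_event_listener("bell", on_bell)

# A dispatcher would then call every listener registered for the event:
for listener in screen.listeners.get("bell", []):
    listener()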
java
protected <T> T parse(JsonReaderI<T> mapper) throws ParseException {
    this.pos = -1;
    T result;
    try {
        read();
        result = readFirst(mapper);
        if (checkTaillingData) {
            if (!checkTaillingSpace)
                skipSpace();
            if (c != EOI)
                throw new ParseException(pos - 1, ERROR_UNEXPECTED_TOKEN, c);
        }
    } catch (IOException e) {
        throw new ParseException(pos, e);
    }
    xs = null;
    xo = null;
    return result;
}
java
public Blog blogInfo(String blogName) {
    Map<String, String> map = new HashMap<String, String>();
    map.put("api_key", this.apiKey);
    return requestBuilder.get(JumblrClient.blogPath(blogName, "/info"), map).getBlog();
}
java
@Override
public JobExecutionResult execute(String jobName) throws Exception {
    Plan p = createProgramPlan(jobName);
    PlanExecutor executor = PlanExecutor.createLocalExecutor();
    initLogging();
    return executor.executePlan(p);
}
java
private boolean observeJavaObject(Object object) {
    // Ignore pure JS objects, this is to avoid impacting pure Vue.js components
    if (object.getClass() == JsObject.class) {
        return false;
    }

    // Don't observe Java classes
    if (object instanceof Class) {
        return true;
    }

    // Check if we have a custom Java observer
    for (VueGWTObserver observer : observers) {
        if (observer.observe(object)) {
            return true;
        }
    }

    makeStaticallyInitializedPropertiesReactive((JsObject) object,
        object.getClass().getCanonicalName());
    return false;
}
java
@Override
public void clear() {
    // iterate over all keys in originalMap and set them to null in deltaMap
    for (K key : originalMap.keySet()) {
        deltaMap.put(key, ErasureUtils.<Collection<V>>uncheckedCast(removedValue));
    }
}
python
def add_operations_bulk(self, payload):
    """Add operations to a group of agents.

    :param list payload: contains the information necessary for the action.
        It's in the form [{"id": agent_id, "operations": operations}].
        With id an str containing only characters in "a-zA-Z0-9_-" that
        must be between 1 and 36 characters. It must reference an existing
        agent.
        With operations a list of dictionaries that have the form given in
        the craftai documentation and the configuration of the agent.

    :return: list of agents containing a message about the added operations.
    :rtype: list of dict.

    :raises CraftAiBadRequestError: if all of the ids are invalid or
        reference non existing agents, or one of the operations is invalid.
    """
    # Check all ids, raise an error if all ids are invalid
    valid_indices, _, _ = self._check_agent_id_bulk(payload)
    valid_payload = [payload[i] for i in valid_indices]

    chunked_data = []
    current_chunk = []
    current_chunk_size = 0

    for agent in valid_payload:
        if agent["operations"] and isinstance(agent["operations"], list):
            if current_chunk_size + len(agent["operations"]) > self.config["operationsChunksSize"]:
                chunked_data.append(current_chunk)
                current_chunk_size = 0
                current_chunk = []

            if len(agent["operations"]) > self.config["operationsChunksSize"]:
                chunked_data.append([agent])
                current_chunk_size = 0
            else:
                current_chunk_size += len(agent["operations"])
                current_chunk.append(agent)

    if current_chunk:
        chunked_data.append(current_chunk)

    return self._add_operations_bulk(chunked_data)
python
def add_entity_errors(self, property_name, direct_errors=None, schema_errors=None):
    """Attach nested entity errors.

    Accepts a list of errors coming from validators attached directly,
    or a dict of errors produced by a nested schema.

    :param property_name: str, property name
    :param direct_errors: list, errors from validators attached directly
    :param schema_errors: dict, errors from nested schema
    :return: shiftschema.result.Result
    """
    if direct_errors is None and schema_errors is None:
        return self

    # direct errors
    if direct_errors is not None:
        if property_name not in self.errors:
            self.errors[property_name] = dict()
        if 'direct' not in self.errors[property_name]:
            self.errors[property_name]['direct'] = []

        if type(direct_errors) is not list:
            direct_errors = [direct_errors]

        for error in direct_errors:
            if not isinstance(error, Error):
                err = 'Error must be of type {}'
                raise x.InvalidErrorType(err.format(Error))
            self.errors[property_name]['direct'].append(error)

    # schema errors
    if schema_errors is not None:
        if isinstance(schema_errors, Result):
            schema_errors = schema_errors.errors
        if not schema_errors:
            return self

        if property_name not in self.errors:
            self.errors[property_name] = dict()
        if 'schema' not in self.errors[property_name]:
            self.errors[property_name]['schema'] = schema_errors
        else:
            self.errors[property_name]['schema'] = self.merge_errors(
                self.errors[property_name]['schema'], schema_errors)

    return self
java
public static synchronized ClassLoader getClassLoader() {
    final Class caller = getCallerClass(0);
    final ClassLoadContext ctx = new ClassLoadContext(caller);
    return s_strategy.getClassLoader(ctx);
}
java
public Flow withEntitlements(Entitlement... entitlements) {
    if (this.entitlements == null) {
        setEntitlements(new java.util.ArrayList<Entitlement>(entitlements.length));
    }
    for (Entitlement ele : entitlements) {
        this.entitlements.add(ele);
    }
    return this;
}
python
def _check_global_settings():
    """Makes sure that the global settings environment variable and file
    exist for configuration.
    """
    global settings
    if settings is not None:
        # We must have already loaded this and everything was okay!
        return True

    from os import getenv
    result = False
    if getenv("PYCI_XML") is None:
        err("The environment variable PYCI_XML for the global configuration "
            "has not been set.")
    else:
        from os import path
        fullpath = path.abspath(path.expanduser(getenv("PYCI_XML")))
        if not path.isfile(fullpath):
            err("The file {} for global configuration does not exist.".format(fullpath))
        else:
            from pyci.config import GlobalSettings
            settings = GlobalSettings()
            result = True
    return result
python
def cash(self):
    """[float] Available cash."""
    return sum(account.cash for account in six.itervalues(self._accounts))
java
public void setISSN(String v) {
    if (Journal_Type.featOkTst && ((Journal_Type) jcasType).casFeat_ISSN == null)
        jcasType.jcas.throwFeatMissing("ISSN", "de.julielab.jules.types.Journal");
    jcasType.ll_cas.ll_setStringValue(addr, ((Journal_Type) jcasType).casFeatCode_ISSN, v);
}
python
def bounds_handler(ctx, param, value):
    """Handle different forms of bounds."""
    retval = from_like_context(ctx, param, value)
    if retval is None and value is not None:
        try:
            value = value.strip(", []")
            retval = tuple(float(x) for x in re.split(r"[,\s]+", value))
            assert len(retval) == 4
            return retval
        except Exception:
            raise click.BadParameter(
                "{0!r} is not a valid bounding box representation".format(value))
    else:  # pragma: no cover
        return retval
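A quick illustration of the parsing core used above, with a made-up bounding-box string; `from_like_context` and the click machinery are left out:

import re

def parse_bounds(value):
    # Same core as bounds_handler: strip wrapping characters, split on
    # commas/whitespace, and expect exactly four floats.
    value = value.strip(", []")
    parts = tuple(float(x) for x in re.split(r"[,\s]+", value))
    assert len(parts) == 4
    return parts

print(parse_bounds("[-78.0, 22.0, -76.0, 24.0]"))  # -> (-78.0, 22.0, -76.0, 24.0)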
java
public static ClassLoader getSystemToolClassLoader() {
    try {
        Class<? extends JavaCompiler> c =
            instance().getSystemToolClass(JavaCompiler.class, defaultJavaCompilerName);
        return c.getClassLoader();
    } catch (Throwable e) {
        return trace(WARNING, e);
    }
}
java
public boolean printData(PrintWriter out, int iPrintOptions) {
    this.addHiddenParam(out, TrxMessageHeader.LOG_TRX_ID,
        this.getProperty(TrxMessageHeader.LOG_TRX_ID));
    return super.printData(out, iPrintOptions); // Don't print
}
python
def create_regular_expression(self, regexp):
    """Create a regular expression for this inspection situation context.
    The inspection situation must be using an inspection context that
    supports regex.

    :param str regexp: regular expression string
    :raises CreateElementFailed: failed to modify the situation
    """
    for parameter in self.situation_context.situation_parameters:
        if parameter.type == 'regexp':
            return self.add_parameter_value(
                'reg_exp_situation_parameter_values',
                **{'parameter_ref': parameter.href, 'reg_exp': regexp})

    # No regexp parameter found: the context cannot take a raw string.
    raise CreateElementFailed('The situation does not support a regular '
                              'expression as a context value.')
python
def insert_short(self, index, value):
    """Inserts an unsigned short in a certain position in the packet"""
    format = '!H'
    self.data.insert(index, struct.pack(format, value))
    self.size += 2
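The packing step above in isolation, standard-library only, showing what the '!H' format produces:

import struct

# '!H' = network byte order (big-endian), unsigned 16-bit integer.
packed = struct.pack('!H', 0x1234)
print(len(packed))                  # -> 2 (bytes 0x12, 0x34)
print(struct.unpack('!H', packed))  # -> (4660,)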
python
async def _dump_tuple(self, writer, elem, elem_type, params=None):
    """Dumps tuple of elements to the writer.

    :param writer:
    :param elem:
    :param elem_type:
    :param params:
    :return:
    """
    if len(elem) != len(elem_type.f_specs()):
        raise ValueError(
            "Fixed size tuple has not defined size: %s" % len(elem_type.f_specs())
        )
    await dump_uvarint(writer, len(elem))

    elem_fields = params[0] if params else None
    if elem_fields is None:
        elem_fields = elem_type.f_specs()

    for idx, elem in enumerate(elem):
        try:
            self.tracker.push_index(idx)
            await self.dump_field(
                writer, elem, elem_fields[idx], params[1:] if params else None
            )
            self.tracker.pop()
        except Exception as e:
            raise helpers.ArchiveException(e, tracker=self.tracker) from e
python
def convolve(input, weights, mask=None, slow=False):
    """2 dimensional convolution.

    This is a Python implementation of what will be written in Fortran.

    Borders are handled with reflection.

    Masking is supported in the following way:
        * Masked points are skipped.
        * Parts of the input which are masked have weight 0 in the kernel.
        * Since the kernel as a whole needs to have value 1, the weights of
          the masked parts of the kernel are evenly distributed over the
          non-masked parts.

    Adapted from https://github.com/nicjhan/gaussian-filter
    """
    assert (len(input.shape) == 2)
    assert (len(weights.shape) == 2)

    # Only one reflection is done on each side so the weights array cannot be
    # bigger than width/height of input +1.
    assert (weights.shape[0] < input.shape[0] + 1)
    assert (weights.shape[1] < input.shape[1] + 1)

    if mask is not None:
        # The slow convolve does not support masking.
        assert (not slow)
        assert (input.shape == mask.shape)
        tiled_mask = tile_and_reflect(mask)

    output = np.copy(input)
    tiled_input = tile_and_reflect(input)

    rows = input.shape[0]
    cols = input.shape[1]
    # Stands for half weights row/column.
    hw_row = int(weights.shape[0] / 2)
    hw_col = int(weights.shape[1] / 2)
    # Stands for full weights row.
    fw_row = weights.shape[0]
    # Full weights column: shape[1], so non-square kernels are sliced correctly.
    fw_col = weights.shape[1]

    # Now do convolution on central array.
    # Iterate over tiled_input.
    for i, io in zip(list(range(rows, rows * 2)), list(range(rows))):
        for j, jo in zip(list(range(cols, cols * 2)), list(range(cols))):
            # The current central pixel is at (i, j)

            # Skip masked points.
            if mask is not None and tiled_mask[i, j]:
                continue

            average = 0.0
            if slow:
                # Iterate over weights/kernel.
                for k in range(weights.shape[0]):
                    for l in range(weights.shape[1]):
                        # Get coordinates of tiled_input array that match
                        # given weights
                        m = i + k - hw_row
                        n = j + l - hw_col
                        average += tiled_input[m, n] * weights[k, l]
            else:
                # Find the part of the tiled_input array that overlaps with
                # the weights array.
                overlapping = tiled_input[
                    i - hw_row:i - hw_row + fw_row,
                    j - hw_col:j - hw_col + fw_col]
                assert (overlapping.shape == weights.shape)

                # If any of 'overlapping' is masked then set the corresponding
                # points in the weights matrix to 0 and redistribute these to
                # non-masked points.
                if mask is not None:
                    overlapping_mask = tiled_mask[
                        i - hw_row:i - hw_row + fw_row,
                        j - hw_col:j - hw_col + fw_col]
                    assert (overlapping_mask.shape == weights.shape)

                    # Total value and number of weights clobbered by the mask.
                    clobber_total = np.sum(weights[overlapping_mask])
                    remaining_num = np.sum(np.logical_not(overlapping_mask))
                    # This is impossible since at least i, j is not masked.
                    assert (remaining_num > 0)
                    correction = clobber_total / remaining_num

                    # It is OK if nothing is masked - the weights will not be
                    # changed.
                    if correction == 0:
                        assert (not overlapping_mask.any())

                    # Redistribute to non-masked points.
                    tmp_weights = np.copy(weights)
                    tmp_weights[overlapping_mask] = 0.0
                    tmp_weights[np.where(tmp_weights != 0)] += correction

                    # Should be very close to 1. May not be exact due to
                    # rounding.
                    assert (abs(np.sum(tmp_weights) - 1) < 1e-15)
                else:
                    tmp_weights = weights

                merged = tmp_weights[:] * overlapping
                average = np.sum(merged)

            # Set new output value.
            output[io, jo] = average

    return output
java
public <T> void serialize(SerializerContext serializerContext, ElementDescriptor<T> descriptor, T rootObject)
        throws SerializerException, IOException {
    serializerContext.serializer.startDocument(null, null);
    useNamespace(serializerContext, descriptor.qualifiedName);
    bindNamespaces(serializerContext);
    mChildWriter.writeChild(descriptor, rootObject, serializerContext);
    serializerContext.serializer.endDocument();
}
java
private static Iterator<?> computeIteratedObjectIterator(final Object iteratedObject) {
    if (iteratedObject == null) {
        return Collections.EMPTY_LIST.iterator();
    }
    if (iteratedObject instanceof Collection<?>) {
        return ((Collection<?>) iteratedObject).iterator();
    }
    if (iteratedObject instanceof Map<?,?>) {
        return ((Map<?,?>) iteratedObject).entrySet().iterator();
    }
    if (iteratedObject.getClass().isArray()) {
        return new Iterator<Object>() {

            protected final Object array = iteratedObject;
            protected final int length = Array.getLength(this.array);
            private int i = 0;

            public boolean hasNext() {
                return this.i < this.length;
            }

            public Object next() {
                return Array.get(this.array, i++);
            }

            public void remove() {
                throw new UnsupportedOperationException("Cannot remove from an array iterator");
            }
        };
    }
    if (iteratedObject instanceof Iterable<?>) {
        return ((Iterable<?>) iteratedObject).iterator();
    }
    if (iteratedObject instanceof Iterator<?>) {
        return (Iterator<?>) iteratedObject;
    }
    if (iteratedObject instanceof Enumeration<?>) {
        return new Iterator<Object>() {

            protected final Enumeration<?> enumeration = (Enumeration<?>) iteratedObject;

            public boolean hasNext() {
                return this.enumeration.hasMoreElements();
            }

            public Object next() {
                return this.enumeration.nextElement();
            }

            public void remove() {
                throw new UnsupportedOperationException("Cannot remove from an Enumeration iterator");
            }
        };
    }
    return Collections.singletonList(iteratedObject).iterator();
}
java
protected final void addRepeatable(String annotationName,
        io.micronaut.core.annotation.AnnotationValue annotationValue) {
    if (StringUtils.isNotEmpty(annotationName) && annotationValue != null) {
        Map<String, Map<CharSequence, Object>> allAnnotations = getAllAnnotations();
        addRepeatableInternal(annotationName, annotationValue, allAnnotations);
    }
}
python
def loadLayer(filename, name=None, provider=None):
    '''Tries to load a layer from the given file.

    :param filename: the path to the file to load.
    :param name: the name to use for adding the layer to the current project.
        If not passed or None, it will use the filename basename.
    '''
    name = name or os.path.splitext(os.path.basename(filename))[0]
    if provider != 'gdal':
        # QGIS3 crashes if opening a raster as vector ...
        # this needs further investigation
        qgslayer = QgsVectorLayer(filename, name, provider or "ogr")
    if provider == 'gdal' or not qgslayer.isValid():
        qgslayer = QgsRasterLayer(filename, name, provider or "gdal")
        if not qgslayer.isValid():
            raise RuntimeError('Could not load layer: ' + unicode(filename))
    return qgslayer
java
public TupleRefBuilder bindings(Stream<VarBinding> bindings) {
    bindings.forEach(binding -> tupleRef_.addVarBinding(binding));
    return this;
}