language
stringclasses
2 values
func_code_string
stringlengths
63
466k
java
/**
 * Creates a {@link Builder} pre-populated with the headers and data of the
 * given message, so callers can derive a modified copy of it.
 *
 * @param message the message whose headers and data seed the builder
 * @return a new builder carrying the message's headers and data
 */
public static Builder from(GatewayMessage message) {
    return new Builder().headers(message.headers).data(message.data);
}
python
def make_report(self):
    """Makes a html report and saves it into the same folder where logs are stored.

    Raises:
        RuntimeError: if the executor has not been run yet (no execution stats).
    """
    if self.execution_stats is None:
        raise RuntimeError('Cannot produce a report without running the executor first, check EOExecutor.run '
                           'method')
    # Headless environments (no X display) cannot use an interactive
    # matplotlib backend, so fall back to Agg before any plotting happens.
    if os.environ.get('DISPLAY', '') == '':
        LOGGER.info('No display found, using non-interactive Agg backend')
        plt.switch_backend('Agg')
    dependency_graph = self._create_dependency_graph()
    task_descriptions = self._get_task_descriptions()
    # Single pygments formatter reused for both task source and error
    # rendering, so get_style_defs() covers everything in the template.
    formatter = HtmlFormatter(linenos=True)
    task_source = self._render_task_source(formatter)
    execution_stats = self._render_execution_errors(formatter)
    template = self._get_template()
    html = template.render(dependency_graph=dependency_graph,
                           task_descriptions=task_descriptions,
                           task_source=task_source,
                           execution_stats=execution_stats,
                           execution_logs=self.execution_logs,
                           code_css=formatter.get_style_defs())
    if not os.path.isdir(self.report_folder):
        os.mkdir(self.report_folder)
    with open(self.get_report_filename(), 'w') as fout:
        fout.write(html)
java
/**
 * Renders the parse tree of the given parsing result as a string, including
 * only nodes accepted by {@code nodeFilter} and descending only into subtrees
 * accepted by {@code subTreeFilter}.
 *
 * @param <V>           the type of the parse-tree node values
 * @param parsingResult the result whose tree is printed; must not be null
 * @param nodeFilter    selects which nodes appear in the output; must not be null
 * @param subTreeFilter selects which subtrees are descended into; must not be null
 * @return the formatted tree
 */
public static <V> String printNodeTree(ParsingResult<V> parsingResult, Predicate<Node<V>> nodeFilter,
        Predicate<Node<V>> subTreeFilter) {
    checkArgNotNull(parsingResult, "parsingResult");
    checkArgNotNull(nodeFilter, "nodeFilter");
    checkArgNotNull(subTreeFilter, "subTreeFilter");
    return printTree(parsingResult.parseTreeRoot, new NodeFormatter<V>(parsingResult.inputBuffer),
            nodeFilter, subTreeFilter);
}
python
def validate(ref_voicing, ref_cent, est_voicing, est_cent):
    """Checks that voicing and frequency arrays are well-formed.

    To be used in conjunction with :func:`mir_eval.melody.validate_voicing`

    Parameters
    ----------
    ref_voicing : np.ndarray
        Reference boolean voicing array
    ref_cent : np.ndarray
        Reference pitch sequence in cents
    est_voicing : np.ndarray
        Estimated boolean voicing array
    est_cent : np.ndarray
        Estimate pitch sequence in cents
    """
    if ref_cent.size == 0:
        warnings.warn("Reference frequency array is empty.")
    if est_cent.size == 0:
        warnings.warn("Estimated frequency array is empty.")
    # All four arrays must describe the same number of frames; collecting the
    # lengths into a set makes "all equal" a single-cardinality check.
    lengths = {ref_voicing.shape[0], ref_cent.shape[0],
               est_voicing.shape[0], est_cent.shape[0]}
    if len(lengths) != 1:
        raise ValueError('All voicing and frequency arrays must have the '
                         'same length.')
java
/**
 * Stacks a toolbar above a screen component inside a transparent,
 * vertically laid-out panel. Both children are left-aligned so the
 * BoxLayout does not center them.
 *
 * @param screen  the main content component, placed below the toolbar
 * @param toolbar the toolbar component, placed on top
 * @return the combined container panel
 */
public JPanel addToolbar(JComponent screen, JComponent toolbar) {
    final JPanel container = new JPanel();
    container.setOpaque(false);
    container.setLayout(new BoxLayout(container, BoxLayout.Y_AXIS));
    toolbar.setAlignmentX(0);
    container.add(toolbar);
    screen.setAlignmentX(0);
    container.add(screen);
    return container;
}
java
/**
 * Reads an iCalendar VTIMEZONE block from the reader into {@code vtzlines},
 * unfolding RFC2445 folded lines (a line continued by a following TAB/SP),
 * then parses the collected lines.
 *
 * @param reader the character source; read until END:VTIMEZONE or EOF
 * @return true if a complete VTIMEZONE block was read and parsed successfully
 */
private boolean load(Reader reader) {
    // Read VTIMEZONE block into string array
    try {
        vtzlines = new LinkedList<String>();
        boolean eol = false;      // previous char ended a line (LF seen)
        boolean start = false;    // BEGIN:VTIMEZONE has been seen
        boolean success = false;  // END:VTIMEZONE has been seen
        StringBuilder line = new StringBuilder();
        while (true) {
            int ch = reader.read();
            if (ch == -1) {
                // end of file
                if (start && line.toString().startsWith(ICAL_END_VTIMEZONE)) {
                    vtzlines.add(line.toString());
                    success = true;
                }
                break;
            }
            if (ch == 0x0D) {
                // CR, must be followed by LF by the definition in RFC2445
                continue;
            }
            if (eol) {
                // Just after an LF: TAB/SP means the previous line is folded
                // and continues; anything else starts a new logical line.
                if (ch != 0x09 && ch != 0x20) {
                    // NOT followed by TAB/SP -> new line
                    if (start) {
                        if (line.length() > 0) {
                            vtzlines.add(line.toString());
                        }
                    }
                    line.setLength(0);
                    if (ch != 0x0A) {
                        line.append((char)ch);
                    }
                }
                eol = false;
            } else {
                if (ch == 0x0A) {
                    // LF: end of a physical line; check for block boundaries.
                    eol = true;
                    if (start) {
                        if (line.toString().startsWith(ICAL_END_VTIMEZONE)) {
                            vtzlines.add(line.toString());
                            success = true;
                            break;
                        }
                    } else {
                        // Discard everything until BEGIN:VTIMEZONE.
                        if (line.toString().startsWith(ICAL_BEGIN_VTIMEZONE)) {
                            vtzlines.add(line.toString());
                            line.setLength(0);
                            start = true;
                            eol = false;
                        }
                    }
                } else {
                    line.append((char)ch);
                }
            }
        }
        if (!success) {
            return false;
        }
    } catch (IOException ioe) {
        ///CLOVER:OFF
        return false;
        ///CLOVER:ON
    }
    return parse();
}
python
def select(self, *cluster_ids): """Select a list of clusters.""" # HACK: allow for `select(1, 2, 3)` in addition to `select([1, 2, 3])` # This makes it more convenient to select multiple clusters with # the snippet: `:c 1 2 3` instead of `:c 1,2,3`. if cluster_ids and isinstance(cluster_ids[0], (tuple, list)): cluster_ids = list(cluster_ids[0]) + list(cluster_ids[1:]) # Remove non-existing clusters from the selection. cluster_ids = self._keep_existing_clusters(cluster_ids) # Update the cluster view selection. self.cluster_view.select(cluster_ids)
java
/**
 * Deletes the specified application security group, blocking until the
 * long-running service operation completes.
 *
 * @param resourceGroupName            the resource group containing the group
 * @param applicationSecurityGroupName the name of the application security group
 */
public void delete(String resourceGroupName, String applicationSecurityGroupName) {
    // Blocks on the async call; last() waits for the final long-running-operation result.
    deleteWithServiceResponseAsync(resourceGroupName, applicationSecurityGroupName).toBlocking().last().body();
}
java
/**
 * Adds a weighted observation (x, y) to this incremental (co)variance
 * accumulator. Zero-weight points are ignored; the first effective point
 * initializes the weighted sums directly.
 *
 * @param x first variable's value
 * @param y second variable's value
 * @param w observation weight (0 is a no-op)
 */
public void put(double x, double y, double w) {
    if(w == 0.) {
        return;
    }
    if(sumWe <= 0.) {
        // First observation: initialize weighted sums, no deltas needed.
        sumX = x * w;
        sumY = y * w;
        sumWe = w;
        return;
    }
    // Delta to previous mean (scaled by the old total weight)
    final double deltaX = x * sumWe - sumX;
    final double deltaY = y * sumWe - sumY;
    final double oldWe = sumWe;
    // Incremental update
    sumWe += w;
    final double f = w / (sumWe * oldWe);
    // Update
    sumXX += f * deltaX * deltaX;
    sumYY += f * deltaY * deltaY;
    // should equal weight * deltaY * deltaX!
    sumXY += f * deltaX * deltaY;
    // Update means
    sumX += x * w;
    sumY += y * w;
}
java
/**
 * Dispatches client security events to the matching broadcast method.
 * Only {@link ClientSecurityEvent} subtypes are handled; the event source
 * carries the {@link Authentication} token (with a sentinel for "none").
 *
 * @param event the application event to inspect and dispatch
 */
public void onApplicationEvent(ApplicationEvent event) {
    // All events we care about are subtypes of ClientSecurityEvent
    if( event instanceof ClientSecurityEvent ) {
        Authentication authentication = (Authentication) event.getSource();
        if( logger.isDebugEnabled() ) {
            logger.debug( "RECEIVED ClientSecurityEvent: " + event );
            logger.debug( "Authentication token: " + authentication );
        }
        // Note that we need to inspect the new authentication token and see if it is
        // NO_AUTHENTICATION. If so, then we need to use null instead. This little
        // dance is required because the source of an event can't actually be null.
        if( authentication == ClientSecurityEvent.NO_AUTHENTICATION ) {
            if( logger.isDebugEnabled() ) {
                logger.debug( "Converted NO_AUTHENTICATION to null" );
            }
            authentication = null;
        }
        // And dispatch according to the event type.
        if( event instanceof AuthenticationEvent ) {
            broadcastAuthentication( authentication );
        } else if( event instanceof LoginEvent ) {
            broadcastLogin( authentication );
        } else if( event instanceof LogoutEvent ) {
            broadcastLogout( authentication );
        } else {
            if( logger.isDebugEnabled() ) {
                logger.debug( "Unsupported event not processed" );
            }
        }
    }
}
python
def handle_string_response(self, call_id, payload):
    """Handler for response `StringResponse`.

    This is the response for the following requests:
      1. `DocUriAtPointReq` or `DocUriForSymbolReq`
      2. `DebugToStringReq`

    Browses the doc URL if the originating call asked for it, otherwise
    returns the URL.
    """
    self.log.debug('handle_string_response: in [typehint: %s, call ID: %s]',
                   payload['typehint'], call_id)
    # :EnDocBrowse or :EnDocUri
    url = payload['text']
    # Relative doc paths are served by the local ensime HTTP server.
    if not url.startswith('http'):
        port = self.ensime.http_port()
        url = gconfig['localhost'].format(port, url)
    options = self.call_options.get(call_id)
    if options and options.get('browse'):
        self._browse_doc(url)
        # Call handled; drop its per-call options.
        del self.call_options[call_id]
    else:
        # TODO: make this return value of a Vim function synchronously, how?
        self.log.debug('EnDocUri %s', url)
        return url
java
/**
 * Verifies that the remote storage info matches the local one; throws an
 * IOException describing both sides when they differ.
 *
 * @param remote the remote node's storage info
 * @param local  this node's storage info
 * @param image  true if comparing image storage, false for edits storage
 * @param name   identifier of the remote side, used in the error message
 * @throws IOException when remote and local storage info are not equal
 */
private void checkConsistency(StorageInfo remote, StorageInfo local,
        boolean image, Object name) throws IOException {
    // Matching storage: nothing to do.
    if (remote.equals(local)) {
        return;
    }
    throwIOException("Remote " + (image ? "image" : "edits")
        + " storage is different than local. Local: ("
        + local.toColonSeparatedString() + "), remote: " + name.toString() + " ("
        + remote.toColonSeparatedString() + ")");
}
python
def get(self, url, data=None):
    """Executes an HTTP GET request for the given URL.

    ``data`` should be a dictionary of url parameters
    """
    # Session defaults (headers, extra request kwargs) are applied on every call.
    resp = self.http.get(
        url,
        headers=self.headers,
        params=data,
        **self.requests_params
    )
    return self.process(resp)
python
def save(self, filename):
    '''save settings to a file. Return True/False on success/failure'''
    try:
        # 'with' guarantees the handle is closed even if a write raises,
        # fixing the leak in the previous version (f was never closed on a
        # write error) and honoring the True/False contract for write
        # failures as well as open failures.
        with open(filename, mode='w') as f:
            for k in self.list():
                f.write("%s=%s\n" % (k, self.get(k)))
    except Exception:
        return False
    return True
java
/**
 * Replaces the stored list of local secondary index descriptions.
 * A defensive copy of the supplied collection is kept so later caller-side
 * mutation cannot affect this object; passing {@code null} clears the field.
 *
 * @param localSecondaryIndexes the new index descriptions, or {@code null}
 */
public void setLocalSecondaryIndexes(java.util.Collection<LocalSecondaryIndexInfo> localSecondaryIndexes) {
    this.localSecondaryIndexes = (localSecondaryIndexes == null)
        ? null
        : new java.util.ArrayList<LocalSecondaryIndexInfo>(localSecondaryIndexes);
}
java
/**
 * Returns the first option value matching the given company id, ordered by
 * the supplied comparator, or {@code null} if none matches.
 *
 * @param companyId         the company id to filter by
 * @param orderByComparator the ordering applied before taking the first match
 * @return the first matching option value, or {@code null}
 */
public static CPOptionValue fetchByCompanyId_First(long companyId,
        OrderByComparator<CPOptionValue> orderByComparator) {
    return getPersistence()
        .fetchByCompanyId_First(companyId, orderByComparator);
}
python
def neighbor(self, **kwargs):
    """Experimental neighbor method.

    Args:
        ip_addr (str): IP Address of BGP neighbor.
        remote_as (str): Remote ASN of BGP neighbor.
        rbridge_id (str): The rbridge ID of the device on which BGP will be
            configured in a VCS fabric.
        afis (list): A list of AFIs to configure. Do not include IPv4 or
            IPv6 unicast as these are inferred from the `ip_addr` parameter.
        delete (bool): Deletes the neighbor if `delete` is ``True``.
        get (bool): Get config instead of editing config. (True, False)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `remote_as` or `ip_addr` is not specified.

    Examples:
        >>> import pynos.device
        >>> conn = ('10.24.39.203', '22')
        >>> auth = ('admin', 'password')
        >>> with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...     output = dev.bgp.local_asn(local_as='65535',
        ...     rbridge_id='225')
        ...     output = dev.bgp.neighbor(ip_addr='10.10.10.10',
        ...     remote_as='65535', rbridge_id='225')
        ...     output = dev.bgp.neighbor(remote_as='65535',
        ...     rbridge_id='225',
        ...     ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1')
        ...     output = dev.bgp.neighbor(ip_addr='10.10.10.10',
        ...     delete=True, rbridge_id='225', remote_as='65535')
        ...     output = dev.bgp.neighbor(remote_as='65535',
        ...     rbridge_id='225', delete=True,
        ...     ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1')
    """
    # NOTE(review): `unicode` is Python 2 only — this module presumably
    # targets Python 2; confirm before porting.
    ip_addr = ip_interface(unicode(kwargs.pop('ip_addr')))
    rbridge_id = kwargs.pop('rbridge_id', '1')
    delete = kwargs.pop('delete', False)
    callback = kwargs.pop('callback', self._callback)
    remote_as = kwargs.pop('remote_as', None)
    get_config = kwargs.pop('get', False)
    if not get_config and remote_as is None:
        raise ValueError('When configuring a neighbor, you must specify '
                         'its remote-as.')
    neighbor_args = dict(router_bgp_neighbor_address=str(ip_addr.ip),
                         remote_as=remote_as,
                         rbridge_id=rbridge_id)
    # IPv6 neighbors use a different leaf name in the generated XML.
    if ip_addr.version == 6:
        neighbor_args['router_bgp_neighbor_ipv6_address'] = str(ip_addr.ip)
    neighbor, ip_addr_path = self._unicast_xml(ip_addr.version)
    config = neighbor(**neighbor_args)
    if ip_addr.version == 6 and not delete:
        config = self._build_ipv6(ip_addr, config, rbridge_id)
    # Deletion: mark both the AF-specific node and the neighbor-address node
    # with operation="delete" so the device removes the whole neighbor.
    if delete and config.find(ip_addr_path) is not None:
        if ip_addr.version == 4:
            config.find(ip_addr_path).set('operation', 'delete')
            config.find('.//*router-bgp-neighbor-address').set('operation',
                                                               'delete')
        elif ip_addr.version == 6:
            config.find(ip_addr_path).set('operation', 'delete')
            config.find('.//*router-bgp-neighbor-ipv6-address').set(
                'operation', 'delete')
    if get_config:
        return callback(config, handler='get_config')
    return callback(config)
java
/**
 * Form validation endpoint for a proposed node name. Requires the caller to
 * hold {@code Computer.CREATE}; an empty value is accepted (validated later),
 * otherwise {@link #checkName} decides and its failure message is surfaced.
 *
 * @param value the candidate name from the form
 * @return ok when the name is empty or valid, an error with the failure message otherwise
 */
public FormValidation doCheckName(@QueryParameter String value) throws IOException, ServletException {
    // NOTE(review): Jenkins.getInstance() can return null on newer cores —
    // confirm the target Jenkins version before relying on it here.
    Jenkins.getInstance().checkPermission(Computer.CREATE);
    if(Util.fixEmpty(value)==null)
        return FormValidation.ok();
    try {
        checkName(value);
        return FormValidation.ok();
    } catch (Failure e) {
        return FormValidation.error(e.getMessage());
    }
}
python
def getobject(bunchdt, key, name):
    """Return the first object of type `key` whose unique id equals `name`
    (case-insensitive), or None when there is no match or no objects of that
    type exist. You should not have more than one object with the same id."""
    candidates = bunchdt[key]
    if candidates:
        # second item in objls is the unique-ID field name
        id_field = candidates[0].objls[1]
        target = name.upper()
        for candidate in candidates:
            if candidate[id_field].upper() == target:
                return candidate
    return None
java
/**
 * Resolves a link whose target site is unknown: external links are returned
 * untouched, internal links are mapped to their site root (falling back to
 * the current request's site) and substituted via {@code substituteLink}.
 *
 * @param cms              the current OpenCms context
 * @param link             the link to resolve; may be empty
 * @param targetDetailPage the detail page to use for detail links
 * @param forceSecure      whether to force a secure (https) link
 * @return the substituted link, the original link for external targets, or "" for empty input
 */
public String substituteLinkForUnknownTarget(
    CmsObject cms,
    String link,
    String targetDetailPage,
    boolean forceSecure) {

    if (CmsStringUtil.isEmpty(link)) {
        return "";
    }
    String sitePath = link;
    String siteRoot = null;
    if (hasScheme(link)) {
        // the link has a scheme, that is starts with something like "http://"
        // usually this should be a link to an external resource, but check anyway
        sitePath = getRootPath(cms, link);
        if (sitePath == null) {
            // probably an external link, don't touch this
            return link;
        }
    }
    // check if we can find a site from the link
    siteRoot = OpenCms.getSiteManager().getSiteRoot(sitePath);
    if (siteRoot == null) {
        // use current site root in case no valid site root is available
        // this will also be the case if a "/system" link is used
        siteRoot = cms.getRequestContext().getSiteRoot();
    } else {
        // we found a site root, cut this from the resource path
        sitePath = sitePath.substring(siteRoot.length());
    }
    return substituteLink(cms, sitePath, siteRoot, targetDetailPage, forceSecure);
}
java
/**
 * Tells whether any configured step matches the given slide content's name
 * (case-insensitive).
 *
 * @param slideContent the slide content whose name is looked up
 * @return true if a step with that name exists
 */
private boolean hasStep(final SlideContent slideContent) {
    for (final S step : getStepList()) {
        // Early return: no need to keep scanning once a match is found
        // (the previous version iterated the whole list regardless).
        if (step.name().equalsIgnoreCase(slideContent.getName())) {
            return true;
        }
    }
    return false;
}
python
def create_template(self, s, provider_name=None):
    """Creates a template from the given string based on the specified
    provider or the provider with highest precedence.

    Args:
        s: The string to convert to a template.
        provider_name: The name of the provider to use to create the
            template.
    """
    # Default to the highest-precedence provider when none is named.
    name = provider_name if provider_name is not None else self.supported_providers[0]
    # Provider lookup stays inside the lambda so its errors are also routed
    # through the exception handler, exactly as before.
    return template_exception_handler(
        lambda: self.get_provider(name).create_template(s),
        self.error_context)
java
/**
 * Returns the coordinate's X value clamped to the range
 * [-1,000,000, 1,000,000]. NaN passes through unchanged, matching the
 * behavior of the previous hand-rolled if/else clamp.
 *
 * @param c the coordinate to read
 * @return the clamped X value
 */
private static double getX(Coordinate c) {
    // Math.min/Math.max express the clamp directly instead of a branch chain.
    return Math.max(-1000000, Math.min(1000000, c.getX()));
}
java
/**
 * Applies the rotation of the given quaternion about the pivot point
 * (ox, oy, oz) to this matrix, storing the result in this matrix.
 *
 * @param quat the rotation to apply
 * @param ox   pivot x coordinate
 * @param oy   pivot y coordinate
 * @param oz   pivot z coordinate
 * @return this matrix, for chaining
 */
public Matrix4d rotateAround(Quaterniondc quat, double ox, double oy, double oz) {
    // Delegates to the destination-taking overload with `this` as destination.
    return rotateAround(quat, ox, oy, oz, this);
}
java
/**
 * Returns the entity with the given primary key, throwing when it does not
 * exist (unlike {@code fetchByPrimaryKey}, which returns {@code null}).
 *
 * @param primaryKey the primary key to look up
 * @return the matching entity, never {@code null}
 * @throws NoSuchShippingFixedOptionRelException if no entity has that key
 */
@Override
public CommerceShippingFixedOptionRel findByPrimaryKey(Serializable primaryKey)
    throws NoSuchShippingFixedOptionRelException {
    CommerceShippingFixedOptionRel commerceShippingFixedOptionRel = fetchByPrimaryKey(primaryKey);
    if (commerceShippingFixedOptionRel == null) {
        if (_log.isDebugEnabled()) {
            _log.debug(_NO_SUCH_ENTITY_WITH_PRIMARY_KEY + primaryKey);
        }
        throw new NoSuchShippingFixedOptionRelException(_NO_SUCH_ENTITY_WITH_PRIMARY_KEY +
            primaryKey);
    }
    return commerceShippingFixedOptionRel;
}
python
def check_exists(filename, oappend=False):
    """
    Avoid overwriting some files accidentally.

    Returns a truthy value when it is OK to proceed: `oappend` when the file
    exists and append mode was requested, the user's interactive Y/N answer
    when the file exists otherwise, and True when the file does not exist.
    """
    if op.exists(filename):
        if oappend:
            return oappend
        logging.error("`{0}` found, overwrite (Y/N)?".format(filename))
        # NOTE(review): `raw_input` is Python 2 only — presumably this module
        # targets Python 2; confirm before porting to Python 3 (use input()).
        overwrite = (raw_input() == 'Y')
    else:
        overwrite = True
    return overwrite
java
/**
 * Creates a {@link NotCondition.Builder} negating the condition produced by
 * the given builder.
 *
 * @param conditionBuilder builder for the condition to negate
 * @return a builder for the negated condition
 */
public static NotCondition.Builder not(Condition.Builder conditionBuilder) {
    return NotCondition.builder().condition(conditionBuilder);
}
java
/**
 * Initializes the shadow-responsive-effect flag from the XML attributes,
 * keeping the current value as the default when the attribute is absent.
 *
 * @param attrs the styled attributes of the action button
 */
private void initShadowResponsiveEffectEnabled(TypedArray attrs) {
    int index = R.styleable.ActionButton_shadowResponsiveEffect_enabled;
    // Only touch the field when the attribute was actually set in XML.
    if (attrs.hasValue(index)) {
        shadowResponsiveEffectEnabled = attrs.getBoolean(index, shadowResponsiveEffectEnabled);
        LOGGER.trace("Initialized Action Button Shadow Responsive Effect enabled: {}",
                isShadowResponsiveEffectEnabled());
    }
}
java
// GUI-builder generated layout code (NetBeans, see GEN-BEGIN marker below).
// Do not hand-edit the statements: the form editor regenerates this method.
@SuppressWarnings("unchecked")
// <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
private void initComponents() {
    java.awt.GridBagConstraints gridBagConstraints;

    jScrollPane1 = new javax.swing.JScrollPane();
    textArea = new javax.swing.JTextArea();
    jPanel1 = new javax.swing.JPanel();
    jLabel1 = new javax.swing.JLabel();
    comboBoxName = new javax.swing.JComboBox<>();
    jLabel2 = new javax.swing.JLabel();
    comboBoxStyle = new javax.swing.JComboBox<>();
    jLabel3 = new javax.swing.JLabel();
    comboBoxSize = new javax.swing.JComboBox<>();

    setLayout(new java.awt.GridBagLayout());

    textArea.setColumns(20);
    textArea.setRows(5);
    jScrollPane1.setViewportView(textArea);
    gridBagConstraints = new java.awt.GridBagConstraints();
    gridBagConstraints.gridx = 0;
    gridBagConstraints.gridy = 1;
    gridBagConstraints.fill = java.awt.GridBagConstraints.BOTH;
    gridBagConstraints.ipadx = 404;
    gridBagConstraints.ipady = 224;
    gridBagConstraints.anchor = java.awt.GridBagConstraints.NORTHWEST;
    gridBagConstraints.weightx = 1.0;
    gridBagConstraints.weighty = 1.0;
    add(jScrollPane1, gridBagConstraints);

    jPanel1.setLayout(new java.awt.GridBagLayout());

    jLabel1.setText("Name:");
    gridBagConstraints = new java.awt.GridBagConstraints();
    gridBagConstraints.gridx = 0;
    gridBagConstraints.gridy = 0;
    gridBagConstraints.fill = java.awt.GridBagConstraints.BOTH;
    gridBagConstraints.insets = new java.awt.Insets(0, 16, 0, 0);
    jPanel1.add(jLabel1, gridBagConstraints);
    gridBagConstraints = new java.awt.GridBagConstraints();
    gridBagConstraints.gridx = 1;
    gridBagConstraints.gridy = 0;
    gridBagConstraints.fill = java.awt.GridBagConstraints.BOTH;
    gridBagConstraints.ipadx = 80;
    jPanel1.add(comboBoxName, gridBagConstraints);

    jLabel2.setText("Style:");
    gridBagConstraints = new java.awt.GridBagConstraints();
    gridBagConstraints.gridx = 2;
    gridBagConstraints.gridy = 0;
    gridBagConstraints.fill = java.awt.GridBagConstraints.BOTH;
    gridBagConstraints.insets = new java.awt.Insets(0, 16, 0, 0);
    jPanel1.add(jLabel2, gridBagConstraints);
    gridBagConstraints = new java.awt.GridBagConstraints();
    gridBagConstraints.gridx = 3;
    gridBagConstraints.gridy = 0;
    gridBagConstraints.fill = java.awt.GridBagConstraints.BOTH;
    gridBagConstraints.ipadx = 80;
    jPanel1.add(comboBoxStyle, gridBagConstraints);

    jLabel3.setText("Size:");
    gridBagConstraints = new java.awt.GridBagConstraints();
    gridBagConstraints.gridx = 4;
    gridBagConstraints.gridy = 0;
    gridBagConstraints.fill = java.awt.GridBagConstraints.BOTH;
    gridBagConstraints.insets = new java.awt.Insets(0, 16, 0, 0);
    jPanel1.add(jLabel3, gridBagConstraints);
    gridBagConstraints = new java.awt.GridBagConstraints();
    gridBagConstraints.gridx = 5;
    gridBagConstraints.gridy = 0;
    gridBagConstraints.fill = java.awt.GridBagConstraints.BOTH;
    gridBagConstraints.ipadx = 80;
    jPanel1.add(comboBoxSize, gridBagConstraints);

    gridBagConstraints = new java.awt.GridBagConstraints();
    gridBagConstraints.insets = new java.awt.Insets(8, 0, 8, 0);
    add(jPanel1, gridBagConstraints);
}
python
def get(self, request):
    """
    Returns a json representing the menu voices in a format eaten by the js menu.
    Raised ImproperlyConfigured exceptions can be viewed in the browser console
    """
    self.app_list = site.get_app_list(request)
    self.apps_dict = self.create_app_list_dict()
    # no menu provided
    items = get_config('MENU')
    if not items:
        # Fall back to the auto-generated default menu.
        voices = self.get_default_voices()
    else:
        voices = []
        for item in items:
            self.add_voice(voices, item)
    # safe=False: the top-level JSON value is a list, not a dict.
    return JsonResponse(voices, safe=False)
java
/**
 * Copies all bytes from {@code inputStream} to {@code outputStream}, then
 * closes both streams (close happens even on failure). This preserves the
 * contract of the previous Guava {@code ByteSource.copyTo(ByteSink)} version,
 * which closed the streams it opened, while dropping the two anonymous
 * adapter objects that existed only to reuse that API.
 *
 * @param inputStream  source of bytes; closed on return
 * @param outputStream destination for bytes; closed on return
 * @throws IOException if reading, writing, or closing fails
 */
public static void copy(final InputStream inputStream, final OutputStream outputStream) throws IOException {
    // try-with-resources closes out then in, mirroring Guava's Closer order.
    try (InputStream in = inputStream; OutputStream out = outputStream) {
        final byte[] buffer = new byte[8192];
        int read;
        while ((read = in.read(buffer)) != -1) {
            out.write(buffer, 0, read);
        }
    }
}
java
/**
 * Builds the full feature vector for the given parser state by extracting
 * the context once and appending each feature family in turn.
 *
 * @param s        the current parser state
 * @param cluster4 4-bit word-cluster ids
 * @param cluster6 6-bit word-cluster ids
 * @param cluster  full word-cluster ids
 * @param features output list the extracted feature ids are appended to
 */
void get_features(final State s, final List<Integer> cluster4, final List<Integer> cluster6,
        final List<Integer> cluster, List<Integer> features) {
    Context ctx = new Context();
    get_context(s, ctx);
    // Lexical/POS/dependency-label features of the current context.
    get_basic_features(ctx, s.ref.forms, s.ref.postags, s.deprels, features);
    get_distance_features(ctx, features);
    get_valency_features(ctx, s.nr_left_children, s.nr_right_children, features);
    get_cluster_features(ctx, cluster4, cluster6, cluster, features);
}
java
/**
 * Fetches the filterglobal_binding resource from the given NetScaler service.
 *
 * @param service the nitro service to query
 * @return the retrieved binding resource
 * @throws Exception if the underlying resource fetch fails
 */
public static filterglobal_binding get(nitro_service service) throws Exception {
    // Inlined: a fresh request object fetches and returns the resource directly.
    return (filterglobal_binding) new filterglobal_binding().get_resource(service);
}
python
def configure(self, address):
    """Configure socket's addresses with nanoconfig.

    The nanoconfig-managed endpoint must be the socket's only endpoint;
    raises ValueError otherwise. Returns the created endpoint record.
    """
    global nanoconfig_started
    if len(self._endpoints):
        raise ValueError("Nanoconfig address must be sole endpoint")
    # nc_configure returns a negative code on failure; the helper raises then.
    endpoint_id = _nn_check_positive_rtn(
        wrapper.nc_configure(self.fd, address)
        )
    # Remember (process-wide) that nanoconfig is in use so shutdown can
    # terminate it.
    if not nanoconfig_started:
        nanoconfig_started = True
    ep = Socket.NanoconfigEndpoint(self, endpoint_id, address)
    self._endpoints.append(ep)
    return ep
java
private int getIsoLevel(String isoLevel) { if (isoLevel.equalsIgnoreCase(LITERAL_IL_READ_UNCOMMITTED)) { return IL_READ_UNCOMMITTED; } else if (isoLevel.equalsIgnoreCase(LITERAL_IL_READ_COMMITTED)) { return IL_READ_COMMITTED; } else if (isoLevel.equalsIgnoreCase(LITERAL_IL_REPEATABLE_READ)) { return IL_REPEATABLE_READ; } else if (isoLevel.equalsIgnoreCase(LITERAL_IL_SERIALIZABLE)) { return IL_SERIALIZABLE; } else if (isoLevel.equalsIgnoreCase(LITERAL_IL_OPTIMISTIC)) { return IL_OPTIMISTIC; } //logger.warn("unknown isolation-level: " + isoLevel + " using RW_UNCOMMITTED as default"); return defIsoLevel; }
python
def recv_raw(self, x=MTU): """Receives a packet, then returns a tuple containing (cls, pkt_data, time)""" # noqa: E501 ll = self.ins.datalink() if ll in conf.l2types: cls = conf.l2types[ll] else: cls = conf.default_l2 warning("Unable to guess datalink type (interface=%s linktype=%i). Using %s", # noqa: E501 self.iface, ll, cls.name) pkt = None while pkt is None: pkt = self.ins.next() if pkt is not None: ts, pkt = pkt if pkt is None and scapy.consts.WINDOWS: raise TimeoutElapsed # To understand this behavior, have a look at L2pcapListenSocket's note # noqa: E501 if pkt is None: return None, None, None return cls, pkt, ts
java
/**
 * Pareto-dominance comparison of two solutions over all objectives
 * (minimization).
 * Returns -1 when solution1 dominates, 1 when solution2 dominates, 0 when
 * both are equal in every objective, and 2 when the solutions are
 * incomparable (each is strictly better in at least one objective).
 * A null solution is considered dominated by a non-null one.
 */
@Override
public int compare(S solution1, S solution2) {
    if (solution1 == null) {
        return 1;
    } else if (solution2 == null) {
        return -1;
    }
    int dominate1; // dominate1 indicates if some objective of solution1
                   // dominates the same objective in solution2. dominate2
    int dominate2; // is the complementary of dominate1.
    dominate1 = 0;
    dominate2 = 0;
    int flag;
    double value1, value2;
    for (int i = 0; i < solution1.getNumberOfObjectives(); i++) {
        value1 = solution1.getObjective(i);
        value2 = solution2.getObjective(i);
        if (value1 < value2) {
            flag = -1;
        } else if (value1 > value2) {
            flag = 1;
        } else {
            flag = 0;
        }
        if (flag == -1) {
            dominate1 = 1;
        }
        if (flag == 1) {
            dominate2 = 1;
        }
    }
    if (dominate1 == 0 && dominate2 == 0) {
        //No one dominates the other
        return 0;
    }
    if (dominate1 == 1) {
        // solution1 dominates
        return -1;
    } else if (dominate2 == 1) {
        // solution2 dominates
        return 1;
    }
    // Both dominate in some objective: incomparable.
    return 2;
}
java
/**
 * Builds a 36-character identifier from the low 32 bits of the current time
 * (hex), a fixed middle part, and a random int (hex).
 * NOTE(review): this is not an RFC 4122 UUID; uniqueness is best-effort and
 * relies on {@code fixedPart} and the random seed — confirm callers accept that.
 */
public String getUUID() {
    StringBuilder uuid = new StringBuilder(36);
    int i = (int)System.currentTimeMillis(); // intentionally truncated to 32 bits
    int j = seed.nextInt();
    hexFormat(i,uuid);
    uuid.append(fixedPart);
    hexFormat(j,uuid);
    return uuid.toString();
}
java
/**
 * UIMA JCas-generated getter: returns the i-th entry of the semanticTypes
 * string-array feature, after a feature-existence check and an array-bounds
 * check against the CAS.
 *
 * @param i index into the semanticTypes array
 * @return the i-th semantic type string
 */
public String getSemanticTypes(int i) {
    if (OntClassMention_Type.featOkTst && ((OntClassMention_Type)jcasType).casFeat_semanticTypes == null)
        jcasType.jcas.throwFeatMissing("semanticTypes", "de.julielab.jules.types.OntClassMention");
    jcasType.jcas.checkArrayBounds(jcasType.ll_cas.ll_getRefValue(addr, ((OntClassMention_Type)jcasType).casFeatCode_semanticTypes), i);
    return jcasType.ll_cas.ll_getStringArrayValue(jcasType.ll_cas.ll_getRefValue(addr, ((OntClassMention_Type)jcasType).casFeatCode_semanticTypes), i);
}
python
def create_choice_model(data, alt_id_col, obs_id_col, choice_col, specification, model_type, intercept_ref_pos=None, shape_ref_pos=None, names=None, intercept_names=None, shape_names=None, nest_spec=None, mixing_id_col=None, mixing_vars=None): """ Parameters ---------- data : string or pandas dataframe. If `data` is a string, it should be an absolute or relative path to a CSV file containing the long format data for this choice model. Note long format has one row per available alternative for each observation. If `data` is a pandas dataframe, `data` should already be in long format. alt_id_col : string. Should denote the column in data that contains the alternative identifiers for each row. obs_id_col : string. Should denote the column in data that contains the observation identifiers for each row. choice_col : string. Should denote the column in data which contains the ones and zeros that denote whether or not the given row corresponds to the chosen alternative for the given individual. specification : OrderedDict. Keys are a proper subset of the columns in `long_form_df`. Values are either a list or a single string, `all_diff` or `all_same`. If a list, the elements should be: 1) single objects that are within the alternative ID column of `long_form_df` 2) lists of objects that are within the alternative ID column of `long_form_df`. For each single object in the list, a unique column will be created (i.e. there will be a unique coefficient for that variable in the corresponding utility equation of the corresponding alternative). For lists within the `specification_dict` values, a single column will be created for all the alternatives within iterable (i.e. there will be one common coefficient for the variables in the iterable). model_type : string. Denotes the model type of the choice_model being instantiated. Should be one of the following values: - "MNL" - "Asym" - "Cloglog" - "Scobit" - "Uneven" - "Nested Logit" - "Mixed Logit" intercept_ref_pos : int, optional. 
Valid only when the intercepts being estimated are not part of the index. Specifies the alternative in the ordered array of unique alternative ids whose intercept or alternative-specific constant is not estimated, to ensure model identifiability. Default == None. shape_ref_pos : int, optional. Specifies the alternative in the ordered array of unique alternative ids whose shape parameter is not estimated, to ensure model identifiability. Default == None. names : OrderedDict or None, optional. Should have the same keys as `specification`. For each key: - if the corresponding value in `specification` is "all_same", then there should be a single string as the value in names. - if the corresponding value in `specification` is "all_diff", then there should be a list of strings as the value in names. There should be one string in the value in names for each possible alternative. - if the corresponding value in `specification` is a list, then there should be a list of strings as the value in names. There should be one string the value in names per item in the value in `specification`. Default == None. intercept_names : list of strings or None, optional. If a list is passed, then the list should have the same number of elements as there are possible alternatives in data, minus 1. Each element of the list should be the name of the corresponding alternative's intercept term, in sorted order of the possible alternative IDs. If None is passed, the resulting names that are shown in the estimation results will be ["Outside_ASC_{}".format(x) for x in shape_names]. Default = None. shape_names : list of strings or None, optional. If a list is passed, then the list should have the same number of elements as there are possible alternative IDs in data. Each element of the list should be a string denoting the name of the corresponding alternative, in sorted order of the possible alternative IDs. 
The resulting names which are shown in the estimation results will be ["shape_{}".format(x) for x in shape_names]. Default = None. nest_spec : OrderedDict or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id only be associated with a single nest! Default == None. mixing_id_col : str, or None, optional. Should be a column heading in `data`. Should denote the column in `data` which contains the identifiers of the units of observation over which the coefficients of the model are thought to be randomly distributed. If `model_type == "Mixed Logit"`, then `mixing_id_col` must be passed. Default == None. mixing_vars : list, or None, optional. All elements of the list should be strings. Each string should be present in the values of `names.values()` and they're associated variables should only be index variables (i.e. part of the design matrix). If `model_type == "Mixed Logit"`, then `mixing_vars` must be passed. Default == None. Returns ------- model_obj : instantiation of the Choice Model Class corresponding to the model type passed as the function argument. The returned object will have been instantiated with the arguments passed to this function. """ # Make sure the model type is valid ensure_valid_model_type(model_type, valid_model_types) # Carry out the appropriate instantiation process for the chosen # choice model model_kwargs = {"intercept_ref_pos": intercept_ref_pos, "shape_ref_pos": shape_ref_pos, "names": names, "intercept_names": intercept_names, "shape_names": shape_names, "nest_spec": nest_spec, "mixing_id_col": mixing_id_col, "mixing_vars": mixing_vars} return model_type_to_class[model_type](data, alt_id_col, obs_id_col, choice_col, specification, **model_kwargs)
java
/**
 * Receives up to {@code batchSize} messages from the given destination and
 * converts each one into its payload object, preserving order.
 *
 * @param destination the JMS destination to receive from
 * @param batchSize   the maximum number of messages to receive
 * @return the converted payloads, one per received message
 * @throws JmsException if receiving or converting fails
 */
public List<Object> receiveAndConvertBatch(Destination destination, int batchSize) throws JmsException {
    final List<Message> batch = receiveBatch(destination, batchSize);
    // Presized: the converted list has exactly one entry per message.
    final List<Object> converted = new ArrayList<Object>(batch.size());
    for (Message message : batch) {
        converted.add(doConvertFromMessage(message));
    }
    return converted;
}
java
/**
 * Convenience factory for a KLT point tracker: builds a {@link PkltConfig}
 * from the given pyramid scaling and template radius, then delegates to the
 * config-taking overload.
 *
 * @param scaling       pyramid scale factors per level
 * @param configExtract feature extraction configuration
 * @param featureRadius KLT template radius
 * @param imageType     input image type
 * @param derivType     image derivative type
 * @return the configured point tracker
 */
public static <I extends ImageGray<I>, D extends ImageGray<D>>
PointTracker<I> klt(int scaling[], ConfigGeneralDetector configExtract, int featureRadius,
        Class<I> imageType, Class<D> derivType) {
    PkltConfig config = new PkltConfig();
    config.pyramidScaling = scaling;
    config.templateRadius = featureRadius;
    return klt(config, configExtract, imageType, derivType );
}
python
def _expandOlemaVerbChains( clauseTokens, clauseID, foundChains ):
    ''' Attempts to extend 'olema'-final (predicate) verb chains by appending
        further verbs where possible, e.g. "on olnud" + "tehtud",
        "ei olnud" + "tehtud", "ei oleks" + "arvatud".
        Updates the entries of the input list foundChains in place according
        to the extensions found.
    '''
    # Word templates: the verb 'ole' itself; participle forms that may follow
    # 'olema' (-nud; -mas/-tud); and the -mata and -da/-ma verb forms
    verbOle = WordTemplate({ROOT:'^ole$',POSTAG:'V'})
    verbOleJarel1 = WordTemplate({POSTAG:'V',FORM:'(nud)$'})
    verbOleJarel2 = WordTemplate({POSTAG:'V',FORM:'^(mas|tud)$'})
    verbMata = WordTemplate({POSTAG:'V',FORM:'^(mata)$'})
    verbMaDa = WordTemplate({POSTAG:'V',FORM:'^(da|ma)$'})
    # Record words that already belong to some detected verb phrase
    annotatedWords = []
    for verbObj in foundChains:
        if verbObj[CLAUSE_IDX] != clauseID:
            continue
        if (len(verbObj[PATTERN])==1 and re.match('^(ei|ära|ega)$', verbObj[PATTERN][0])):
            # Standalone ei/ära/ega are excluded, as they are unlikely to interfere
            continue
        annotatedWords.extend( verbObj[PHRASE] )
    for verbObj in foundChains:
        if verbObj[CLAUSE_IDX] != clauseID:
            continue
        if verbObj[PATTERN][-1] == 'ole' and verbObj[OTHER_VERBS]:
            #
            # An 'olema'-final verb chain with other verbs in its context,
            # i.e. it can still be extended ...
            #
            eiOlePattern = (len(verbObj[PATTERN])==2 and verbObj[PATTERN][0] == 'ei')
            lastVerbWID = verbObj[PHRASE][-1]
            lastTokIndex = [i for i in range(len(clauseTokens)) if clauseTokens[i][WORD_ID] == lastVerbWID]
            lastTokIndex = lastTokIndex[0]
            expansion = None
            appliedRule = 0
            if not _isClauseFinal( lastVerbWID, clauseTokens ):
                maDaVerbsBetween = 0
                oleInfFollowing = 0
                for i in range(lastTokIndex + 1, len(clauseTokens)):
                    token = clauseTokens[i]
                    tokenWID = token[WORD_ID]
                    if tokenWID in annotatedWords:
                        break
                    if verbMaDa.matches(token):
                        maDaVerbsBetween += 1
                    if (verbOleJarel1.matches(token)) or verbOleJarel2.matches(token):
                        #
                        # Heuristic:
                        # If a clause-final nud/tud/mas follows 'olema' with no punctuation,
                        # conjunction, the adverbs aga/kuid/vaid, an already-annotated verb
                        # chain, or another nud/tud/mas word in between, count that word as
                        # an extension of the olema-phrase:
                        #
                        #      Pere ei_0 ole_0 Eestis toimuvast vaimustatud_0 .
                        #      " Viimasel ajal on_0 see asi jälle susisema hakanud_0 " ,
                        #      Esiteks ei_0 olnud_0 vajalikul ajal tavaliselt bussi tulemas_0
                        #
                        if _isClauseFinal(tokenWID, clauseTokens ) and \
                           not _isSeparatedByPossibleClauseBreakers( clauseTokens, verbObj[PHRASE][-1], \
                               tokenWID, True, True, True):
                            expansion = token
                        # Weak spot: when -mas is followed by da/ma, it is not certain
                        # that there is a direct government relation:
                        #      Islamlannale on_0 harjumatu näha meest midagi maast korjamas_0 ,
                        elif verbOleJarel1.matches(token) and eiOlePattern and i-lastTokIndex<=2:
                            #
                            # Heuristic: a "nud" after an "ei"+"ole" chain with at most one
                            # other word in between:
                            #     Tagantjärele mõeldes ei_0 oleks_0 ma pidanud_0 seda tegema .
                            #     Mina ei_0 ole_0 suutnud_0 siiani maad osta .
                            #
                            expansion = token
                        oleInfFollowing += 1
                        break
                    elif verbMata.matches(token) and maDaVerbsBetween == 0:
                        #
                        # Heuristic:
                        # If a clause-final 'mata' follows 'olema' with no punctuation,
                        # conjunction, the adverbs aga/kuid/vaid, an already-annotated verb
                        # chain, or a nud/tud/mas/ma/da verb in between, count that word as
                        # an extension of the olema-phrase:
                        #
                        #      Maanaine on_0 veel leidmata_0 .
                        #      linnaarhitekti koht oli_0 aasta aega täitmata_0
                        #
                        if _isClauseFinal(tokenWID, clauseTokens ) and \
                           not _isSeparatedByPossibleClauseBreakers( clauseTokens, verbObj[PHRASE][-1], \
                               tokenWID, True, True, True):
                            expansion = token
                            break
                        # Weak spot: if 'ilma' occurs in between, this heuristic most
                        # likely errs:
                        #      on_0 lihtsalt tõlgendatavad ka ilma situatsioonis osalemata_0
                        oleInfFollowing += 1
                #
                # Heuristic:
                # If no verb compatible with 'olema' follows in the clause, but one
                # immediately precedes it and is not yet annotated, treat it as
                # potentially combinable with the olema-verb, e.g.:
                #
                #     Unustatud_0 ei_0 ole_0 ka mänge .
                #     Tõhustatud_0 on_0 ka turvameetmeid .
                #     milleks looja ta maailma loonud_0 on_0 , nimelt soo jätkamiseks .
                #
                if oleInfFollowing == 0 and not expansion:
                    minWID = min( verbObj[PHRASE] )
                    lastTokIndex = [i for i in range(len(clauseTokens)) if clauseTokens[i][WORD_ID] == minWID]
                    lastTokIndex = lastTokIndex[0]
                    token = clauseTokens[lastTokIndex-1]
                    if lastTokIndex-1 > -1 and token[WORD_ID] not in annotatedWords:
                        if (verbOleJarel1.matches(token) or verbOleJarel2.matches(token)):
                            expansion = token
                            appliedRule = 1
                #
                # In negation (and corresponding affirmative) phrases the patterns
                #     ei + ole + Adv/Nom + Verb_da
                # remain undetected here, e.g.:
                #     Ei_0 ole_0 mõtet teha sellist söögikohta .
                #     Ei_0 ole_0 võimalik väiksema vastu vahetada .
                #     Ei_0 ole_0 pankuril vaja teada .
                # We try to handle these later.
                #
            else:
                #
                # Find the start of the chain (the verb with the minimal word ID)
                #
                minWID = min( verbObj[PHRASE] )
                lastTokIndex = [i for i in range(len(clauseTokens)) if clauseTokens[i][WORD_ID] == minWID]
                if lastTokIndex:
                    lastTokIndex = lastTokIndex[0]
                    if lastTokIndex-1 > -1 and clauseTokens[lastTokIndex-1][WORD_ID] not in annotatedWords:
                        #
                        # Heuristic:
                        # If the 'olema'-final chain ends the clause and is immediately
                        # preceded by nud/tud/mas, count that word as part of the
                        # olema-phrase, e.g.:
                        #     mis juba olnud ja veel tulemas_0 on_0 ,
                        #     Eesti selle alamprojektiga seotud_0 ei_0 ole_0 .
                        #     trombootilisi episoode kordunud_0 ei_0 ole_0 .
                        # (overall this seems to be a rather unproductive rule)
                        #
                        token = clauseTokens[lastTokIndex-1]
                        if (verbOleJarel1.matches(token) or verbOleJarel2.matches(token)):
                            expansion = token
            if expansion:
                # Commit the extension: record the word, its matching analyses,
                # and the pattern label ('ole' for another olema form, 'verb' otherwise)
                tokenWID = expansion[WORD_ID]
                verbObj[PHRASE].append( tokenWID )
                verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( expansion, [verbOleJarel1, verbOleJarel2, verbMata] ) )
                if verbOle.matches(expansion):
                    verbObj[PATTERN].append('ole')
                else:
                    verbObj[PATTERN].append('verb')
                annotatedWords.append( tokenWID )
python
def get_html_filename(filename):
    """Converts the filename to a .html extension.

    Recognised source extensions are .md, .tile, .jade, .txt, .rst and
    .docx; each is replaced with ``.html``.  A filename already containing
    ``.html`` is returned unchanged.

    Args:
        filename: name of the source file.

    Returns:
        The filename with its extension replaced by ``.html``.

    Raises:
        ValueError: if the filename has no recognised extension.  (The
            original code printed a warning and then crashed with
            UnboundLocalError on the ``return``; raising is explicit.)
    """
    if ".html" in filename:
        return filename
    # Table-driven replacement instead of a chain of near-identical elifs.
    for extension in (".md", ".tile", ".jade", ".txt", ".rst", ".docx"):
        if extension in filename:
            return filename.replace(extension, ".html")
    # Keep the original warning for callers that relied on the message,
    # but fail loudly instead of referencing an unbound variable.
    print(filename + " is not a valid file type!")
    raise ValueError(filename + " is not a valid file type!")
java
/**
 * Opens a buffered writer over the underlying file using the configured
 * charset; the file is created first (via {@code FileUtil.touch}) if missing.
 *
 * @param isAppend whether to append to the file instead of truncating it
 * @return a buffered writer for the file
 * @throws IORuntimeException if the file cannot be opened
 */
public BufferedWriter getWriter(boolean isAppend) throws IORuntimeException {
    try {
        FileOutputStream stream = new FileOutputStream(FileUtil.touch(file), isAppend);
        return new BufferedWriter(new OutputStreamWriter(stream, charset));
    } catch (Exception e) {
        throw new IORuntimeException(e);
    }
}
python
def _setup(self):
    """
    Generates _id_map from _alternatives to allow validating contents.

    Also normalizes each alternative to a 3-tuple by appending an empty
    params dict when missing, and builds _name_map for name lookups.
    """
    cls = self.__class__
    cls._id_map = {}
    cls._name_map = {}
    for index, entry in enumerate(cls._alternatives):
        if len(entry) < 3:
            # Normalize in place so later lookups always see (name, spec, params)
            entry = entry + ({},)
            cls._alternatives[index] = entry
        name, spec, params = entry[0], entry[1], entry[2]
        cls._id_map[_build_id_tuple(params, spec)] = index
        cls._name_map[name] = index
java
/**
 * Zero-fills {@code len} bytes of {@code block} starting at {@code offset}.
 *
 * @param block  the buffer to clear
 * @param offset index of the first byte to clear
 * @param len    number of bytes to clear
 * @return the number of bytes cleared (always {@code len})
 */
private static int zero(byte[] block, int offset, int len) {
    final int cleared = len;
    Util.zero(block, offset, cleared);
    return cleared;
}
java
/**
 * Derives a segwit address from the given key's public-key hash.
 * Only compressed keys are accepted.
 *
 * @param params network parameters the address is valid on
 * @param key    key to derive the address from; must be compressed
 * @return the derived segwit address
 * @throws IllegalArgumentException if the key is not compressed
 */
public static SegwitAddress fromKey(NetworkParameters params, ECKey key) {
    if (!key.isCompressed()) {
        throw new IllegalArgumentException("only compressed keys allowed");
    }
    return fromHash(params, key.getPubKeyHash());
}
java
/**
 * Inserts or replaces a cache entry, keeping {@code currentWeight} in sync
 * with the cache contents.
 *
 * <p>Fix: the original subtracted the <em>new</em> entry's weight when a
 * previous mapping was replaced; if the old and new values weighed
 * differently, {@code currentWeight} drifted away from the true total.
 * The displaced entry's weight must be subtracted instead.
 *
 * @param key   the cache key
 * @param value the value to store; null is stored as the NULL_VALUE sentinel
 */
public void put(Object key, Object value) {
    currentWeight += getWeight(key) + getWeight(value) + OVERHEAD;
    Object previous = cache.put(key, value == null ? NULL_VALUE : value);
    if (previous != null) {
        // Subtract the weight of the entry we displaced, mapping the
        // sentinel back to null so getWeight sees what was originally put.
        Object previousValue = (previous == NULL_VALUE) ? null : previous;
        currentWeight -= getWeight(key) + getWeight(previousValue) + OVERHEAD;
    }
}
java
/**
 * Counts the number of distinct class labels among the data objects.
 *
 * <p>Fix: the original used a {@code HashMap} whose values were never read;
 * a {@code Set} expresses "distinct labels" directly.
 *
 * @return the number of distinct class labels in {@code dataList}
 */
public int getNrOfClasses() {
    java.util.Set<Integer> classLabels = new java.util.HashSet<Integer>();
    for (DataObject currentObject : dataList) {
        classLabels.add(currentObject.getClassLabel());
    }
    return classLabels.size();
}
java
public boolean isStaleElementException(WebDriverException e) { boolean result = false; if (e instanceof StaleElementReferenceException) { result = true; } else { String msg = e.getMessage(); if (msg != null) { result = msg.contains("Element does not exist in cache") // Safari stale element || msg.contains("unknown error: unhandled inspector error: {\"code\":-32000,\"message\":\"Cannot find context with specified id\"}") // chrome error || msg.contains("Error: element is not attached to the page document") // Alternate Chrome stale element || msg.contains("can't access dead object"); // Firefox stale element } } return result; }
python
def run(configobj, wcsmap=None):
    """Run the full AstroDrizzle processing pipeline.

    Can be invoked via TEAL in one of two ways::

        from stsci.tools import teal

        # 1. Passing a config object to teal
        teal.teal('drizzlepac/pars/astrodrizzle.cfg')

        # 2. Passing a task name
        teal.teal('astrodrizzle')

    The example config files are in drizzlepac/pars.

    Parameters
    ----------
    configobj : dict-like
        Task configuration (must contain at least 'input' and
        'STATE OF INPUT FILES'/'clean').
    wcsmap : optional
        WCS mapping object forwarded to the drizzle/blot steps.
    """
    # turn on logging, redirecting stdout/stderr messages to a log file
    # while also printing them out to stdout as well
    # also, initialize timing of processing steps
    #
    # We need to define a default logfile name from the user's parameters
    input_list, output, ivmlist, odict = \
        processInput.processFilenames(configobj['input'])

    if output is not None:
        def_logname = output
    elif len(input_list) > 0:
        def_logname = input_list[0]
    else:
        print(textutil.textbox(
            "ERROR:\nNo valid input files found! Please restart the task "
            "and check the value for the 'input' parameter."), file=sys.stderr)
        def_logname = None
        return
    # NOTE(review): def_logname is computed but not used further in this
    # function — presumably consumed by logging setup elsewhere; verify.

    clean = configobj['STATE OF INPUT FILES']['clean']
    procSteps = util.ProcSteps()

    print("AstroDrizzle Version {:s} ({:s}) started at: {:s}\n"
          .format(__version__, __version_date__, util._ptime()[0]))
    util.print_pkg_versions(log=log)

    log.debug('')
    log.debug(
        "==== AstroDrizzle was invoked with the following parameters: ===="
    )
    log.debug('')
    util.print_cfg(configobj, log.debug)

    try:
        # Define list of imageObject instances and output WCSObject instance
        # based on input paramters
        imgObjList = None
        procSteps.addStep('Initialization')
        imgObjList, outwcs = processInput.setCommonInput(configobj)
        procSteps.endStep('Initialization')

        if imgObjList is None or not imgObjList:
            # No usable inputs: report and bail out before any step runs
            errmsg = "No valid images found for processing!\n"
            errmsg += "Check log file for full details.\n"
            errmsg += "Exiting AstroDrizzle now..."
            print(textutil.textbox(errmsg, width=65))
            print(textutil.textbox(
                'ERROR:\nAstroDrizzle Version {:s} encountered a problem! '
                'Processing terminated at {:s}.'
                .format(__version__, util._ptime()[0])), file=sys.stderr)
            return

        log.info("USER INPUT PARAMETERS common to all Processing Steps:")
        util.printParams(configobj, log=log)

        # Call rest of MD steps...  The order below is significant: each step
        # consumes products of the previous one.
        #create static masks for each image
        staticMask.createStaticMask(imgObjList, configobj,
                                    procSteps=procSteps)

        #subtract the sky
        sky.subtractSky(imgObjList, configobj, procSteps=procSteps)
        # _dbg_dump_virtual_outputs(imgObjList)

        #drizzle to separate images
        adrizzle.drizSeparate(imgObjList, outwcs, configobj, wcsmap=wcsmap,
                              procSteps=procSteps)
        # _dbg_dump_virtual_outputs(imgObjList)

        #create the median images from the driz sep images
        createMedian.createMedian(imgObjList, configobj, procSteps=procSteps)

        #blot the images back to the original reference frame
        ablot.runBlot(imgObjList, outwcs, configobj, wcsmap=wcsmap,
                      procSteps=procSteps)

        #look for cosmic rays
        drizCR.rundrizCR(imgObjList, configobj, procSteps=procSteps)

        #Make your final drizzled image
        adrizzle.drizFinal(imgObjList, outwcs, configobj, wcsmap=wcsmap,
                           procSteps=procSteps)

        print()
        print("AstroDrizzle Version {:s} is finished processing at {:s}.\n"
              .format(__version__, util._ptime()[0]))
    except:
        # Any failure disables cleanup so intermediate products survive
        # for debugging, then re-raises.
        clean = False
        print(textutil.textbox(
            "ERROR:\nAstroDrizzle Version {:s} encountered a problem! "
            "Processing terminated at {:s}."
            .format(__version__, util._ptime()[0])), file=sys.stderr)
        raise
    finally:
        # Always report step timings and release image objects; clean()
        # removes intermediate files only on success.
        procSteps.reportTimes()

        if imgObjList:
            for image in imgObjList:
                if clean:
                    image.clean()
                image.close()

            del imgObjList
            del outwcs
python
def ping():
    '''
    Returns True if the device is reachable, else False.

    Reachability is probed by performing a full logon/logout cycle.
    '''
    reachable = True
    try:
        session_cookie = logon()
        logout(session_cookie)
    except salt.exceptions.CommandExecutionError:
        reachable = False
    except Exception as err:  # pylint: disable=broad-except
        log.debug(err)
        reachable = False
    return reachable
java
/**
 * Parses an "escaped" hash string of the form {@code name=value&name=value...}
 * into the internal hash-parameter maps, tracking the highest page number
 * seen so that only the top page's URL/parameters are retained.
 *
 * @param hash the raw hash string (URL-encoded name/value pairs joined by '&')
 * @return false when the hash is empty; true otherwise (even if some pairs
 *         failed to parse — errors are printed, not propagated)
 */
private boolean parseEscapedHash(String hash) {
    if (StringUtils.isEmpty(hash))
        return false;
    hashEscaped = true;
    // Highest page index encountered so far; entries for lower pages are ignored.
    int maxPage = -1;
    try {
        for (String str : hash.split("&")) {
            int idx = str.indexOf("=");
            if (idx <= 0)
                continue;
            String name = URLDecoder.decode(str.substring(0, idx), "UTF-8");
            String value = URLDecoder.decode(str.substring(idx + 1), "UTF-8");
            if (name.indexOf("[") == -1) {
                // Plain parameter: goes into the "root" group.
                getHashParams("root", true).put(name, value);
                continue;
            }
            // Grouped parameter of the form group[name]
            Matcher m = P_GROUP_PARAM.matcher(name);
            if (m.find()) {
                getHashParams(m.group(1), true).put(m.group(2), value);
                continue;
            }
            // Page URL entry: keep only the one with the highest page index.
            m = P_PAGE_URL.matcher(name);
            if (m.find()) {
                int page = Integer.parseInt(m.group(1));
                if (page > maxPage)
                    topPageParams = null;
                if (page >= maxPage) {
                    maxPage = page;
                    topPageUrl = value;
                    continue;
                }
            }
            // Page parameter entry: reset state when a newer page appears.
            m = P_PAGE_PARAMETER.matcher(name);
            if (m.find()) {
                int page = Integer.parseInt(m.group(1));
                if (page > maxPage) {
                    topPageUrl = null;
                    topPageParams = null;
                }
                if (page >= maxPage) {
                    if (topPageParams == null)
                        topPageParams = new HashMap<String, String>();
                    topPageParams.put(m.group(2), value);
                    continue;
                }
            }
            // Unsupported parameter; ignore it
            System.err.println("Unsupported escaped hash parameter name: " + name + ", value: " + value);
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
    return true;
}
python
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
    """Mount a filesystem at a particular mountpoint.

    :param device: block device to mount
    :param mountpoint: directory to mount the device on
    :param options: mount options passed via ``-o`` when given
    :param persist: when True, also record the mount via ``fstab_add``
    :param filesystem: filesystem type recorded in fstab
    :returns: True on success, False if the mount command failed
    """
    command = ['mount']
    if options is not None:
        command += ['-o', options]
    command += [device, mountpoint]
    try:
        subprocess.check_output(command)
    except subprocess.CalledProcessError as e:
        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
        return False
    if not persist:
        return True
    return fstab_add(device, mountpoint, filesystem, options=options)
java
/**
 * Executes an asynchronous PUT request on the configured executor,
 * converting the response to the requested type.
 *
 * @param type    the class the response should be converted to
 * @param closure request configuration (delegates to {@link HttpConfig})
 * @return a future resolving to the converted response
 */
public <T> CompletableFuture<T> putAsync(final Class<T> type, @DelegatesTo(HttpConfig.class) final Closure closure) {
    final java.util.concurrent.Executor executor = getExecutor();
    return CompletableFuture.supplyAsync(() -> put(type, closure), executor);
}
python
def ensure_compliance(self):
    """Ensure that the all registered files comply to registered criteria.

    Existing compliant paths are skipped; existing non-compliant paths are
    logged and (when action is allowed) brought into compliance.  Missing
    paths are skipped unless ``always_comply`` is set.
    """
    for path in self.paths:
        if os.path.exists(path):
            if self.is_compliant(path):
                continue
            log('File %s is not in compliance.' % path, level=INFO)
        elif not self.always_comply:
            log("Non-existent path '%s' - skipping compliance check"
                % (path), level=INFO)
            continue
        if self._take_action():
            log("Applying compliance criteria to '%s'" % (path), level=INFO)
            self.comply(path)
java
/**
 * Determines which topologies need their jars re-downloaded and publishes
 * the result into {@code needDownloadTopologies}.
 *
 * <p>A topology is a candidate when it is locally assigned but none of its
 * workers has progressed past the notStarted state.  Candidates whose
 * binaries were modified less than two minutes ago (or whose modification
 * time cannot be read) are excluded from the download set.
 *
 * @param localWorkerStats worker-id to heartbeat/state mapping
 * @param localAssignments port to local assignment mapping
 * @throws Exception propagated from ZK/storage access outside the guarded loop
 */
@SuppressWarnings("unchecked")
private void checkNeedUpdateTopologies(Map<String, StateHeartbeat> localWorkerStats,
                                       Map<Integer, LocalAssignment> localAssignments) throws Exception {
    Set<String> topologies = new HashSet<>();

    // Start from all locally assigned topologies...
    for (Map.Entry<Integer, LocalAssignment> entry : localAssignments.entrySet()) {
        topologies.add(entry.getValue().getTopologyId());
    }

    // ...and drop those that already have at least one worker past notStarted.
    for (StateHeartbeat stateHb : localWorkerStats.values()) {
        State state = stateHb.getState();
        if (!state.equals(State.notStarted)) {
            String topologyId = stateHb.getHeartbeat().getTopologyId();
            topologies.remove(topologyId);
        }
    }

    long currTime = System.currentTimeMillis();
    Set<String> needRemoveTopologies = new HashSet<>();
    for (String topologyId : topologies) {
        try {
            long lastModifyTime = StormConfig.get_supervisor_topology_Bianrymodify_time(conf, topologyId);
            // Recently modified binaries (< 2 minutes) are assumed fresh and
            // excluded from re-download.
            if ((currTime - lastModifyTime) / 1000 < (JStormUtils.MIN_1 * 2)) {
                LOG.debug("less than 2 minute, removing " + topologyId);
                needRemoveTopologies.add(topologyId);
            }
        } catch (Exception e) {
            // Unreadable modify time: err on the side of not re-downloading.
            LOG.error("Failed to get last modified time for topology" + topologyId, e);
            needRemoveTopologies.add(topologyId);
        }
    }
    topologies.removeAll(needRemoveTopologies);

    if (topologies.size() > 0) {
        LOG.debug("Following topologies are going to re-download jars, " + topologies);
    }
    needDownloadTopologies.set(topologies);
}
java
/**
 * Records an index-based access condition for this range variable.
 *
 * <p>The first {@code colCount} expressions are AND-ed into
 * {@code indexEndCondition}.  With a single column the expression also
 * becomes the find-first condition; with multiple columns the whole array
 * is kept for a multi-column find-first.
 *
 * @param exprList condition expressions, one per index column
 * @param index    the index the conditions apply to
 * @param colCount number of leading expressions in {@code exprList} to use
 * @param isJoin   whether the conditions come from an ON (join) clause
 */
void addIndexCondition(Expression[] exprList, Index index, int colCount,
                       boolean isJoin) {
    // VoltDB extension
    if (rangeIndex == index && isJoinIndex && (!isJoin) &&
            (multiColumnCount > 0) && (colCount == 0)) {
        // This is one particular set of conditions which broke the classification of
        // ON and WHERE clauses.
        return;
    }
    // End of VoltDB extension
    rangeIndex = index;
    isJoinIndex = isJoin;

    // Fold each column condition into the cumulative end condition.
    for (int i = 0; i < colCount; i++) {
        Expression e = exprList[i];

        indexEndCondition = ExpressionLogical.andExpressions(indexEndCondition, e);
    }

    if (colCount == 1) {
        indexCondition = exprList[0];
    } else {
        // Multi-column case: keep the raw expressions for find-first handling.
        findFirstExpressions = exprList;
        isMultiFindFirst = true;
        multiColumnCount = colCount;
    }
}
python
def _redraw(self): """ Forgets the current layout and redraws with the most recent information :return: None """ for row in self._rows: for widget in row: widget.grid_forget() offset = 0 if not self.headers else 1 for i, row in enumerate(self._rows): for j, widget in enumerate(row): widget.grid(row=i+offset, column=j)
python
def get_stations(self):
    """Fetch the list of stations.

    Issues a GET request against the NS stations endpoint and hands the
    raw response to ``parse_stations``.
    """
    endpoint = 'http://webservices.ns.nl/ns-api-stations-v2'
    response = self._request('GET', endpoint)
    return self.parse_stations(response)
java
/**
 * Configures and saves a WPA2-EAP (enterprise) network from a scanned
 * WiFi QR result, then hands the configuration to the WifiManager.
 *
 * @param wifiManager the system WiFi manager used to persist the network
 * @param wifiResult  parsed QR payload with SSID, credentials and EAP settings
 */
private static void changeNetworkWPA2EAP(WifiManager wifiManager, WifiParsedResult wifiResult) {
    WifiConfiguration config = changeNetworkCommon(wifiResult);
    // Hex passwords that are 64 characters long (raw PSK hex digits, not
    // bits) are not to be quoted.
    config.preSharedKey = quoteNonHex(wifiResult.getPassword(), 64);
    config.allowedAuthAlgorithms.set(WifiConfiguration.AuthAlgorithm.OPEN);
    config.allowedProtocols.set(WifiConfiguration.Protocol.RSN); // For WPA2
    config.allowedKeyManagement.set(WifiConfiguration.KeyMgmt.WPA_EAP);
    // Accept both TKIP and CCMP ciphers for pairwise and group keys.
    config.allowedPairwiseCiphers.set(WifiConfiguration.PairwiseCipher.TKIP);
    config.allowedPairwiseCiphers.set(WifiConfiguration.PairwiseCipher.CCMP);
    config.allowedGroupCiphers.set(WifiConfiguration.GroupCipher.TKIP);
    config.allowedGroupCiphers.set(WifiConfiguration.GroupCipher.CCMP);
    // Enterprise (802.1x) credentials and methods from the QR payload.
    config.enterpriseConfig.setIdentity(wifiResult.getIdentity());
    config.enterpriseConfig.setAnonymousIdentity(wifiResult.getAnonymousIdentity());
    config.enterpriseConfig.setPassword(wifiResult.getPassword());
    config.enterpriseConfig.setEapMethod(parseEap(wifiResult.getEapMethod()));
    config.enterpriseConfig.setPhase2Method(parsePhase2(wifiResult.getPhase2Method()));
    updateNetwork(wifiManager, config);
}
java
/**
 * Updates the identified ID3v2 frame with the given raw data, provided
 * ID3v2 tagging is allowed for this file.
 *
 * @param id   frame identifier
 * @param data raw frame payload
 */
public void setFrameData(String id, byte[] data) {
    if (!allow(ID3V2)) {
        return;
    }
    id3v2.updateFrameData(id, data);
}
python
def get_path(language):
    '''
    Returns the full path to the language file.

    The language name is matched case-insensitively against
    ``<language>.json`` inside the default directory.

    :raises IOError: if no such language file exists
    '''
    path = os.path.join(_DEFAULT_DIR, '{}.json'.format(language.lower()))
    if os.path.exists(path):
        return path
    raise IOError('Could not find {} language file'.format(language))
python
async def add_message(self, request):
    """Registers a new message for the user.

    Requires an authenticated session; rejects anonymous requests with
    401.  Empty message text is silently ignored.  Always redirects back
    to the timeline.

    :param request: the incoming aiohttp request carrying the POSTed form
    :raises web.HTTPNotAuthorized: when no user is logged in
    """
    session = await get_session(request)
    user_id = session.get('user_id')
    if not user_id:
        raise web.HTTPNotAuthorized()

    form = await request.post()
    if form.get('text'):
        # Denormalize email/username into the message document so the
        # timeline can render without a per-message user lookup.
        user = await self.mongo.user.find_one(
            {'_id': ObjectId(session['user_id'])},
            {'email': 1, 'username': 1})
        await self.mongo.message.insert(
            {'author_id': ObjectId(user_id),
             'email': user['email'],
             'username': user['username'],
             'text': form['text'],
             'pub_date': datetime.datetime.utcnow()})

    return redirect(request, 'timeline')
python
def ntp(name, servers):
    '''
    Ensures that the NTP servers are configured. Servers are provided as an
    individual string or list format. Only four NTP servers will be reviewed.
    Any entries past four will be ignored.

    name: The name of the module function to execute.

    servers(str, list): The IP address or FQDN of the NTP servers.

    SLS Example:

    .. code-block:: yaml

        ntp_configuration_list:
          cimc.ntp:
            - servers:
              - foo.bar.com
              - 10.10.10.10

        ntp_configuration_str:
          cimc.ntp:
            - servers: foo.bar.com

    '''
    ret = _default_ret(name)

    # Exactly four slots; unused ones stay as empty strings so the
    # comparison against the device's four ntpServerN fields lines up.
    ntp_servers = ['', '', '', '']

    # Parse our server arguments.  Fix: slice to four entries — the original
    # indexed past the list and raised IndexError when more than four
    # servers were supplied, contradicting the documented behavior.
    if isinstance(servers, list):
        for i, server in enumerate(servers[:4]):
            ntp_servers[i] = server
    else:
        ntp_servers[0] = servers

    conf = __salt__['cimc.get_ntp']()

    # Check if our NTP configuration is already set
    req_change = False

    try:
        if conf['outConfigs']['commNtpProvider'][0]['ntpEnable'] != 'yes' \
                or ntp_servers[0] != conf['outConfigs']['commNtpProvider'][0]['ntpServer1'] \
                or ntp_servers[1] != conf['outConfigs']['commNtpProvider'][0]['ntpServer2'] \
                or ntp_servers[2] != conf['outConfigs']['commNtpProvider'][0]['ntpServer3'] \
                or ntp_servers[3] != conf['outConfigs']['commNtpProvider'][0]['ntpServer4']:
            req_change = True
    except KeyError as err:
        ret['result'] = False
        ret['comment'] = "Unable to confirm current NTP settings."
        log.error(err)
        return ret

    if req_change:
        try:
            update = __salt__['cimc.set_ntp_server'](ntp_servers[0],
                                                     ntp_servers[1],
                                                     ntp_servers[2],
                                                     ntp_servers[3])
            if update['outConfig']['commNtpProvider'][0]['status'] != 'modified':
                ret['result'] = False
                ret['comment'] = "Error setting NTP configuration."
                return ret
        except Exception as err:
            ret['result'] = False
            ret['comment'] = "Error setting NTP configuration."
            log.error(err)
            return ret

        ret['changes']['before'] = conf
        ret['changes']['after'] = __salt__['cimc.get_ntp']()
        ret['comment'] = "NTP settings modified."
    else:
        ret['comment'] = "NTP already configured. No changes required."

    ret['result'] = True

    return ret
java
/**
 * Returns the matrix element at the given row and column.
 *
 * @param row row index in the range 0..3
 * @param col column index in the range 0..3
 * @return the element stored at (row, col)
 * @throws IndexOutOfBoundsException if either index is outside 0..3
 */
public float get(int row, int col) {
    if (row < 0 || row > 3) {
        throw new IndexOutOfBoundsException("Index: " + row + ", Size: 4");
    }
    if (col < 0 || col > 3) {
        throw new IndexOutOfBoundsException("Index: " + col + ", Size: 4");
    }
    // Row-major layout: each row occupies four consecutive floats.
    final int flatIndex = row * 4 + col;
    return m_data[flatIndex];
}
java
/**
 * Pushes an aggregation below the send/receive pair of a distributed plan.
 *
 * <p>{@code distNode} is the per-partition aggregate; {@code coordNode},
 * when non-null, is the coordinator-side aggregate that re-combines
 * partial results.  When the GROUP BY contains the partition column, no
 * coordinator aggregation is needed and only the distributed node is
 * installed.
 *
 * @param root       current plan root (a ReceivePlanNode in the distributed case)
 * @param distNode   aggregate to run on each partition
 * @param coordNode  aggregate to run on the coordinator, or null
 * @param selectStmt the parsed SELECT providing HAVING/ORDER BY context
 * @return the new plan root after the aggregate (and projection) are placed
 */
private static AbstractPlanNode pushDownAggregate(AbstractPlanNode root,
                                                  AggregatePlanNode distNode,
                                                  AggregatePlanNode coordNode,
                                                  ParsedSelectStmt selectStmt) {
    AggregatePlanNode rootAggNode;

    // remember that coordinating aggregation has a pushed-down
    // counterpart deeper in the plan. this allows other operators
    // to be pushed down past the receive as well.
    if (coordNode != null) {
        coordNode.m_isCoordinatingAggregator = true;
    }

    /*
     * Push this node down to partition if it's distributed. First remove
     * the send/receive pair, add the node, then put the send/receive pair
     * back on top of the node, followed by another top node at the
     * coordinator.
     */
    if (coordNode != null && root instanceof ReceivePlanNode) {
        // Detach the subtree below the send/receive pair and re-root it
        // under the distributed aggregate.
        AbstractPlanNode accessPlanTemp = root;
        root = accessPlanTemp.getChild(0).getChild(0);
        root.clearParents();
        accessPlanTemp.getChild(0).clearChildren();
        distNode.addAndLinkChild(root);

        if (selectStmt.hasPartitionColumnInGroupby()) {
            // Set post predicate for final distributed Aggregation node
            distNode.setPostPredicate(selectStmt.getHavingPredicate());

            // Edge case: GROUP BY clause contains the partition column
            // No related GROUP BY or even Re-agg will apply on coordinator
            // Projection plan node can just be pushed down also except for
            // a very edge ORDER BY case.
            if (selectStmt.isComplexOrderBy()) {
                // Put the send/receive pair back into place
                accessPlanTemp.getChild(0).addAndLinkChild(distNode);
                root = processComplexAggProjectionNode(selectStmt, accessPlanTemp);
                return root;
            }
            root = processComplexAggProjectionNode(selectStmt, distNode);
            // Put the send/receive pair back into place
            accessPlanTemp.getChild(0).addAndLinkChild(root);
            return accessPlanTemp;
        }
        // Without including partition column in GROUP BY clause,
        // there has to be a top GROUP BY plan node on coordinator.
        //
        // Now that we're certain the aggregate will be pushed down
        // (no turning back now!), fix any APPROX_COUNT_DISTINCT aggregates.
        fixDistributedApproxCountDistinct(distNode, coordNode);

        // Put the send/receive pair back into place
        accessPlanTemp.getChild(0).addAndLinkChild(distNode);
        // Add the top node
        coordNode.addAndLinkChild(accessPlanTemp);
        rootAggNode = coordNode;
    } else {
        // Single-partition (or no coordinator aggregate): place the
        // aggregate directly on top of the current root.
        distNode.addAndLinkChild(root);
        rootAggNode = distNode;
    }

    // Set post predicate for final Aggregation node.
    rootAggNode.setPostPredicate(selectStmt.getHavingPredicate());

    root = processComplexAggProjectionNode(selectStmt, rootAggNode);
    return root;
}
python
def lfsr_next_one_seed(seed_iter, min_value_shift):
    """High-quality seeding for LFSR generators.

    The LFSR generator components discard a certain number of their lower
    bits when generating each output. The significant bits of their state
    must not all be zero. We must ensure that when seeding the generator.

    In case generators are seeded from an incrementing input (such as a
    system timer), and between increments only the lower bits may change, we
    would also like the lower bits of the input to change the initial state,
    and not just be discarded. So we do basic manipulation of the seed input
    value to ensure that all bits of the seed input affect the initial
    state.

    Fix: the original called ``seed_iter.next()``, which only exists in
    Python 2; the ``next()`` builtin works on both Python 2 and 3.
    """
    try:
        seed = next(seed_iter)
    except StopIteration:
        # No more seed material: fall back to the all-ones state.
        return 0xFFFFFFFF
    if seed is None:
        return 0xFFFFFFFF
    seed = int(seed) & 0xFFFFFFFF
    # Mix the low half into the high half so low-bit-only changes matter.
    working_seed = (seed ^ (seed << 16)) & 0xFFFFFFFF

    min_value = 1 << min_value_shift
    if working_seed < min_value:
        # Too small: push the low bits up; if still too small, invert.
        working_seed = (seed << 24) & 0xFFFFFFFF
        if working_seed < min_value:
            working_seed ^= 0xFFFFFFFF
    return working_seed
java
/**
 * Draws this image at the given position and size using the default
 * white (untinted) filter color.
 *
 * @param x      left edge of the drawn image
 * @param y      top edge of the drawn image
 * @param width  drawn width
 * @param height drawn height
 */
@Override
public void draw(float x, float y, float width, float height) {
    init();
    draw(x, y, width, height, Color.white);
}
java
/**
 * Looks up the column definition with the given raw name in a column
 * family's metadata.
 *
 * @param columnFamily the column family whose metadata is searched
 * @param columnName   the raw column name to match
 * @return the matching column definition, or null when none matches
 */
private ColumnDef getColumnDefByName(CfDef columnFamily, ByteBuffer columnName) {
    for (ColumnDef candidate : columnFamily.getColumn_metadata()) {
        if (ByteBufferUtil.compare(candidate.getName(), columnName) == 0) {
            return candidate;
        }
    }
    return null;
}
python
def is_dirty(using=None):
    """
    Returns True if the current transaction requires a commit for changes to
    happen.

    :param using: name of a specific connection to check; when None, every
        registered connection is checked.
    :return: True when any checked connection reports uncommitted changes.
    """
    connections = tldap.backend.connections
    if using is None:
        # any() expresses "at least one dirty connection" directly and
        # short-circuits, unlike the original flag-accumulating loop.
        return any(connections[name].is_dirty() for name in connections)
    return connections[using].is_dirty()
python
def register_options(cls, register):
    """Register an option to make capturing snapshots optional.

    This class is intended to be extended by Jvm resolvers (coursier and
    ivy), and the option name should reflect that.
    """
    super(JvmResolverBase, cls).register_options(register)
    # TODO This flag should be defaulted to True when we are doing hermetic
    # execution, and should probably go away as we move forward into that
    # direction.
    # Fix: the original help string concatenation was missing a space,
    # rendering "...dependency jars.Note that..." to users.
    register('--capture-snapshots', type=bool, default=False,
             help='Enable capturing snapshots to add directory digests to dependency jars. '
                  'Note that this is necessary when hermetic execution is enabled.')
python
def to_array(self):
    """
    Serializes this Document to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    array = super(Document, self).to_array()

    # file_id is the only mandatory field.
    array['file_id'] = u(self.file_id)  # py2: type unicode, py3: type str

    # Optional fields, each with its own serializer.
    optional_fields = (
        ('thumb', lambda thumb: thumb.to_array()),  # type PhotoSize
        ('file_name', u),   # py2: type unicode, py3: type str
        ('mime_type', u),   # py2: type unicode, py3: type str
        ('file_size', int),  # type int
    )
    for field_name, convert in optional_fields:
        field_value = getattr(self, field_name)
        if field_value is not None:
            array[field_name] = convert(field_value)

    return array
python
def _set_vlan_name(self, v, load=False):
    """
    Setter method for vlan_name, mapped from YANG variable /interface_vlan/interface/vlan/vlan_name (string)
    If this variable is read-only (config: false) in the source YANG file, then _set_vlan_name is
    considered as a private method. Backends looking to populate this variable should do so via
    calling thisObj._set_vlan_name() directly.

    NOTE(review): this method appears auto-generated (pyangbind-style) from
    the YANG model — prefer regenerating from the model over hand-editing.
    The value is validated as a string of length 1..32 per the YANG
    restriction; invalid values raise ValueError.
    """
    if hasattr(v, "_utype"):
        # Value arrived wrapped in a YANGDynClass; unwrap to the user type first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1 .. 32']}), is_leaf=True, yang_name="vlan-name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Alternative name for the VLAN', u'cli-multi-value': None, u'alt-name': u'name'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='string', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the structured error payload expected by callers.
        raise ValueError({
            'error-string': """vlan_name must be of a type compatible with string""",
            'defined-type': "string",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1 .. 32']}), is_leaf=True, yang_name="vlan-name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Alternative name for the VLAN', u'cli-multi-value': None, u'alt-name': u'name'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='string', is_config=True)""",
        })

    self.__vlan_name = t
    if hasattr(self, '_set'):
        self._set()
java
/**
 * Returns the generated parser name for the given parameter type,
 * assigning a new sequential numeric suffix on first use.
 *
 * @param paramTypeName the parameter type to obtain a parser name for
 * @return the parser name, composed of the common prefix plus the suffix
 */
public String generateJava2ContentParser(TypeName paramTypeName) {
    String suffix = managedParams.get(paramTypeName);
    if (suffix == null) {
        // First time we see this type: its suffix is the 1-based insertion order.
        suffix = String.valueOf(managedParams.size() + 1);
        managedParams.put(paramTypeName, suffix);
    }
    return PARAM_PARSER_PREFIX + suffix;
}
python
def int(self, *args):
    """
    Return the integer stored in the specified node.

    Any type of integer will be decoded: byte, short, long, long long.
    All values are read as little-endian unsigned integers.  Returns None
    (after printing a diagnostic) for unsupported byte lengths, and None
    silently when no data is present.
    """
    data = self.bytes(*args)
    if data is None:
        return None
    formats = {1: "<B", 2: "<H", 4: "<L", 8: "<Q"}
    fmt = formats.get(len(data))
    if fmt is not None:
        return struct.unpack(fmt, data)[0]
    print("can't get int from %s" % hexdump(data))
java
/**
 * Sets the if-modified-since precondition timestamp, converting it to the
 * RFC 1123 wire representation.
 *
 * @param ifModifiedSince the timestamp, or null to clear the precondition
 * @return this options object, for call chaining
 */
public JobScheduleDeleteOptions withIfModifiedSince(DateTime ifModifiedSince) {
    this.ifModifiedSince = (ifModifiedSince == null) ? null : new DateTimeRfc1123(ifModifiedSince);
    return this;
}
python
def _get_out_file(work_dir, paired): """Retrieve manta output variant file, depending on analysis. """ if paired: if paired.normal_bam: base_file = "somaticSV.vcf.gz" else: base_file = "tumorSV.vcf.gz" else: base_file = "diploidSV.vcf.gz" return os.path.join(work_dir, "results", "variants", base_file)
python
def report(
    vulnerabilities,
    fileobj,
    print_sanitised,
):
    """
    Prints issues in color-coded text format.

    Args:
        vulnerabilities: list of vulnerabilities to report
        fileobj: The output file object, which may be sys.stdout
        print_sanitised: whether sanitised vulnerabilities are printed too
    """
    unsanitised = [
        v for v in vulnerabilities
        if not isinstance(v, SanitisedVulnerability)
    ]
    n_unsanitised = len(unsanitised)
    n_sanitised = len(vulnerabilities) - n_unsanitised

    heading = "{} vulnerabilit{} found{}.\n".format(
        'No' if n_unsanitised == 0 else n_unsanitised,
        'y' if n_unsanitised == 1 else 'ies',
        " (plus {} sanitised)".format(n_sanitised) if n_sanitised else "",
    )

    to_print = vulnerabilities if print_sanitised else unsanitised

    with fileobj:
        for index, vulnerability in enumerate(to_print, start=1):
            fileobj.write(vulnerability_to_str(index, vulnerability))

        # Green heading when clean, red when something unsanitised remains.
        fileobj.write(color(heading, GOOD if n_unsanitised == 0 else DANGER))
java
/**
 * Adds the given desired player sessions to this request, creating the
 * backing list on first use.
 *
 * @param desiredPlayerSessions the sessions to add
 * @return this request, for call chaining
 */
public StartGameSessionPlacementRequest withDesiredPlayerSessions(DesiredPlayerSession... desiredPlayerSessions) {
    if (this.desiredPlayerSessions == null) {
        setDesiredPlayerSessions(new java.util.ArrayList<DesiredPlayerSession>(desiredPlayerSessions.length));
    }
    java.util.Collections.addAll(this.desiredPlayerSessions, desiredPlayerSessions);
    return this;
}
java
public void fastClear(int index) { assert index >= 0 && index < numBits; int wordNum = index >> 6; int bit = index & 0x03f; long bitmask = 1L << bit; bits[wordNum] &= ~bitmask; // hmmm, it takes one more instruction to clear than it does to set... any // way to work around this? If there were only 63 bits per word, we could // use a right shift of 10111111...111 in binary to position the 0 in the // correct place (using sign extension). // Could also use Long.rotateRight() or rotateLeft() *if* they were converted // by the JVM into a native instruction. // bits[word] &= Long.rotateLeft(0xfffffffe,bit); }
python
def update(self, data):
    """ Update metadata, handle virtual hierarchy """
    # Nothing to do if no data
    if data is None:
        return
    for key, value in sorted(data.items()):
        if not key.startswith('/'):
            # Regular attribute: store it directly
            self.data[key] = value
            continue
        # Child attribute: strip the leading slash to get the child name
        name = key.lstrip('/')
        # Handle deeper nesting (e.g. keys like /one/two/three) by
        # extracting only the first level of the hierarchy as name
        match = re.search("([^/]+)(/.*)", name)
        if match:
            name, rest = match.groups()
            value = {rest: value}
        # Update existing child or create a new one
        self.child(name, value)
    log.debug("Data for '{0}' updated.".format(self))
    log.data(pretty(self.data))
python
def _balance(self, ):
    """ calc unbalanced charges and radicals for skin atoms

    NOTE(review): this body references several names that are not defined
    in this method and are not visible in this file chunk (``h``,
    ``skin_reagent``, ``skin_product``, ``skins`` in the first loop,
    ``reverse_ext``, ``cycle``, ``radical_unmap``, ``sub``, ``O``, ``H``).
    Presumably they come from an enclosing scope, or this snippet is
    incomplete -- confirm against the full source.  ``dc`` and ``dth`` are
    also read in the final loop but only assigned inside branches of the
    earlier loops; verify that is intended.
    """
    meta = h.meta
    # Pass 1: skin atoms attached on the reagent side ("lost" atoms).
    for n in (skin_reagent.keys() | skin_product.keys()):
        lost = skin_reagent[n]
        cycle_lost = cycle(lost)
        new = skin_product[n]
        cycle_new = cycle(new)  # NOTE(review): unused in this loop
        atom = h._node[n]
        # dr > 0: radical count grew on the product side; dr < 0: it shrank.
        dr = atom.p_radical - atom.radical
        # radical balancing
        if dr > 0:  # radical added or increased.
            for _, m in zip(range(dr), cycle_lost):  # homolysis
                s_atom = h._node[m]
                s_atom.p_multiplicity = radical_unmap[s_atom.p_radical + 1]
                meta.setdefault('rule #14. atom lost. common atom radical added or increased. '
                                'lost atom radical added', []).append((m, n))
            for m in lost[dr:]:
                meta.setdefault('rule #15. atom lost. common atom radical added or increased. '
                                'lost atom radical unchanged', []).append((m, n))
        elif dr < 0:  # radical removed or decreased.
            if n in skin_product:
                for m in lost:
                    meta.setdefault('rule #20. atom lost. common atom radical removed or decreased. '
                                    'lost atom radical unchanged', []).append((m, n))
            else:
                for _, m in zip(range(-dr), cycle_lost):  # radical elimination
                    s_atom = h._node[m]
                    s_atom.p_multiplicity = radical_unmap[s_atom.p_radical + 1]
                    meta.setdefault('rule #21. atom lost. common atom radical removed or decreased. '
                                    'lost atom radical added', []).append((m, n))
                for m in lost[-dr:]:
                    meta.setdefault('rule #20. atom lost. common atom radical removed or decreased. '
                                    'lost atom radical unchanged', []).append((m, n))
        else:
            # Radical count unchanged: compare valence/hydrogen/charge deltas.
            env = h.environment(n)
            sv = atom.get_valence([(b.reagent, a.reagent) for b, a in env if b.order])
            pv = atom.p_get_valence([(b.product, a.product) for b, a in env if b.p_order])
            sh, ph = h.atom_total_h(n)
            dv = pv - sv
            dh = ph - sh
            dc = atom.p_charge - atom.charge
            # NOTE(review): the branches below iterate ``skins``, which is not
            # assigned anywhere in this first loop -- looks like a leftover
            # from a ``for n, skins in ...`` form; confirm.
            if not (dv or dh or dc):  # common atom unchanged. Substitution, Elimination
                for m in skins:
                    meta.setdefault('rule #1. atom lost. common atom unchanged. '
                                    'substitution, elimination, addition', []).append((m, n))
            elif dv == dh == dc < 0:  # explicit hydrogen removing
                for m in skins:
                    h._node[m].p_charge = 1
                    meta.setdefault('rule #4. atom lost. common atom deprotonation', []).append((m, n))
            else:
                for m in skins:
                    meta.setdefault('rule #5. atom lost. common atom changed. '
                                    'convert to reduction or oxidation', []).append((m, n))
            # Total hydrogen delta including the detached skin groups.
            pth = ph + sum(h.atom_total_h(x)[1] for x in skins)
            if n in skin_product:
                sth = sh + sum(h.atom_total_h(x)[0] for x in skin_product[n])
            else:
                sth = sh
            dth = pth - sth
    # Pass 2: skin atoms attached on the product side ("new" atoms).
    for n, skins in skin_product.items():
        cycle_skins = cycle(skins)
        atom = h._node[n]
        dr = atom.p_radical - atom.radical
        # radical balancing
        if dr > 0:  # radical added or increased.
            if n in skin_reagent:
                for m in skins:
                    meta.setdefault('rule #16. atom new. common atom radical added or increased. '
                                    'new atom radical unchanged', []).append((m, n))
            else:
                for _, m in zip(range(dr), cycle_skins):  # radical addition
                    s_atom = h._node[m]
                    s_atom.multiplicity = radical_unmap[s_atom.radical + 1]
                    meta.setdefault('rule #17. atom new. common atom radical added or increased. '
                                    'new atom radical added', []).append((m, n))
                for m in skins[dr:]:
                    meta.setdefault('rule #16. atom new. common atom radical added or increased. '
                                    'new atom radical unchanged', []).append((m, n))
        elif dr < 0:  # radical removed or decreased.
            for _, m in zip(range(-dr), cycle_skins):  # recombination
                s_atom = h._node[m]
                s_atom.multiplicity = radical_unmap[s_atom.radical + 1]
                meta.setdefault('rule #18. atom new. common atom radical removed or decreased. '
                                'new atom radical added', []).append((m, n))
            for m in skins[-dr:]:
                meta.setdefault('rule #19. atom new. common atom radical removed or decreased. '
                                'new atom radical unchanged', []).append((m, n))
        else:
            env = h.environment(n)
            sv = atom.get_valence([(b.reagent, a.reagent) for b, a in env if b.order])
            pv = atom.p_get_valence([(b.product, a.product) for b, a in env if b.p_order])
            sh, ph = h.atom_total_h(n)
            dv = pv - sv
            dh = ph - sh
            dc = atom.p_charge - atom.charge
            if not (dv or dh or dc):  # common atom unchanged. Substitution, Addition
                for m in skins:
                    meta.setdefault('rule #2. atom new. common atom unchanged. '
                                    'substitution, elimination, addition', []).append((m, n))
            elif dv == dh == dc > 0:  # explicit hydrogen addition
                for m in skins:
                    h._node[m].charge = 1
                    h.meta.setdefault('rule #3. atom new. common atom protonation', []).append((m, n))
            else:
                for m in skins:
                    meta.setdefault('rule #6. atom new. common atom changed. '
                                    'convert to reduction or oxidation', []).append((m, n))
            sth = sh + sum(h.atom_total_h(x)[0] for x in skins)
            if n in skin_reagent:
                pth = ph + sum(h.atom_total_h(x)[1] for x in skin_reagent[n])
            else:
                pth = ph
            dth = pth - sth
    # Pass 3: neutralize residual charge/hydrogen imbalance by adding
    # hydroxide/hydrogen radicals to the graph.
    for n, sp in reverse_ext.items():
        # charge neutralization
        if dc > 0:
            for _ in range(dc):
                h.meta.setdefault('rule #7. charge neutralization. hydroxide radical added',
                                  []).append(h.add_atom(O(multiplicity=2), O(charge=-1)))
        elif dc < 0:
            for _ in range(-dc):
                h.meta.setdefault('rule #8. charge neutralization. hydrogen radical added',
                                  []).append(h.add_atom(H(multiplicity=2), H(charge=1)))
        # hydrogen balancing
        if dth > 0:
            red_e = 0
            for m in sp['products']:
                if h.nodes[m]['element'] == 'H':
                    # set reduction H if explicit H count increased
                    h.nodes[m]['s_radical'] = 2
                    red_e += 1
                    h.meta.setdefault('rule #11. protonation. new explicit hydrogen radical added',
                                      []).append(m)
            red = []
            for _ in range(dth - red_e):  # add reduction agents
                m = h.add_atom(H(multiplicity=2), H())
                red.append(m)
                h.meta.setdefault('rule #10. protonation. hydrogen radical added', []).append(m)
            red = iter(red)
            dih = sub(*h.atom_implicit_h(n))
            if dih < 0:
                # attach reduction H to central atom if implicit H atoms count increased
                for _ in range(-dih):
                    m = next(red)
                    h.add_bond(m, n, None)
                    h.meta.setdefault('rule #12. protonation. new implicit hydrogen radical added',
                                      []).append(m)
            for m in sp['reagents']:
                # attach reduction H if detached group implicit H count increased
                dih = sub(*h.atom_implicit_h(m))
                if dih < 0:
                    for _ in range(-dih):
                        o = next(red)
                        h.add_bond(o, m, None)
        elif dth < 0:
            oxo = []
            for _ in range(-dth):
                m = h.add_atom(O(multiplicity=2), O())
                oxo.append(m)
                h.meta.setdefault('rule #9. deprotonation. hydroxide radical added', []).append(m)
            oxo = iter(oxo)
            for m in sp['reagents']:
                if h.nodes[m]['element'] == 'H':
                    o = next(oxo)
                    h.add_bond(o, m, None)
                    h.meta.setdefault('rule #13. hydrogen accepting by hydroxide radical added',
                                      []).append(m)
    return h
java
/**
 * Generates example queries for every corpus in {@code corpusList}.
 *
 * @param args when equal to "overwrite", existing example queries are
 *             regenerated (deleted first); any other value (including
 *             null) leaves existing queries in place
 */
public void doGenerateExampleQueries(String args) {
    // "overwrite".equals(null) is simply false, so the boxed Boolean and
    // the explicit null check of the original are unnecessary.
    boolean del = "overwrite".equals(args);
    if (corpusList != null) {
        for (Long corpusId : corpusList) {
            System.out.println("generate example queries " + corpusId);
            queriesGenerator.generateQueries(corpusId, del);
        }
    }
}
python
def setHierarchyLookup(self, columnName, tableType=None):
    """
    Sets the hierarchy lookup for the inputed table type and column.
    When no table type is supplied, this instance's default table type
    (self.tableType()) is used.

    :param      columnName | <str>
                tableType  | <subclass of Table> || None
    """
    # BUG FIX: the original condition was inverted -- a caller-supplied
    # tableType was overwritten with self.tableType(), and a missing one
    # was left as None.  Default only when no table type is given.
    if tableType is None:
        tableType = self.tableType()
    self._hierarchyLookup[tableType] = (tableType, columnName)
python
def compute_and_save_video_metrics(
    output_dirs, problem_name, video_length, frame_shape):
  """Computes the video metrics and saves them per decode dir plus overall.

  Args:
    output_dirs: list of decode output directories (PNG frames).
    problem_name: name of the video problem.
    video_length: number of frames per video.
    frame_shape: shape of a single frame.
  """
  statistics, all_results = compute_video_metrics_from_png_files(
      output_dirs, problem_name, video_length, frame_shape)
  # Per-directory results.
  for results, output_dir in zip(all_results, output_dirs):
    save_results(results, output_dir, problem_name)

  # Aggregated statistics go into a shared "decode" directory next to the
  # individual output dirs.
  parent_dir = os.path.join(output_dirs[0], os.pardir)
  final_dir = os.path.join(parent_dir, "decode")
  # BUG FIX: the original only created parent_dir, leaving the "decode"
  # subdirectory (the actual write target) potentially missing.  MakeDirs
  # on final_dir creates parent_dir as well.
  tf.gfile.MakeDirs(final_dir)
  save_results(statistics, final_dir, problem_name)
java
/**
 * Encodes and appends the subsequence csq[start, end) to this writer.
 *
 * @param csq   the character sequence to append
 * @param start index of the first character to append (inclusive)
 * @param end   index after the last character to append (exclusive)
 * @return this writer, to allow call chaining
 * @throws IOException if the underlying appender fails
 */
@Override
public Writer append(CharSequence csq, int start, int end) throws IOException {
    // BUG FIX: the original passed offset 0, ignoring 'start', so it
    // encoded csq[0, end-start) instead of csq[start, end) whenever
    // start > 0.  The last two arguments are treated as (offset, length),
    // matching the original's 'end - start' length argument.
    // NOTE(review): confirm the (offset, length) contract against the
    // EncodedAppender.append declaration before merging.
    encodedAppender.append(encoder, null, csq, start, end - start);
    return this;
}
java
/**
 * Builds the inverse sun vector: each component is negated and divided
 * by the larger of the absolute x and y components.
 *
 * @param sunVector the sun direction as {x, y, z}
 * @return the scaled, negated vector {-x/m, -y/m, -z/m} where
 *         m = max(|x|, |y|)
 */
public static double[] calcInverseSunVector( double[] sunVector ) {
    final double scale = Math.max(Math.abs(sunVector[0]), Math.abs(sunVector[1]));
    final double[] inverse = new double[3];
    for (int i = 0; i < 3; i++) {
        inverse[i] = -sunVector[i] / scale;
    }
    return inverse;
}
java
/**
 * Checks whether the string contains only printable ASCII characters.
 *
 * @param str the string to check; null returns false
 * @return true if every character is ASCII printable, false otherwise
 *         (an empty string returns true)
 */
public static boolean isAsciiPrintable(String str) {
    if (str == null) {
        return false;
    }
    final int length = str.length();
    for (int i = 0; i < length; i++) {
        if (!CharUtils.isAsciiPrintable(str.charAt(i))) {
            return false;
        }
    }
    return true;
}
python
def grep_full_py_identifiers(tokens):
    """
    Yield full (possibly dotted) Python identifiers from a token stream,
    skipping keywords and anything starting with a digit or dot.

    :param typing.Iterable[(str,str)] tokens:
    :rtype: typing.Iterator[str]
    """
    global py_keywords
    token_list = list(tokens)
    count = len(token_list)
    pos = 0
    while pos < count:
        kind, name = token_list[pos]
        pos += 1
        if kind != "id":
            continue
        # Absorb trailing ".attr" pairs into one dotted identifier.
        while pos + 1 < count and token_list[pos] == ("op", ".") and token_list[pos + 1][0] == "id":
            name = "%s.%s" % (name, token_list[pos + 1][1])
            pos += 2
        # Skip empties, keywords, and names starting with a digit or dot.
        if not name or name in py_keywords or name[0] in ".0123456789":
            continue
        yield name
java
public static void marginals(final Hypergraph graph, final Hyperpotential w, final Algebra s, final Scores scores) { final int n = graph.getNodes().size(); final double[] alpha = scores.alpha; final double[] beta = scores.beta; final double[] marginal = new double[n]; int root = graph.getRoot().getId(); // p(i) = \alpha_i * \beta_i / \beta_{root} for (Hypernode iNode : graph.getNodes()) { int i = iNode.getId(); marginal[i] = s.divide(s.times(alpha[i], beta[i]), beta[root]); } scores.marginal = marginal; }
java
/**
 * Describes the specified receipt rule, running the standard
 * before-execution hook on the request first.
 *
 * @param request the describe-receipt-rule request
 * @return the service response
 */
@Override
public DescribeReceiptRuleResult describeReceiptRule(DescribeReceiptRuleRequest request) {
    final DescribeReceiptRuleRequest prepared = beforeClientExecution(request);
    return executeDescribeReceiptRule(prepared);
}
python
def get(self, feature):
    """ Returns all values of 'feature'. """
    # A list argument means "use its first element".
    if type(feature) is list:
        feature = feature[0]
    # Coerce a feature name into a Feature object.
    if not isinstance(feature, b2.build.feature.Feature):
        feature = b2.build.feature.get(feature)
    assert isinstance(feature, b2.build.feature.Feature)

    # Lazily build (and cache) the feature -> values index.
    if self.feature_map_ is None:
        self.feature_map_ = {}
        for prop in self.all_:
            self.feature_map_.setdefault(prop.feature, []).append(prop.value)

    return self.feature_map_.get(feature, [])
python
def list_views():
    """ List all registered views """
    echo_header("List of registered views")
    for registered_view in current_app.appbuilder.baseviews:
        line = "View:{0} | Route:{1} | Perms:{2}".format(
            registered_view.__class__.__name__,
            registered_view.route_base,
            registered_view.base_permissions,
        )
        click.echo(line)
java
/**
 * Traces the circle path on the context if the radius is positive.
 *
 * @param context the 2D drawing context
 * @param attr    attributes holding the radius
 * @param alpha   global alpha (unused here)
 * @return true if a drawable path was prepared, false for a
 *         non-positive radius
 */
@Override
protected boolean prepare(final Context2D context, final Attributes attr, final double alpha) {
    final double radius = attr.getRadius();
    if (radius <= 0) {
        return false;
    }
    context.beginPath();
    context.arc(0, 0, radius, 0, Math.PI * 2, true);
    context.closePath();
    return true;
}
python
def get_child_books(self, book_id):
    """Gets the child books of the given ``id``.

    arg:    book_id (osid.id.Id): the ``Id`` of the ``Book`` to query
    return: (osid.commenting.BookList) - the child books of the ``id``
    raise:  NotFound - a ``Book`` identified by ``Id is`` not found
    raise:  NullArgument - ``book_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.get_child_bins
    if self._catalog_session is None:
        lookup = BookLookupSession(self._proxy, self._runtime)
        child_ids = list(self.get_child_book_ids(book_id))
        return lookup.get_books_by_ids(child_ids)
    return self._catalog_session.get_child_catalogs(catalog_id=book_id)
python
def to_json(self, lev=0, indent=None):
    """
    Serialize the content of this instance into a JSON string.

    :param lev: nesting level; when non-zero, returns the nested dict
        (via to_dict) instead of a JSON string
    :param indent: Number of spaces that should be used for indentation
    :return: a JSON string at the top level, otherwise a dict
    """
    if not lev:
        return json.dumps(self.to_dict(1), indent=indent)
    return self.to_dict(lev + 1)
java
/**
 * Reads the file's table directory and populates m_tables.
 *
 * Layout (as handled here): a 64-byte header is skipped, then 32-byte
 * directory entries follow until an entry with offset 0 terminates the
 * list.  Each named entry is instantiated via TABLE_CLASSES (falling
 * back to the generic Table) and asked to read its own data.
 *
 * @param is the input stream positioned at the start of the file
 * @throws IOException if the stream cannot be read or ends prematurely
 */
private void readFile(InputStream is) throws IOException
{
    StreamHelper.skip(is, 64);
    int index = 64;

    ArrayList<Integer> offsetList = new ArrayList<Integer>();
    List<String> nameList = new ArrayList<String>();

    while (true)
    {
        byte[] table = new byte[32];
        // BUG FIX: the original ignored InputStream.read(byte[])'s return
        // value; a short read would silently corrupt every later field.
        readFully(is, table);
        index += 32;

        int offset = PEPUtility.getInt(table, 0);
        offsetList.add(Integer.valueOf(offset));
        if (offset == 0)
        {
            // A zero offset terminates the directory.
            break;
        }
        nameList.add(PEPUtility.getString(table, 5).toUpperCase());
    }

    // Skip any padding between the directory and the first table's data.
    StreamHelper.skip(is, offsetList.get(0).intValue() - index);

    for (int offsetIndex = 1; offsetIndex < offsetList.size() - 1; offsetIndex++)
    {
        String name = nameList.get(offsetIndex - 1);
        Class<? extends Table> tableClass = TABLE_CLASSES.get(name);
        if (tableClass == null)
        {
            tableClass = Table.class;
        }

        Table table;
        try
        {
            // getDeclaredConstructor().newInstance() replaces the deprecated
            // Class.newInstance(), which hid checked constructor exceptions.
            table = tableClass.getDeclaredConstructor().newInstance();
        }
        catch (Exception ex)
        {
            throw new RuntimeException(ex);
        }

        m_tables.put(name, table);
        table.read(is);
    }
}

/**
 * Reads exactly buffer.length bytes from the stream, failing loudly on
 * premature end-of-stream.  InputStream.read(byte[]) may return fewer
 * bytes than requested, so a loop is required.
 *
 * @param is     the stream to read from
 * @param buffer destination buffer; filled completely on success
 * @throws IOException on read failure or premature end of stream
 */
private static void readFully(InputStream is, byte[] buffer) throws IOException
{
    int read = 0;
    while (read < buffer.length)
    {
        int count = is.read(buffer, read, buffer.length - read);
        if (count < 0)
        {
            throw new IOException("Premature end of file");
        }
        read += count;
    }
}
python
def _post_run_hook(self, runtime):
    ''' generates a report showing slices from each axis of an arbitrary
    volume of in_file, with the resulting binary brain mask overlaid '''
    mask_file = self.aggregate_outputs(runtime=runtime).mask_file
    self._anat_file = self.inputs.in_file
    self._mask_file = mask_file
    self._seg_files = [mask_file]
    self._masked = True

    NIWORKFLOWS_LOG.info(
        'Generating report for nilearn.compute_epi_mask. file "%s", and mask file "%s"',
        self._anat_file,
        mask_file,
    )

    return super(ComputeEPIMask, self)._post_run_hook(runtime)