language: stringclasses (2 values)
func_code_string: stringlengths (63 to 466k)
java
public static double[] toDouble(float[] a) { double[] d = new double[a.length]; for (int i = 0; i < a.length; i++) { d[i] = a[i]; } return d; }
java
static public WorkSheet unionWorkSheetsRowJoin(String w1FileName, String w2FileName, char delimitter, boolean secondSheetMetaData) throws Exception { WorkSheet w1 = WorkSheet.readCSV(w1FileName, delimitter); WorkSheet w2 = WorkSheet.readCSV(w2FileName, delimitter); return unionWorkSheetsRowJoin(w1, w2, secondSheetMetaData); }
java
static public void uncompileQuoter(final StringBuilder out, final String value) { out.append("'"); if (value != null) out.append(value.replaceAll("'", "\\\\'").replaceAll("\"", "\\\\\"")); out.append("'"); }
java
public boolean searchSIF(Model model, OutputStream out, SIFToText stt) { Set<SIFInteraction> inters = searchSIF(model); if (!inters.isEmpty()) { List<SIFInteraction> interList = new ArrayList<SIFInteraction>(inters); Collections.sort(interList); try { boolean first = true; OutputStreamWriter writer = new OutputStreamWriter(out); for (SIFInteraction inter : interList) { if (first) first = false; else writer.write("\n"); writer.write(stt.convert(inter)); } writer.close(); return true; } catch (IOException e) { e.printStackTrace(); } } return false; }
java
public void setAssociatedS3Resources(java.util.Collection<S3Resource> associatedS3Resources) { if (associatedS3Resources == null) { this.associatedS3Resources = null; return; } this.associatedS3Resources = new java.util.ArrayList<S3Resource>(associatedS3Resources); }
java
@Override public String getNamespaceURI(String prefix) { // per javax.xml.namespace.NamespaceContext doc if (prefix == null) throw new IllegalArgumentException("Cannot get namespace URI for null prefix"); if (XMLConstants.XML_NS_PREFIX.equals(prefix)) return XMLConstants.XML_NS_URI; if (XMLConstants.XMLNS_ATTRIBUTE.equals(prefix)) return XMLConstants.XMLNS_ATTRIBUTE_NS_URI; String namespaceURI = bindings.get(prefix); // per javax.xml.namespace.NamespaceContext doc return (namespaceURI != null) ? namespaceURI : XMLConstants.NULL_NS_URI; }
java
protected void writeOnceTuple( TupleRef tuple) { writer_ .element( ONCE_TAG) .content( () -> toStream( tuple.getVarBindings()).forEach( this::writeVarBinding)) .write(); }
java
private void dumpObjs(Object[][] objs, PrintStream out ) { for (int i = 0; i < objs.length; ++i) { for (int j = 0; j < objs[i].length; ++j) { out.println(i + " " + j + " " + objs[i][j]); } // for j } // for i }
python
def index_of_item(self, item): """Get the index for the given TreeItem :param item: the treeitem to query :type item: :class:`TreeItem` :returns: the index of the item :rtype: :class:`QtCore.QModelIndex` :raises: ValueError """ # root has an invalid index if item == self._root: return QtCore.QModelIndex() # find all parents to get their index parents = [item] i = item while True: parent = i.parent() # break if parent is root because we got all parents we need if parent == self._root: break if parent is None: # No new parent but last parent wasn't root! # This means that the item was not in the model! return QtCore.QModelIndex() # a new parent was found and we are still not at root # search further until we get to root i = parent parents.append(parent) # get the parent indexes until index = QtCore.QModelIndex() for treeitem in reversed(parents): parent = treeitem.parent() row = parent.childItems.index(treeitem) index = self.index(row, 0, index) return index
java
public static <T> Stream<T> skipWhile(Stream<T> source, Predicate<T> condition) { return StreamSupport.stream(SkipUntilSpliterator.over(source.spliterator(), condition.negate()), false) .onClose(source::close); }
java
public boolean addAll(Collection<? extends E> c) { if (!(c instanceof JumboEnumSet)) return super.addAll(c); JumboEnumSet<?> es = (JumboEnumSet<?>)c; if (es.elementType != elementType) { if (es.isEmpty()) return false; else throw new ClassCastException( es.elementType + " != " + elementType); } for (int i = 0; i < elements.length; i++) elements[i] |= es.elements[i]; return recalculateSize(); }
java
@Override public ZKData<byte[]> getZKByteData(String path, Watcher watcher) throws InterruptedException, KeeperException { Stat stat = new Stat(); return new ZKData<byte[]>(getData(path, watcher, stat), stat); }
java
public OvhBinaryFirewallLink serviceName_firewall_binary_link_GET(String serviceName, String binaryName) throws IOException { String qPath = "/dedicated/server/{serviceName}/firewall/binary/link"; StringBuilder sb = path(qPath, serviceName); query(sb, "binaryName", binaryName); String resp = exec(qPath, "GET", sb.toString(), null); return convertTo(resp, OvhBinaryFirewallLink.class); }
java
private static String getContentType(String fileExtension) { if (fileExtension == null) { return null; } String contentType = null; fileExtension = fileExtension.toLowerCase(); if ("html".equals(fileExtension)) { contentType = "text/html"; } else if ("htm".equals(fileExtension)) { contentType = "text/html"; } else if ("jar".equals(fileExtension)) { contentType = "application/java-archive"; } else if ("js".equals(fileExtension)) { contentType = "text/javascript"; } else if ("png".equals(fileExtension)) { contentType = "image/png"; } else if ("gif".equals(fileExtension)) { contentType = "image/gif"; } else if ("jpg".equals(fileExtension)) { contentType = "image/jpeg"; } else if ("jpeg".equals(fileExtension)) { contentType = "image/jpeg"; } else if ("css".equals(fileExtension)) { contentType = "text/css"; } else if ("swf".equals(fileExtension)) { contentType = "application/x-shockwave-flash"; } else if ("xap".equals(fileExtension)) { contentType = "application/x-silverlight-app"; } else if ("htc".equals(fileExtension)) { contentType = "text/x-component"; } else if ("jnlp".equals(fileExtension)) { contentType = "application/x-java-jnlp-file"; } else if ("manifest".equals(fileExtension)) { contentType = "text/cache-manifest"; } else if ("appcache".equals(fileExtension)) { contentType = "text/cache-manifest"; } else if ("vtt".equals(fileExtension)) { contentType = "text/vtt"; } else if ("aspx".equals(fileExtension)) { contentType = "text/html"; } else if ("apk".equals(fileExtension)) { contentType = "application/vnd.android.package-archive"; } return contentType; }
java
protected void toStringFields(final StringBuffer buffer) { buffer.append("<managedConnectionFactory="); buffer.append(_managedConnectionFactory); buffer.append("> <coreConnection="); buffer.append(_coreConnection); buffer.append("> <localTransaction="); buffer.append(_localTransaction); buffer.append("> <xaResource="); buffer.append(_xaResource); buffer.append("> <metaData="); buffer.append(_metaData); buffer.append("> <userDetails="); buffer.append(_userDetails); buffer.append("> <subject="); buffer.append(subjectToString(_subject)); buffer.append("> <logWriter="); buffer.append(_logWriter); buffer.append("> <sessions="); buffer.append(_sessions); // Don't call toString on listeners as this will result in recursive calls under JBoss and a StackOverFlow buffer.append("> <connectionListeners=["); for (int i = 0; i < _connectionListeners.size(); i++) { Object o = _connectionListeners.get(i); if (i > 0) buffer.append(","); buffer.append(o.getClass().getName() + "@" + o.hashCode()); } buffer.append("]>"); }
java
public static Writer createWriter(OutputStream outputStream,String encoding) { //get encoding String updatedEncoding=IOHelper.getEncodingToUse(encoding); //create writer Writer writer=null; try { writer=new OutputStreamWriter(outputStream,updatedEncoding); } catch(UnsupportedEncodingException exception) { throw new FaxException("Unable to create writer, unsupported encoding: "+encoding,exception); } return writer; }
java
public static HiveMetastoreClientPool get(final Properties properties, final Optional<String> metastoreURI) throws IOException { synchronized (HiveMetastoreClientPool.class) { if (poolCache == null) { poolCache = createPoolCache(properties); } } try { return poolCache.get(metastoreURI, new Callable<HiveMetastoreClientPool>() { @Override public HiveMetastoreClientPool call() throws Exception { return new HiveMetastoreClientPool(properties, metastoreURI); } }); } catch (ExecutionException ee) { throw new IOException("Failed to get " + HiveMetastoreClientPool.class.getSimpleName(), ee.getCause()); } }
java
@Override protected MkTabEntry createNewDirectoryEntry(MkTabTreeNode<O> node, DBID routingObjectID, double parentDistance) { return new MkTabDirectoryEntry(routingObjectID, parentDistance, node.getPageID(), node.coveringRadiusFromEntries(routingObjectID, this), node.kNNDistances()); }
python
def derive_and_set_name_fields_and_slug( self, set_name_sort=True, set_slug=True ): """ Derive subordinate name_* field values from the `name_full` field unless these fields are set in their own right. This method is called during `save()` """ # name_full is the primary required name field. It must be set. if is_empty(self.name_full): if not is_empty(self.name_display): self.name_full = self.name_display else: raise ValueError( u"%s.name_full cannot be empty at save" % type(self).__name__) # if empty, `name_sort` == `name_full` if set_name_sort and is_empty(self.name_sort): self.name_sort = self.derive_sort_name() # if empty, `slug` is set to slugified `name_full` if set_slug and is_empty(self.slug): self.slug = slugify(self.name_display or self.name_full)
java
protected Integer getFontSizeForZoom(double zoom) { double lower = -1; for (double upper : this.zoomToFontSizeMap.keySet()) { if (lower == -1) { lower = upper; if (zoom <= lower) return this.zoomToFontSizeMap.get(upper); continue; } if (zoom > lower && zoom <= upper) { return this.zoomToFontSizeMap.get(upper); } lower = upper; } return this.zoomToFontSizeMap.get(lower); }
python
def macaroon(self, version, expiry, caveats, ops): ''' Takes a macaroon with the given version from the oven, associates it with the given operations and attaches the given caveats. There must be at least one operation specified. The macaroon will expire at the given time - a time_before first party caveat will be added with that time. @return: a new Macaroon object. ''' if len(ops) == 0: raise ValueError('cannot mint a macaroon associated ' 'with no operations') ops = canonical_ops(ops) root_key, storage_id = self.root_keystore_for_ops(ops).root_key() id = self._new_macaroon_id(storage_id, expiry, ops) id_bytes = six.int2byte(LATEST_VERSION) + \ id.SerializeToString() if macaroon_version(version) < MACAROON_V2: # The old macaroon format required valid text for the macaroon id, # so base64-encode it. id_bytes = raw_urlsafe_b64encode(id_bytes) m = Macaroon( root_key, id_bytes, self.location, version, self.namespace, ) m.add_caveat(checkers.time_before_caveat(expiry), self.key, self.locator) m.add_caveats(caveats, self.key, self.locator) return m
java
public void setSelectableDateRange(Date min, Date max) { if (min == null) { minSelectableDate = defaultMinSelectableDate; } else { minSelectableDate = min; } if (max == null) { maxSelectableDate = defaultMaxSelectableDate; } else { maxSelectableDate = max; } if (maxSelectableDate.before(minSelectableDate)) { minSelectableDate = defaultMinSelectableDate; maxSelectableDate = defaultMaxSelectableDate; } }
python
def rm_job(user, path, mask, cmd): ''' Remove an incron job for a specified user. The job is only removed if the given path, mask, and cmd all match an existing entry. CLI Example: .. code-block:: bash salt '*' incron.rm_job root /path ''' # Scrub the types mask = six.text_type(mask).upper() # Check for valid mask types for item in mask.split(','): if item not in _MASK_TYPES: return 'Invalid mask type: {0}' . format(item) lst = list_tab(user) ret = 'absent' rm_ = None for ind in range(len(lst['crons'])): if rm_ is not None: break if path == lst['crons'][ind]['path']: if cmd == lst['crons'][ind]['cmd']: if mask == lst['crons'][ind]['mask']: rm_ = ind if rm_ is not None: lst['crons'].pop(rm_) ret = 'removed' comdat = _write_incron_lines(user, _render_tab(lst)) if comdat['retcode']: # Failed to commit, return the error return comdat['stderr'] return ret
python
def parse(self, stream): """Parses the keys and values from a config file.""" yaml = self._load_yaml() try: parsed_obj = yaml.safe_load(stream) except Exception as e: raise ConfigFileParserException("Couldn't parse config file: %s" % e) if not isinstance(parsed_obj, dict): raise ConfigFileParserException("The config file doesn't appear to " "contain 'key: value' pairs (aka. a YAML mapping). " "yaml.load('%s') returned type '%s' instead of 'dict'." % ( getattr(stream, 'name', 'stream'), type(parsed_obj).__name__)) result = OrderedDict() for key, value in parsed_obj.items(): if isinstance(value, list): result[key] = value else: result[key] = str(value) return result
python
def get_messages(self, queue_name, num_messages=None, visibility_timeout=None, timeout=None): ''' Retrieves one or more messages from the front of the queue. When a message is retrieved from the queue, the response includes the message content and a pop_receipt value, which is required to delete the message. The message is not automatically deleted from the queue, but after it has been retrieved, it is not visible to other clients for the time interval specified by the visibility_timeout parameter. If the key-encryption-key or resolver field is set on the local service object, the messages will be decrypted before being returned. :param str queue_name: The name of the queue to get messages from. :param int num_messages: A nonzero integer value that specifies the number of messages to retrieve from the queue, up to a maximum of 32. If fewer are visible, the visible messages are returned. By default, a single message is retrieved from the queue with this operation. :param int visibility_timeout: Specifies the new visibility timeout value, in seconds, relative to server time. The new value must be larger than or equal to 1 second, and cannot be larger than 7 days. The visibility timeout of a message can be set to a value later than the expiry time. :param int timeout: The server timeout, expressed in seconds. :return: A :class:`~azure.storage.queue.models.QueueMessage` object representing the information passed. :rtype: list(:class:`~azure.storage.queue.models.QueueMessage`) ''' _validate_decryption_required(self.require_encryption, self.key_encryption_key, self.key_resolver_function) _validate_not_none('queue_name', queue_name) request = HTTPRequest() request.method = 'GET' request.host_locations = self._get_host_locations() request.path = _get_path(queue_name, True) request.query = { 'numofmessages': _to_str(num_messages), 'visibilitytimeout': _to_str(visibility_timeout), 'timeout': _int_to_str(timeout) } return self._perform_request(request, _convert_xml_to_queue_messages, [self.decode_function, self.require_encryption, self.key_encryption_key, self.key_resolver_function])
java
public int getRow(float y) { int row = 0; y += h(padTop); int i = 0, n = cells.size(); if (n == 0) return -1; if (n == 1) return 0; if (cells.get(0).widgetY < cells.get(1).widgetY) { // Using y-down coordinate system. while (i < n) { Cell<C, T> c = cells.get(i++); if (c.getIgnore()) continue; if (c.widgetY + c.computedPadTop > y) break; if (c.endRow) row++; } return row - 1; } // Using y-up coordinate system. while (i < n) { Cell<C, T> c = cells.get(i++); if (c.getIgnore()) continue; if (c.widgetY + c.computedPadTop < y) break; if (c.endRow) row++; } return row; }
java
@Override public ContentSerializer getContentSerializerForContentType(String contentType) { for (ContentSerializer renderer : serializers) { if (renderer.getContentType().equals(contentType)) { return renderer; } } LoggerFactory.getLogger(this.getClass()).info("Cannot find a content renderer handling " + contentType); return null; }
python
def average_build_duration(connection, package): """ Return the average build duration for a package (or container). :param connection: txkoji.Connection :param package: package name :returns: deferred that when fired returns a datetime.timedelta object """ if isinstance(package, str) and package.endswith('-container'): return average_last_builds(connection, package) return connection.getAverageBuildDuration(package)
python
def enable_evb(self): """Function to enable EVB on the interface. """ if self.is_ncb: self.run_lldptool(["-T", "-i", self.port_name, "-g", "ncb", "-V", "evb", "enableTx=yes"]) ret = self.enable_gpid() return ret else: LOG.error("EVB cannot be set on NB") return False
java
public static Double toDouble(Object value, Double defaultValue) { return convert(Double.class, value, defaultValue); }
java
public static String jqPlotToJson(ChartConfiguration<?> jqPlot) { XStream xstream = new XStream(new JsonHierarchicalStreamDriver() { @Override public HierarchicalStreamWriter createWriter(Writer writer) { return new JqPlotJsonMapHierarchicalWriter(writer, JsonWriter.DROP_ROOT_MODE) { @Override public void addAttribute(String name, String value) { if (!name.contains("class")) { super.addAttribute(name, value); } } }; } }) { }; EnumConverter converter = new EnumConverter() { @Override public void marshal(Object source, HierarchicalStreamWriter writer, MarshallingContext context) { if(source instanceof JqPlotResources) { JqPlotResources plugin = (JqPlotResources) source; writer.setValue(plugin.getClassName()); } else { super.marshal(source, writer, context); } } }; converter.canConvert(JqPlotResources.class); xstream.registerConverter(converter); return xstream.toXML(jqPlot); }
python
def fill_data_brok_from(self, data, brok_type): """ Add properties to 'data' parameter with properties of this object when 'brok_type' parameter is defined in fill_brok of these properties :param data: object to fill :type data: object :param brok_type: name of brok_type :type brok_type: var :return: None """ cls = self.__class__ # Configuration properties for prop, entry in list(cls.properties.items()): # Is this property intended for broking? if brok_type in entry.fill_brok: data[prop] = self.get_property_value_for_brok(prop, cls.properties) # And the running properties if hasattr(cls, 'running_properties'): # We've got prop in running_properties too for prop, entry in list(cls.running_properties.items()): # if 'fill_brok' in cls.running_properties[prop]: if brok_type in entry.fill_brok: data[prop] = self.get_property_value_for_brok(prop, cls.running_properties)
java
@RequestMapping(value = "/scripts", method = RequestMethod.GET) public String scriptView(Model model) throws Exception { return "script"; }
java
public DescribeCacheSubnetGroupsResult withCacheSubnetGroups(CacheSubnetGroup... cacheSubnetGroups) { if (this.cacheSubnetGroups == null) { setCacheSubnetGroups(new com.amazonaws.internal.SdkInternalList<CacheSubnetGroup>(cacheSubnetGroups.length)); } for (CacheSubnetGroup ele : cacheSubnetGroups) { this.cacheSubnetGroups.add(ele); } return this; }
python
def _attach_dim_scales(self): """Attach dimension scales to all variables.""" for name, var in self.variables.items(): if name not in self.dimensions: for n, dim in enumerate(var.dimensions): var._h5ds.dims[n].attach_scale(self._all_h5groups[dim]) for subgroup in self.groups.values(): subgroup._attach_dim_scales()
java
public String getBaselineStartText(int baselineNumber) { Object result = getCachedValue(selectField(TaskFieldLists.BASELINE_STARTS, baselineNumber)); if (result == null) { result = getCachedValue(selectField(TaskFieldLists.BASELINE_ESTIMATED_STARTS, baselineNumber)); } if (!(result instanceof String)) { result = null; } return (String) result; }
python
def _handle_candles(self, ts, chan_id, data): """ Stores OHLC data received via wss in self.candles[chan_id] :param ts: timestamp, declares when data was received by the client :param chan_id: int, channel id :param data: list of data received via wss :return: """ pair = self.channel_labels[chan_id][1]['key'].split(':')[-1][1:] entry = data, ts self.data_q.put(('ohlc', pair, entry))
java
Location append(String relativeURI) { relativeURI = encodeIllegalCharacters(relativeURI); if (uri.toString().endsWith("/") && relativeURI.startsWith("/")) { relativeURI = relativeURI.substring(1); } if (!uri.toString().endsWith("/") && !relativeURI.startsWith("/")) { relativeURI = "/" + relativeURI; } return Location.of(URI.create(uri + relativeURI)); }
python
def create_table(db, schema_name, table_name, columns): """ Create a table, schema_name.table_name, in given database with given list of column names. """ table = '{0}.{1}'.format(schema_name, table_name) if schema_name else table_name db.execute('DROP TABLE IF EXISTS {0}'.format(table)) columns_list = ', '.join(columns) db.execute('CREATE TABLE {0} ({1})'.format(table, columns_list))
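A minimal usage sketch for create_table above, assuming only that db exposes an execute() method as the function requires; here an in-memory SQLite connection stands in for it, and the table and column names are hypothetical:

import sqlite3

# sqlite3.Connection provides execute(), matching the db argument assumed above
db = sqlite3.connect(":memory:")
create_table(db, None, "people", ["id INTEGER", "name TEXT"])
db.execute("INSERT INTO people VALUES (1, 'Ada')")
print(db.execute("SELECT * FROM people").fetchall())  # [(1, 'Ada')]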
java
public void write(Pointer _str, final int _len) { int len = _len; byte[] bstr = _str.buffer; int str = _str.start; if(this.buffer == null) { clear(); } int at = this.marker; if(len + at >= this.bufsize - 1) { flush(0); for(;;) { int rest = (this.bufsize - 1) - this.marker; if(len <= rest) break; System.arraycopy(bstr, str, this.buffer, this.marker, rest); this.marker += rest; str += rest; len -= rest; flush(0); } } System.arraycopy(bstr, str, this.buffer, this.marker, len); this.marker += len; this.buffer[this.marker] = 0; }
python
def _request_get(self, path, params=None, json=True, url=BASE_URL): """Perform a HTTP GET request.""" url = urljoin(url, path) headers = self._get_request_headers() response = requests.get(url, params=params, headers=headers) if response.status_code >= 500: backoff = self._initial_backoff for _ in range(self._max_retries): time.sleep(backoff) backoff_response = requests.get( url, params=params, headers=headers, timeout=DEFAULT_TIMEOUT) if backoff_response.status_code < 500: response = backoff_response break backoff *= 2 response.raise_for_status() if json: return response.json() else: return response
python
def lgammln(xx): """ Returns the natural log of the gamma function of xx. Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt. (Adapted from: Numerical Recipes in C.) Usage: lgammln(xx) """ coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516, 0.120858003e-2, -0.536382e-5] x = xx - 1.0 tmp = x + 5.5 tmp = tmp - (x+0.5)*math.log(tmp) ser = 1.0 for j in range(len(coeff)): x = x + 1 ser = ser + coeff[j]/x return -tmp + math.log(2.50662827465*ser)
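A quick sanity check for lgammln above, assuming the math import the function already relies on; since Gamma(5) = 4! = 24, the result should be close to ln 24:

import math

# Compare the series approximation against the exact value and the stdlib
print(lgammln(5.0))        # approximately 3.1781
print(math.log(24.0))      # ln 4! = 3.1781...
print(math.lgamma(5.0))    # stdlib equivalent, for reference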
java
protected AccessControlGroup group(String groupId, String... permissionIds) { return group(groupId, Collections.<AccessControlGroup> emptyList(), permissionIds); }
java
protected PortletRenderResult doRenderReplayCachedContent( IPortletWindow portletWindow, HttpServletRequest httpServletRequest, CacheState<CachedPortletData<PortletRenderResult>, PortletRenderResult> cacheState, PortletOutputHandler portletOutputHandler, RenderPart renderPart, long baseExecutionTime) throws IOException { enforceConfigPermission(httpServletRequest, portletWindow); logger.debug( "Replaying cached content for Render {} request to {}", renderPart, portletWindow); final long renderStartTime = System.nanoTime(); final CachedPortletData<PortletRenderResult> cachedPortletData = cacheState.getCachedPortletData(); cachedPortletData.replay(portletOutputHandler); final long executionTime = baseExecutionTime + (System.nanoTime() - renderStartTime); publishRenderEvent(portletWindow, httpServletRequest, renderPart, executionTime, true); final PortletRenderResult portletResult = cachedPortletData.getPortletResult(); return new PortletRenderResult(portletResult, executionTime); }
java
public static void updateBeanValue(final WComponent component, final boolean visibleOnly) { // Do not process if the component is invisible and visibleOnly is true. Will ignore the entire branch from this point. if (!component.isVisible() && visibleOnly) { return; } if (component instanceof WBeanComponent) { ((WBeanComponent) component).updateBeanValue(); } // These components recursively update bean values themselves, // as they have special requirements due to repeating data. if (component instanceof WDataTable || component instanceof WTable || component instanceof WRepeater) { return; } if (component instanceof Container) { for (int i = ((Container) component).getChildCount() - 1; i >= 0; i--) { updateBeanValue(((Container) component).getChildAt(i), visibleOnly); } } }
java
double collectGraphLastValue(String graph) throws IOException { final URL lastValueUrl = new URL(url.toString() + '&' + HttpParameter.PART + '=' + HttpPart.LAST_VALUE + '&' + HttpParameter.GRAPH + '=' + graph); return collectForUrl(lastValueUrl); }
java
public static NumberData max(SoyValue arg0, SoyValue arg1) { if (arg0 instanceof IntegerData && arg1 instanceof IntegerData) { return IntegerData.forValue(Math.max(arg0.longValue(), arg1.longValue())); } else { return FloatData.forValue(Math.max(arg0.numberValue(), arg1.numberValue())); } }
python
def _apply_projection(self, Av): r'''Computes :math:`\langle C,M_lAM_rV_n\rangle` efficiently with a three-term recurrence.''' PAv, UAp = self.projection.apply_complement(Av, return_Ya=True) self._UAps.append(UAp) c = UAp.copy() rhos = self.rhos if self.iter > 0: c -= (1 + rhos[-1]/rhos[-2])*self._UAps[-2] if self.iter > 1: c += rhos[-2]/rhos[-3]*self._UAps[-3] c *= ((-1)**self.iter) / numpy.sqrt(rhos[-1]) if self.iter > 0: c -= numpy.sqrt(rhos[-2]/rhos[-1]) * self.C[:, [-1]] self.C = numpy.c_[self.C, c] return PAv
java
public After<PartialResponseInsertType<T>> getOrCreateAfter() { Node node = childNode.getOrCreate("after"); After<PartialResponseInsertType<T>> after = new AfterImpl<PartialResponseInsertType<T>>(this, "after", childNode, node); return after; }
java
public static Map<String, BeanProperty> getAllProperties(Class clazz) { synchronized (cPropertiesCache) { Map<String, BeanProperty> properties; SoftReference<Map<String, BeanProperty>> ref = cPropertiesCache.get(clazz); if (ref != null) { properties = ref.get(); if (properties != null) { return properties; } } properties = createProperties(clazz); cPropertiesCache.put(clazz, new SoftReference<Map<String, BeanProperty>>(properties)); return properties; } }
java
void onStop(final Throwable exception) { LOG.log(Level.FINE, "Stop Runtime: RM status {0}", this.resourceManager.getServiceState()); if (this.resourceManager.getServiceState() == Service.STATE.STARTED) { // invariant: if RM is still running then we declare success. try { this.reefEventHandlers.close(); if (exception == null) { this.resourceManager.unregisterApplicationMaster( FinalApplicationStatus.SUCCEEDED, "Success!", this.trackingUrl); } else { // Note: We don't allow RM to restart our applications if it's an application level failure. // If applications are to be long-running, they should catch Exceptions before the REEF level // instead of relying on the RM restart mechanism. // For this case, we make a strong assumption that REEF does not allow its own unhandled Exceptions // to leak to this stage. final String failureMsg = String.format("Application failed due to:%n%s%n" + "With stack trace:%n%s", exception.getMessage(), ExceptionUtils.getStackTrace(exception)); this.resourceManager.unregisterApplicationMaster( FinalApplicationStatus.FAILED, failureMsg, this.trackingUrl); } this.resourceManager.close(); LOG.log(Level.FINEST, "Container ResourceManager stopped successfully"); } catch (final Exception e) { LOG.log(Level.WARNING, "Error shutting down YARN application", e); } } if (this.nodeManager.getServiceState() == Service.STATE.STARTED) { try { this.nodeManager.close(); LOG.log(Level.FINEST, "Container NodeManager stopped successfully"); } catch (final IOException e) { LOG.log(Level.WARNING, "Error closing YARN Node Manager", e); } } }
java
private String newStorageID() { String newID = null; while (newID == null) { newID = "DS" + Integer.toString(r.nextInt()); if (datanodeMap.get(newID) != null) { newID = null; } } return newID; }
python
def ml(line, cell=None): """Implements the datalab cell magic for MLWorkbench operations. Args: line: the contents of the ml command line. Returns: The results of executing the cell. """ parser = google.datalab.utils.commands.CommandParser( prog='%ml', description=textwrap.dedent("""\ Execute MLWorkbench operations Use "%ml <command> -h" for help on a specific command. """)) dataset_parser = parser.subcommand( 'dataset', formatter_class=argparse.RawTextHelpFormatter, help='Create or explore datasets.') dataset_sub_commands = dataset_parser.add_subparsers(dest='command') dataset_create_parser = dataset_sub_commands.add_parser( 'create', help='Create datasets', formatter_class=argparse.RawTextHelpFormatter, epilog=textwrap.dedent("""\ Example usage: %%ml dataset name: mydata format: csv train: path/to/train.csv eval: path/to/eval.csv schema: - name: news_label type: STRING - name: text type: STRING""")) dataset_create_parser.add_argument('--name', required=True, help='the name of the dataset to define. ') dataset_create_parser.add_argument('--format', required=True, choices=['csv', 'bigquery', 'transformed'], help='The format of the data.') dataset_create_parser.add_argument('--train', required=True, help='The path of the training file pattern if format ' + 'is csv or transformed, or table name if format ' + 'is bigquery.') dataset_create_parser.add_argument('--eval', required=True, help='The path of the eval file pattern if format ' + 'is csv or transformed, or table name if format ' + 'is bigquery.') dataset_create_parser.add_cell_argument('schema', help='yaml representation of CSV schema, or path to ' + 'schema file. Only needed if format is csv.') dataset_create_parser.set_defaults(func=_dataset_create) dataset_explore_parser = dataset_sub_commands.add_parser( 'explore', help='Explore training data.') dataset_explore_parser.add_argument('--name', required=True, help='The name of the dataset to explore.') dataset_explore_parser.add_argument('--overview', action='store_true', default=False, help='Plot overview of sampled data. Set "sample_size" ' + 'to change the default sample size.') dataset_explore_parser.add_argument('--facets', action='store_true', default=False, help='Plot facets view of sampled data. Set ' + '"sample_size" to change the default sample size.') dataset_explore_parser.add_argument('--sample_size', type=int, default=1000, help='sample size for overview or facets view. Only ' + 'used if either --overview or --facets is set.') dataset_explore_parser.set_defaults(func=_dataset_explore) analyze_parser = parser.subcommand( 'analyze', formatter_class=argparse.RawTextHelpFormatter, help='Analyze training data and generate stats, such as min/max/mean ' 'for numeric values, vocabulary for text columns.', epilog=textwrap.dedent("""\ Example usage: %%ml analyze [--cloud] output: path/to/dir data: $mydataset features: serialId: transform: key num1: transform: scale value: 1 num2: transform: identity text1: transform: bag_of_words Also supports in-notebook variables, such as: %%ml analyze --output path/to/dir training_data: $my_csv_dataset features: $features_def""")) analyze_parser.add_argument('--output', required=True, help='path of output directory.') analyze_parser.add_argument('--cloud', action='store_true', default=False, help='whether to run analysis in cloud or local.') analyze_parser.add_argument('--package', required=False, help='A local or GCS tarball path to use as the source. 
' 'If not set, the default source package will be used.') analyze_parser.add_cell_argument( 'data', required=True, help="""Training data. A dataset defined by "%%ml dataset".""") analyze_parser.add_cell_argument( 'features', required=True, help=textwrap.dedent("""\ features config indicating how to transform data into features. The list of supported transforms: "transform: identity" does nothing (for numerical columns). "transform: scale value: x" scale a numerical column to [-a, a]. If value is missing, x defaults to 1. "transform: one_hot" treats the string column as categorical and makes one-hot encoding of it. "transform: embedding embedding_dim: d" treats the string column as categorical and makes embeddings of it with specified dimension size. "transform: bag_of_words" treats the string column as text and make bag of words transform of it. "transform: tfidf" treats the string column as text and make TFIDF transform of it. "transform: image_to_vec checkpoint: gs://b/o" from image gs url to embeddings. "checkpoint" is a inception v3 checkpoint. If absent, a default checkpoint is used. "transform: target" denotes the column is the target. If the schema type of this column is string, a one_hot encoding is automatically applied. If numerical, an identity transform is automatically applied. "transform: key" column contains metadata-like information and will be output as-is in prediction.""")) analyze_parser.set_defaults(func=_analyze) transform_parser = parser.subcommand( 'transform', formatter_class=argparse.RawTextHelpFormatter, help='Transform the data into tf.example which is more efficient in training.', epilog=textwrap.dedent("""\ Example usage: %%ml transform [--cloud] [--shuffle] analysis: path/to/analysis_output_folder output: path/to/dir batch_size: 100 data: $mydataset cloud: num_workers: 3 worker_machine_type: n1-standard-1 project_id: my_project_id""")) transform_parser.add_argument('--analysis', required=True, help='path of analysis output directory.') transform_parser.add_argument('--output', required=True, help='path of output directory.') transform_parser.add_argument('--cloud', action='store_true', default=False, help='whether to run transform in cloud or local.') transform_parser.add_argument('--shuffle', action='store_true', default=False, help='whether to shuffle the training data in output.') transform_parser.add_argument('--batch_size', type=int, default=100, help='number of instances in a batch to process once. ' 'Larger batch is more efficient but may consume more memory.') transform_parser.add_argument('--package', required=False, help='A local or GCS tarball path to use as the source. ' 'If not set, the default source package will be used.') transform_parser.add_cell_argument( 'data', required=True, help="""Training data. A dataset defined by "%%ml dataset".""") transform_parser.add_cell_argument( 'cloud_config', help=textwrap.dedent("""\ A dictionary of cloud config. All of them are optional. num_workers: Dataflow number of workers. If not set, DataFlow service will determine the number. worker_machine_type: a machine name from https://cloud.google.com/compute/docs/machine-types If not given, the service uses the default machine type. project_id: id of the project to use for DataFlow service. If not set, Datalab's default project (set by %%datalab project set) is used. job_name: Unique name for a Dataflow job to use. 
If not set, a random name will be used.""")) transform_parser.set_defaults(func=_transform) train_parser = parser.subcommand( 'train', formatter_class=argparse.RawTextHelpFormatter, help='Train a model.', epilog=textwrap.dedent("""\ Example usage: %%ml train [--cloud] analysis: path/to/analysis_output output: path/to/dir data: $mydataset model_args: model: linear_regression cloud_config: region: us-central1""")) train_parser.add_argument('--analysis', required=True, help='path of analysis output directory.') train_parser.add_argument('--output', required=True, help='path of trained model directory.') train_parser.add_argument('--cloud', action='store_true', default=False, help='whether to run training in cloud or local.') train_parser.add_argument('--notb', action='store_true', default=False, help='If set, tensorboard is not automatically started.') train_parser.add_argument('--package', required=False, help='A local or GCS tarball path to use as the source. ' 'If not set, the default source package will be used.') train_parser.add_cell_argument( 'data', required=True, help="""Training data. A dataset defined by "%%ml dataset".""") package_model_help = subprocess.Popen( ['python', '-m', 'trainer.task', '--datalab-help'], cwd=DEFAULT_PACKAGE_PATH, stdout=subprocess.PIPE).communicate()[0] package_model_help = ('model_args: a dictionary of model specific args, including:\n\n' + package_model_help.decode()) train_parser.add_cell_argument('model_args', help=package_model_help) train_parser.add_cell_argument( 'cloud_config', help=textwrap.dedent("""\ A dictionary of cloud training config, including: job_id: the name of the job. If not provided, a default job name is created. region: see {url} runtime_version: see "region". Must be a string like '1.2'. scale_tier: see "region".""".format( url='https://cloud.google.com/sdk/gcloud/reference/ml-engine/jobs/submit/training'))) train_parser.set_defaults(func=_train) predict_parser = parser.subcommand( 'predict', formatter_class=argparse.RawTextHelpFormatter, help='Predict with local or deployed models. (Good for small datasets).', epilog=textwrap.dedent("""\ Example usage: %%ml predict headers: key,num model: path/to/model data: - key1,value1 - key2,value2 Or, in another cell, define a list of dict: my_data = [{'key': 1, 'num': 1.2}, {'key': 2, 'num': 2.8}] Then: %%ml predict headers: key,num model: path/to/model data: $my_data""")) predict_parser.add_argument('--model', required=True, help='The model path.') predict_parser.add_argument('--no_show_image', action='store_true', default=False, help='If not set, add a column of images in output.') predict_parser.add_cell_argument( 'data', required=True, help=textwrap.dedent("""\ Prediction data can be 1) CSV lines in the input cell in yaml format or 2) a local variable which is one of a) list of dict b) list of strings of csv lines c) a Pandas DataFrame""")) predict_parser.set_defaults(func=_predict) batch_predict_parser = parser.subcommand( 'batch_predict', formatter_class=argparse.RawTextHelpFormatter, help='Batch prediction with local or deployed models. 
(Good for large datasets)', epilog=textwrap.dedent("""\ Example usage: %%ml batch_predict [--cloud] model: path/to/model output: path/to/output format: csv data: csv: path/to/file_pattern""")) batch_predict_parser.add_argument('--model', required=True, help='The model path if not --cloud, or the id in ' 'the form of model.version if --cloud.') batch_predict_parser.add_argument('--output', required=True, help='The path of output directory with prediction results. ' 'If --cloud, it has to be GCS path.') batch_predict_parser.add_argument('--format', help='csv or json. For cloud run, ' 'the only supported format is json.') batch_predict_parser.add_argument('--batch_size', type=int, default=100, help='number of instances in a batch to process once. ' 'Larger batch is more efficient but may consume ' 'more memory. Only used in local run.') batch_predict_parser.add_argument('--cloud', action='store_true', default=False, help='whether to run prediction in cloud or local.') batch_predict_parser.add_cell_argument( 'data', required=True, help='Data to predict with. Only csv is supported.') batch_predict_parser.add_cell_argument( 'cloud_config', help=textwrap.dedent("""\ A dictionary of cloud batch prediction config. job_id: the name of the job. If not provided, a default job name is created. region: see {url} max_worker_count: see reference in "region".""".format( url='https://cloud.google.com/sdk/gcloud/reference/ml-engine/jobs/submit/prediction'))) # noqa batch_predict_parser.set_defaults(func=_batch_predict) explain_parser = parser.subcommand( 'explain', formatter_class=argparse.RawTextHelpFormatter, help='Explain a prediction with LIME tool.') explain_parser.add_argument('--type', default='all', choices=['text', 'image', 'tabular', 'all'], help='the type of column to explain.') explain_parser.add_argument('--algorithm', choices=['lime', 'ig'], default='lime', help='"lime" is the open sourced project for prediction explainer.' + '"ig" means integrated gradients and currently only applies ' + 'to image.') explain_parser.add_argument('--model', required=True, help='path of the model directory used for prediction.') explain_parser.add_argument('--labels', required=True, help='comma separated labels to explain.') explain_parser.add_argument('--column_name', help='the name of the column to explain. Optional if text type ' + 'and there is only one text column, or image type and ' + 'there is only one image column.') explain_parser.add_cell_argument('data', required=True, help='Prediction Data. Can be a csv line, or a dict.') explain_parser.add_cell_argument('training_data', help='A csv or bigquery dataset defined by %%ml dataset. ' + 'Used by tabular explainer only to determine the ' + 'distribution of numeric and categorical values. ' + 'Suggest using original training dataset.') # options specific for lime explain_parser.add_argument('--num_features', type=int, help='number of features to analyze. In text, it is number of ' + 'words. In image, it is number of areas. For lime only.') explain_parser.add_argument('--num_samples', type=int, help='size of the neighborhood to learn the linear model. ' + 'For lime only.') explain_parser.add_argument('--hide_color', type=int, default=0, help='the color to use for perturbed area. If -1, average of ' + 'each channel is used for each channel. For image only.') explain_parser.add_argument('--include_negative', action='store_true', default=False, help='whether to show only positive areas. 
For lime image only.') explain_parser.add_argument('--overview', action='store_true', default=False, help='whether to show overview instead of details view.' + 'For lime text and tabular only.') explain_parser.add_argument('--batch_size', type=int, default=100, help='size of batches passed to prediction. For lime only.') # options specific for integrated gradients explain_parser.add_argument('--num_gradients', type=int, default=50, help='the number of scaled images to get gradients from. Larger ' + 'number usually produces better results but slower.') explain_parser.add_argument('--percent_show', type=int, default=10, help='the percentage of top impactful pixels to show.') explain_parser.set_defaults(func=_explain) tensorboard_parser = parser.subcommand( 'tensorboard', formatter_class=argparse.RawTextHelpFormatter, help='Start/stop/list TensorBoard instances.') tensorboard_sub_commands = tensorboard_parser.add_subparsers(dest='command') tensorboard_start_parser = tensorboard_sub_commands.add_parser( 'start', help='Start a tensorboard instance.') tensorboard_start_parser.add_argument('--logdir', required=True, help='The local or GCS logdir path.') tensorboard_start_parser.set_defaults(func=_tensorboard_start) tensorboard_stop_parser = tensorboard_sub_commands.add_parser( 'stop', help='Stop a tensorboard instance.') tensorboard_stop_parser.add_argument('--pid', required=True, type=int, help='The pid of the tensorboard instance.') tensorboard_stop_parser.set_defaults(func=_tensorboard_stop) tensorboard_list_parser = tensorboard_sub_commands.add_parser( 'list', help='List tensorboard instances.') tensorboard_list_parser.set_defaults(func=_tensorboard_list) evaluate_parser = parser.subcommand( 'evaluate', formatter_class=argparse.RawTextHelpFormatter, help='Analyze model evaluation results, such as confusion matrix, ROC, RMSE.') evaluate_sub_commands = evaluate_parser.add_subparsers(dest='command') def _add_data_params_for_evaluate(parser): parser.add_argument('--csv', help='csv file path patterns.') parser.add_argument('--headers', help='csv file headers. 
Required if csv is specified and ' + 'predict_results_schema.json does not exist in the same directory.') parser.add_argument('--bigquery', help='can be bigquery table, query as a string, or ' + 'a pre-defined query (%%bq query --name).') evaluate_cm_parser = evaluate_sub_commands.add_parser( 'confusion_matrix', help='Get confusion matrix from evaluation results.') _add_data_params_for_evaluate(evaluate_cm_parser) evaluate_cm_parser.add_argument('--plot', action='store_true', default=False, help='Whether to plot confusion matrix as graph.') evaluate_cm_parser.add_argument('--size', type=int, default=10, help='The size of the confusion matrix.') evaluate_cm_parser.set_defaults(func=_evaluate_cm) evaluate_accuracy_parser = evaluate_sub_commands.add_parser( 'accuracy', help='Get accuracy results from classification evaluation results.') _add_data_params_for_evaluate(evaluate_accuracy_parser) evaluate_accuracy_parser.set_defaults(func=_evaluate_accuracy) evaluate_pr_parser = evaluate_sub_commands.add_parser( 'precision_recall', help='Get precision recall metrics from evaluation results.') _add_data_params_for_evaluate(evaluate_pr_parser) evaluate_pr_parser.add_argument('--plot', action='store_true', default=False, help='Whether to plot precision recall as graph.') evaluate_pr_parser.add_argument('--num_thresholds', type=int, default=20, help='Number of thresholds which determines how many ' + 'points in the graph.') evaluate_pr_parser.add_argument('--target_class', required=True, help='The target class to determine correctness of ' + 'a prediction.') evaluate_pr_parser.add_argument('--probability_column', help='The name of the column holding the probability ' + 'value of the target class. If absent, the value ' + 'of target class is used.') evaluate_pr_parser.set_defaults(func=_evaluate_pr) evaluate_roc_parser = evaluate_sub_commands.add_parser( 'roc', help='Get ROC metrics from evaluation results.') _add_data_params_for_evaluate(evaluate_roc_parser) evaluate_roc_parser.add_argument('--plot', action='store_true', default=False, help='Whether to plot ROC as graph.') evaluate_roc_parser.add_argument('--num_thresholds', type=int, default=20, help='Number of thresholds which determines how many ' + 'points in the graph.') evaluate_roc_parser.add_argument('--target_class', required=True, help='The target class to determine correctness of ' + 'a prediction.') evaluate_roc_parser.add_argument('--probability_column', help='The name of the column holding the probability ' + 'value of the target class. If absent, the value ' + 'of target class is used.') evaluate_roc_parser.set_defaults(func=_evaluate_roc) evaluate_regression_parser = evaluate_sub_commands.add_parser( 'regression', help='Get regression metrics from evaluation results.') _add_data_params_for_evaluate(evaluate_regression_parser) evaluate_regression_parser.set_defaults(func=_evaluate_regression) model_parser = parser.subcommand( 'model', help='Models and versions management such as deployment, deletion, listing.') model_sub_commands = model_parser.add_subparsers(dest='command') model_list_parser = model_sub_commands.add_parser( 'list', help='List models and versions.') model_list_parser.add_argument('--name', help='If absent, list all models of specified or current ' + 'project. If provided, list all versions of the ' + 'model.') model_list_parser.add_argument('--project', help='The project to list model(s) or version(s). 
If absent, ' + 'use Datalab\'s default project.') model_list_parser.set_defaults(func=_model_list) model_delete_parser = model_sub_commands.add_parser( 'delete', help='Delete models or versions.') model_delete_parser.add_argument('--name', required=True, help='If no "." in the name, try deleting the specified ' + 'model. If "model.version" is provided, try deleting ' + 'the specified version.') model_delete_parser.add_argument('--project', help='The project to delete model or version. If absent, ' + 'use Datalab\'s default project.') model_delete_parser.set_defaults(func=_model_delete) model_deploy_parser = model_sub_commands.add_parser( 'deploy', help='Deploy a model version.') model_deploy_parser.add_argument('--name', required=True, help='Must be model.version to indicate the model ' + 'and version name to deploy.') model_deploy_parser.add_argument('--path', required=True, help='The GCS path of the model to be deployed.') model_deploy_parser.add_argument('--runtime_version', help='The TensorFlow version to use for this model. ' + 'For example, "1.2.1". If absent, the current ' + 'TensorFlow version installed in Datalab will be used.') model_deploy_parser.add_argument('--project', help='The project to deploy a model version. If absent, ' + 'use Datalab\'s default project.') model_deploy_parser.set_defaults(func=_model_deploy) return google.datalab.utils.commands.handle_magic_line(line, cell, parser)
python
def as_dict(self): """ Json-serializable dict representation of CompleteDos. """ d = {"@module": self.__class__.__module__, "@class": self.__class__.__name__, "efermi": self.efermi, "structure": self.structure.as_dict(), "energies": list(self.energies), "densities": {str(spin): list(dens) for spin, dens in self.densities.items()}, "pdos": []} if len(self.pdos) > 0: for at in self.structure: dd = {} for orb, pdos in self.pdos[at].items(): dd[str(orb)] = {"densities": {str(int(spin)): list(dens) for spin, dens in pdos.items()}} d["pdos"].append(dd) d["atom_dos"] = {str(at): dos.as_dict() for at, dos in self.get_element_dos().items()} d["spd_dos"] = {str(orb): dos.as_dict() for orb, dos in self.get_spd_dos().items()} return d
python
def set_data(self, index, value): """Uses the given data setter, and emits the modelReset signal""" acces, field = self.get_item(index), self.header[index.column()] self.beginResetModel() self.set_data_hook(acces, field, value) self.endResetModel()
java
protected void persistSessionId(String location, String identifier, Long ksessionId) { if (location == null) { return; } FileOutputStream fos = null; ObjectOutputStream out = null; try { fos = new FileOutputStream(location + File.separator + identifier + "-jbpmSessionId.ser"); out = new ObjectOutputStream(fos); out.writeObject(Long.valueOf(ksessionId)); out.close(); } catch (IOException ex) { // logger.warn("Error when persisting known session id", ex); } finally { if (fos != null) { try { fos.close(); } catch (IOException e) { } } if (out != null) { try { out.close(); } catch (IOException e) { } } } }
python
def alias_name(): """ Returns a list of alias names matching the query parameters --- tags: - Query functions parameters: - name: alias_name in: query type: string required: false description: 'Other names used to refer to a gene' default: 'peptidase nexin-II' - name: is_previous_name in: query type: boolean required: false description: 'Other names used to refer to a gene' default: false - name: hgnc_symbol in: query type: string required: false description: 'HGNC symbol' default: APP - name: hgnc_identifier in: query type: integer required: false description: 'HGNC identifier' default: 620 - name: limit in: query type: integer required: false default: 1 """ allowed_str_args = ['alias_name', 'hgnc_symbol', 'hgnc_identifier'] allowed_int_args = ['limit', ] allowed_bool_args = ['is_previous_name', ] args = get_args( request_args=request.args, allowed_int_args=allowed_int_args, allowed_str_args=allowed_str_args, allowed_bool_args=allowed_bool_args, ) return jsonify(query.alias_name(**args))
python
def init(opts): ''' This function gets called when the proxy starts up. For login the protocol and port are cached. ''' log.debug('Initting esxcluster proxy module in process %s', os.getpid()) log.debug('Validating esxcluster proxy input') schema = EsxclusterProxySchema.serialize() log.trace('schema = %s', schema) proxy_conf = merge(opts.get('proxy', {}), __pillar__.get('proxy', {})) log.trace('proxy_conf = %s', proxy_conf) try: jsonschema.validate(proxy_conf, schema) except jsonschema.exceptions.ValidationError as exc: raise salt.exceptions.InvalidConfigError(exc) # Save mandatory fields in cache for key in ('vcenter', 'datacenter', 'cluster', 'mechanism'): DETAILS[key] = proxy_conf[key] # Additional validation if DETAILS['mechanism'] == 'userpass': if 'username' not in proxy_conf: raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'userpass\', but no ' '\'username\' key found in proxy config.') if 'passwords' not in proxy_conf: raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'userpass\', but no ' '\'passwords\' key found in proxy config.') for key in ('username', 'passwords'): DETAILS[key] = proxy_conf[key] else: if 'domain' not in proxy_conf: raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'sspi\', but no ' '\'domain\' key found in proxy config.') if 'principal' not in proxy_conf: raise salt.exceptions.InvalidConfigError( 'Mechanism is set to \'sspi\', but no ' '\'principal\' key found in proxy config.') for key in ('domain', 'principal'): DETAILS[key] = proxy_conf[key] # Save optional DETAILS['protocol'] = proxy_conf.get('protocol') DETAILS['port'] = proxy_conf.get('port') # Test connection if DETAILS['mechanism'] == 'userpass': # Get the correct login details log.debug('Retrieving credentials and testing vCenter connection for ' 'mehchanism \'userpass\'') try: username, password = find_credentials() DETAILS['password'] = password except salt.exceptions.SaltSystemExit as err: log.critical('Error: %s', err) return False return True
python
def p_Dictionary(p): """Dictionary : dictionary IDENTIFIER Inheritance "{" DictionaryMembers "}" ";" """ p[0] = model.Dictionary(name=p[2], parent=p[3], members=p[5])
java
protected synchronized void removeAdminObjectService(ServiceReference<AdminObjectService> reference) { String id = (String) reference.getProperty(ADMIN_OBJECT_CFG_ID); if (id != null) { removeAdminObjectService(reference, id, false); String jndiName = (String) reference.getProperty(ADMIN_OBJECT_CFG_JNDI_NAME); if (jndiName != null && !jndiName.equals(id)) { removeAdminObjectService(reference, jndiName, true); } } }
java
private HashMap<String, String> parseProgressInfoLine(String line) { HashMap<String, String> table = null; Matcher m = PROGRESS_INFO_PATTERN.matcher(line); while (m.find()) { if (table == null) { table = new HashMap<>(); } String key = m.group(1); String value = m.group(2); table.put(key, value); } return table; }
java
public ValidationMappingDescriptor addNamespace(String name, String value) { model.attribute(name, value); return this; }
java
@Override protected void doGet(final HttpServletRequest req, final HttpServletResponse res) throws ServletException, IOException { LOG.debug("Entering"); final AtomHandler handler = createAtomRequestHandler(req, res); final String userName = handler.getAuthenticatedUsername(); if (userName != null) { final AtomRequest areq = new AtomRequestImpl(req); try { if (handler.isAtomServiceURI(areq)) { // return an Atom Service document final AtomService service = handler.getAtomService(areq); final Document doc = service.serviceToDocument(); res.setContentType("application/atomsvc+xml; charset=utf-8"); final Writer writer = res.getWriter(); final XMLOutputter outputter = new XMLOutputter(); outputter.setFormat(Format.getPrettyFormat()); outputter.output(doc, writer); writer.close(); res.setStatus(HttpServletResponse.SC_OK); } else if (handler.isCategoriesURI(areq)) { final Categories cats = handler.getCategories(areq); res.setContentType("application/xml"); final Writer writer = res.getWriter(); final Document catsDoc = new Document(); catsDoc.setRootElement(cats.categoriesToElement()); final XMLOutputter outputter = new XMLOutputter(); outputter.output(catsDoc, writer); writer.close(); res.setStatus(HttpServletResponse.SC_OK); } else if (handler.isCollectionURI(areq)) { // return a collection final Feed col = handler.getCollection(areq); col.setFeedType(FEED_TYPE); final WireFeedOutput wireFeedOutput = new WireFeedOutput(); final Document feedDoc = wireFeedOutput.outputJDom(col); res.setContentType("application/atom+xml; charset=utf-8"); final Writer writer = res.getWriter(); final XMLOutputter outputter = new XMLOutputter(); outputter.setFormat(Format.getPrettyFormat()); outputter.output(feedDoc, writer); writer.close(); res.setStatus(HttpServletResponse.SC_OK); } else if (handler.isEntryURI(areq)) { // return an entry final Entry entry = handler.getEntry(areq); if (entry != null) { res.setContentType("application/atom+xml; type=entry; charset=utf-8"); final Writer writer = res.getWriter(); Atom10Generator.serializeEntry(entry, writer); writer.close(); } else { res.setStatus(HttpServletResponse.SC_NOT_FOUND); } } else if (handler.isMediaEditURI(areq)) { final AtomMediaResource entry = handler.getMediaResource(areq); res.setContentType(entry.getContentType()); res.setContentLength((int) entry.getContentLength()); Utilities.copyInputToOutput(entry.getInputStream(), res.getOutputStream()); res.getOutputStream().flush(); res.getOutputStream().close(); } else { res.setStatus(HttpServletResponse.SC_NOT_FOUND); } } catch (final AtomException ae) { res.sendError(ae.getStatus(), ae.getMessage()); LOG.debug("An error occured while processing GET", ae); } catch (final Exception e) { res.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, e.getMessage()); LOG.debug("An error occured while processing GET", e); } } else { res.setHeader("WWW-Authenticate", "BASIC realm=\"AtomPub\""); res.sendError(HttpServletResponse.SC_UNAUTHORIZED); } LOG.debug("Exiting"); }
java
private TaskOperationProtocol getTaskManagerProxy() throws IOException {
    if (this.taskManager == null) {
        this.taskManager = RPC.getProxy(TaskOperationProtocol.class,
            new InetSocketAddress(getInstanceConnectionInfo().address(), getInstanceConnectionInfo().ipcPort()),
            NetUtils.getSocketFactory());
    }
    return this.taskManager;
}
java
public static void error(final Object message, final Throwable t) {
    errorStream.println(APP_ERROR + message.toString());
    errorStream.println(stackTraceToString(t));
}
java
public void forEachOrderedInt(final IntConsumer consumer) {
    for (@DoNotSub int i = 0; i < size; i++) {
        consumer.accept(elements[i]);
    }
}
python
def main():
    """Shows useful information about how to configure the alias on a first
    run and configures it automatically on a second.

    It'll only be visible when the user types `fuck` and the alias isn't
    configured.
    """
    settings.init()
    configuration_details = shell.how_to_configure()

    if (
            configuration_details and
            configuration_details.can_configure_automatically
    ):
        if _is_already_configured(configuration_details):
            logs.already_configured(configuration_details)
            return
        elif _is_second_run():
            _configure(configuration_details)
            logs.configured_successfully(configuration_details)
            return
        else:
            _record_first_run()

    logs.how_to_configure_alias(configuration_details)
java
public static ChunksManifestType createChunksManifestElementFrom(ChunksManifestBean manifest) {
    ChunksManifestType manifestType = ChunksManifestType.Factory.newInstance();
    populateElementFromObject(manifestType, manifest);
    return manifestType;
}
python
def get_path_components(directory):
    """Breaks a path to a directory into a (drive, list-of-folders) tuple

    :param directory:
    :return: a tuple consisting of the drive (if any) and an ordered list
        of folder names
    """
    drive, dirs = os.path.splitdrive(directory)
    folders = []
    previous = ""
    while dirs != previous and dirs != "":
        previous = dirs
        dirs, folder = os.path.split(dirs)
        if folder != "":
            folders.append(folder)
    folders.reverse()
    return drive, folders
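A minimal usage sketch for get_path_components (it only assumes `os` is importable, as the function itself does; the drive component is empty on POSIX systems and something like 'C:' for Windows-style paths):

import os

# Hypothetical example: split a nested directory into its components.
drive, folders = get_path_components(os.path.join('/tmp', 'projects', 'demo'))
print(drive)    # '' on POSIX
print(folders)  # ['tmp', 'projects', 'demo'] (the root '/' itself is not included)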
java
public static Stream<DynamicTest> packageScan(String... packagesToScan) {
    List<DynamicTest> tests = new ArrayList<>();

    for (String packageScan : packagesToScan) {
        try {
            for (String fileNamePattern : Citrus.getXmlTestFileNamePattern()) {
                Resource[] fileResources = new PathMatchingResourcePatternResolver().getResources(packageScan.replace('.', File.separatorChar) + fileNamePattern);
                for (Resource fileResource : fileResources) {
                    String filePath = fileResource.getFile().getParentFile().getCanonicalPath();

                    if (packageScan.startsWith("file:")) {
                        filePath = "file:" + filePath;
                    }

                    filePath = filePath.substring(filePath.indexOf(packageScan.replace('.', File.separatorChar)));

                    String testName = fileResource.getFilename().substring(0, fileResource.getFilename().length() - ".xml".length());
                    XmlTestLoader testLoader = new XmlTestLoader(DynamicTest.class, testName, filePath, citrus.getApplicationContext());
                    tests.add(DynamicTest.dynamicTest(testName, () -> citrus.run(testLoader.load())));
                }
            }
        } catch (IOException e) {
            throw new CitrusRuntimeException("Unable to locate file resources for test package '" + packageScan + "'", e);
        }
    }

    return tests.stream();
}
java
public static cacheforwardproxy[] get(nitro_service service) throws Exception {
    cacheforwardproxy obj = new cacheforwardproxy();
    cacheforwardproxy[] response = (cacheforwardproxy[]) obj.get_resources(service);
    return response;
}
java
@SuppressWarnings("WeakerAccess")
public static <T> T createClientProxy(ClassLoader classLoader, Class<T> proxyInterface, final JsonRpcClient client, Socket socket) throws IOException {
    return createClientProxy(classLoader, proxyInterface, client, socket.getInputStream(), socket.getOutputStream());
}
java
public void add(Model child) {
    if (child == null) throw new IllegalArgumentException("cannot add what is null");

    //TODO: refactor this method
    MetaModel childMetaModel = metaModelOf(child.getClass());
    MetaModel metaModel = metaModelLocal;
    if (getId() != null) {
        if (metaModel.hasAssociation(child.getClass(), OneToManyAssociation.class)) {
            OneToManyAssociation ass = metaModel.getAssociationForTarget(child.getClass(), OneToManyAssociation.class);
            String fkName = ass.getFkName();
            child.set(fkName, getId());
            child.saveIt(); //this will cause an exception in case validations fail.
        } else if (metaModel.hasAssociation(child.getClass(), Many2ManyAssociation.class)) {
            Many2ManyAssociation ass = metaModel.getAssociationForTarget(child.getClass(), Many2ManyAssociation.class);
            if (child.getId() == null) {
                child.saveIt();
            }
            MetaModel joinMetaModel = metaModelFor(ass.getJoin());
            if (joinMetaModel == null) {
                new DB(metaModel.getDbName()).exec(metaModel.getDialect().insertManyToManyAssociation(ass), getId(), child.getId());
            } else {
                //TODO: write a test to cover this case:
                //this is for Oracle, many 2 many, and all annotations used, including @IdGenerator. In this case,
                //it is best to delegate generation of insert to a model (sequences, etc.)
                try {
                    Model joinModel = joinMetaModel.getModelClass().newInstance();
                    joinModel.set(ass.getSourceFkName(), getId());
                    joinModel.set(ass.getTargetFkName(), child.getId());
                    joinModel.saveIt();
                } catch (InstantiationException e) {
                    throw new InitException("failed to create a new instance of class: " + joinMetaModel.getClass()
                            + ", are you sure this class has a default constructor?", e);
                } catch (IllegalAccessException e) {
                    throw new InitException(e);
                } finally {
                    Registry.cacheManager().purgeTableCache(ass.getJoin());
                    Registry.cacheManager().purgeTableCache(metaModel);
                    Registry.cacheManager().purgeTableCache(childMetaModel);
                }
            }
        } else if (metaModel.hasAssociation(child.getClass(), OneToManyPolymorphicAssociation.class)) {
            OneToManyPolymorphicAssociation ass = metaModel.getAssociationForTarget(child.getClass(), OneToManyPolymorphicAssociation.class);
            child.set("parent_id", getId());
            child.set("parent_type", ass.getTypeLabel());
            child.saveIt();
        } else {
            throw new NotAssociatedException(getClass(), child.getClass());
        }
    } else {
        throw new IllegalArgumentException("You can only add associated model to an instance that exists in DB. Save this instance first, then you will be able to add dependencies to it.");
    }
}
java
public ConditionCheck withExpressionAttributeNames(java.util.Map<String, String> expressionAttributeNames) {
    setExpressionAttributeNames(expressionAttributeNames);
    return this;
}
python
def field_name_exist(self, field_name):
    """Check if there is already a field named field_name in the current class.

    It is useful before allowing a field to be renamed, to check that the
    name does not already exist.
    """
    fields = self.class_item.get_fields()
    for f in fields:
        if f.name == field_name:
            return True
    return False
python
def normalize_ip(ip):
    """
    Transform the address into a fixed-length form, such as::

        192.168.0.1 -> 192.168.000.001

    :type ip: string
    :param ip: An IP address.
    :rtype: string
    :return: The normalized IP.
    """
    theip = ip.split('.')
    if len(theip) != 4:
        raise ValueError('ip should be 4 tuples')
    return '.'.join(str(int(l)).rjust(3, '0') for l in theip)
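A short, self-contained illustration of normalize_ip's behavior (no assumptions beyond the function above):

# Pads every octet to three digits so addresses sort lexicographically.
print(normalize_ip('192.168.0.1'))  # -> '192.168.000.001'
print(normalize_ip('10.0.12.7'))    # -> '010.000.012.007'

# A malformed address raises ValueError.
try:
    normalize_ip('10.0.1')
except ValueError as exc:
    print(exc)  # ip should be 4 tuples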
python
def delete(self, *args, **kwargs):
    """
    This method implements retries for object deletion.
    """
    count = 0
    max_retries = 3
    while True:
        try:
            return super(BaseModel, self).delete(*args, **kwargs)
        except django.db.utils.OperationalError:
            if count >= max_retries:
                raise
            count += 1
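The loop above re-issues the delete immediately after an OperationalError; a common refinement is to pause briefly between attempts so a transiently locked database has time to recover. A sketch of that variant only; the backoff schedule is an illustrative choice, not part of the original code:

import time
import django.db.utils

def delete_with_backoff(self, *args, **kwargs):
    """Delete with a short exponential backoff between retries."""
    max_retries = 3
    for attempt in range(max_retries + 1):
        try:
            return super(BaseModel, self).delete(*args, **kwargs)
        except django.db.utils.OperationalError:
            if attempt == max_retries:
                raise
            # Sleep 0.1s, 0.2s, 0.4s, ... before the next attempt.
            time.sleep(0.1 * (2 ** attempt))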
python
def clone_data(self, source):
    """Clone data from another Block.

    source
        Block instance to copy from.
    """
    klass = self.__class__
    assert isinstance(source, klass)
    for name in klass._fields:
        self._field_data[name] = getattr(source, name)
python
def parseReaderConfig(self, confdict):
    """Parse a reader configuration dictionary.

    Examples:
        { Type: 23, Data: b'\x00' }
        { Type: 1023, Vendor: 25882, Subtype: 21, Data: b'\x00' }
    """
    logger.debug('parseReaderConfig input: %s', confdict)
    conf = {}
    for k, v in confdict.items():
        if not k.startswith('Parameter'):
            continue
        ty = v['Type']
        data = v['Data']
        vendor = None
        subtype = None
        try:
            vendor, subtype = v['Vendor'], v['Subtype']
        except KeyError:
            pass
        if ty == 1023:
            if vendor == 25882 and subtype == 37:
                tempc = struct.unpack('!H', data)[0]
                conf.update(temperature=tempc)
        else:
            conf[ty] = data
    return conf
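A hypothetical call to the parser above. The confdict shape follows the docstring; the vendor/subtype pair (25882/37) is the one the temperature branch checks for, and `reader` stands in for whatever object this method is bound to:

# Hypothetical LLRP reader-config dict in the shape the parser expects.
confdict = {
    'Parameter1': {'Type': 23, 'Data': b'\x01'},
    'Parameter2': {'Type': 1023, 'Vendor': 25882, 'Subtype': 37,
                   'Data': b'\x00\x1d'},   # big-endian 29
}

conf = reader.parseReaderConfig(confdict)
print(conf)  # {23: b'\x01', 'temperature': 29}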
python
def handle_request(self, request, **resources):
    """Call RPC method.

    :return object: call's result
    """
    if request.method == 'OPTIONS':
        return super(RPCResource, self).handle_request(request, **resources)

    payload = request.data

    try:
        if request.method == 'GET':
            payload = request.GET.get('payload')
            try:
                payload = js.loads(payload)
            except TypeError:
                raise AssertionError("Invalid RPC Call.")

        if 'method' not in payload:
            raise AssertionError("Invalid RPC Call.")

        return self.rpc_call(request, **payload)

    except Exception as e:  # noqa (any error)
        return SerializedHttpResponse(
            dict(error=dict(message=str(e))), error=True)
java
public void getEventsForJob(final JobID jobID, final List<AbstractEvent> eventList, final boolean includeManagementEvents) {
    synchronized (this.collectedEvents) {
        List<AbstractEvent> eventsForJob = this.collectedEvents.get(jobID);
        if (eventsForJob != null) {
            final Iterator<AbstractEvent> it = eventsForJob.iterator();
            while (it.hasNext()) {
                final AbstractEvent event = it.next();
                final boolean isManagementEvent = (event instanceof ManagementEvent);
                if (!isManagementEvent || includeManagementEvents) {
                    eventList.add(event);
                }
            }
        }
    }
}
python
def traverse(self):
    """Traverse the tree yielding the direction taken to a node, the
    co-ordinates of that node and the directions leading from the Node.

    Yields
    ------
    (direction, (x, y), {:py:class:`~rig.routing_table.Routes`, ...})
        Direction taken to reach a Node in the tree, the (x, y) co-ordinate
        of that Node and routes leading to children of the Node.
    """
    # A queue of (direction, node) to visit. The direction is the Links
    # entry which describes the direction in which we last moved to reach
    # the node (or None for the root).
    to_visit = deque([(None, self)])
    while to_visit:
        direction, node = to_visit.popleft()

        # Determine the set of directions we must travel to reach the
        # children
        out_directions = set()
        for child_direction, child in node.children:
            # Note that if the direction is unspecified, we simply
            # (silently) don't add a route for that child.
            if child_direction is not None:
                out_directions.add(child_direction)

            # Search the next steps of the route too
            if isinstance(child, RoutingTree):
                assert child_direction is not None
                to_visit.append((child_direction, child))

        # Yield the information pertaining to this Node
        yield direction, node.chip, out_directions
python
def shap_values(self, X, **kwargs):
    """Estimate the SHAP values for a set of samples.

    Parameters
    ----------
    X : numpy.array or pandas.DataFrame or any scipy.sparse matrix
        A matrix of samples (# samples x # features) on which to explain the model's output.

    nsamples : "auto" or int
        Number of times to re-evaluate the model when explaining each prediction. More samples
        lead to lower variance estimates of the SHAP values. The "auto" setting uses
        `nsamples = 2 * X.shape[1] + 2048`.

    l1_reg : "num_features(int)", "auto" (default for now, but deprecated), "aic", "bic", or float
        The l1 regularization to use for feature selection (the estimation procedure is based on
        a debiased lasso). The auto option currently uses "aic" when less than 20% of the possible
        sample space is enumerated, otherwise it uses no regularization. THE BEHAVIOR OF "auto"
        WILL CHANGE in a future version to be based on num_features instead of AIC. The "aic" and
        "bic" options use the AIC and BIC rules for regularization. Using "num_features(int)"
        selects a fixed number of top features. Passing a float directly sets the "alpha"
        parameter of the sklearn.linear_model.Lasso model used for feature selection.

    Returns
    -------
    For models with a single output this returns a matrix of SHAP values
    (# samples x # features). Each row sums to the difference between the model output for that
    sample and the expected value of the model output (which is stored as the expected_value
    attribute of the explainer). For models with vector outputs this returns a list of such
    matrices, one for each output.
    """

    # convert dataframes
    if str(type(X)).endswith("pandas.core.series.Series'>"):
        X = X.values
    elif str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
        if self.keep_index:
            index_value = X.index.values
            index_name = X.index.name
            column_name = list(X.columns)
        X = X.values

    x_type = str(type(X))
    arr_type = "'numpy.ndarray'>"
    # if sparse, convert to lil for performance
    if sp.sparse.issparse(X) and not sp.sparse.isspmatrix_lil(X):
        X = X.tolil()
    assert x_type.endswith(arr_type) or sp.sparse.isspmatrix_lil(X), "Unknown instance type: " + x_type
    assert len(X.shape) == 1 or len(X.shape) == 2, "Instance must have 1 or 2 dimensions!"

    # single instance
    if len(X.shape) == 1:
        data = X.reshape((1, X.shape[0]))
        if self.keep_index:
            data = convert_to_instance_with_index(data, column_name, index_name, index_value)
        explanation = self.explain(data, **kwargs)

        # vector-output
        s = explanation.shape
        if len(s) == 2:
            outs = [np.zeros(s[0]) for j in range(s[1])]
            for j in range(s[1]):
                outs[j] = explanation[:, j]
            return outs

        # single-output
        else:
            out = np.zeros(s[0])
            out[:] = explanation
            return out

    # explain the whole dataset
    elif len(X.shape) == 2:
        explanations = []
        for i in tqdm(range(X.shape[0]), disable=kwargs.get("silent", False)):
            data = X[i:i + 1, :]
            if self.keep_index:
                data = convert_to_instance_with_index(data, column_name, index_value[i:i + 1], index_name)
            explanations.append(self.explain(data, **kwargs))

        # vector-output
        s = explanations[0].shape
        if len(s) == 2:
            outs = [np.zeros((X.shape[0], s[0])) for j in range(s[1])]
            for i in range(X.shape[0]):
                for j in range(s[1]):
                    outs[j][i] = explanations[i][:, j]
            return outs

        # single-output
        else:
            out = np.zeros((X.shape[0], s[0]))
            for i in range(X.shape[0]):
                out[i] = explanations[i]
            return out
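This method is normally reached through shap's KernelExplainer; a rough usage sketch, under the assumption that the installed shap version exposes that class (the model and data below are toy placeholders):

import numpy as np
import shap
from sklearn.linear_model import LinearRegression

# Toy model plus a small background dataset used to integrate features out.
X_bg = np.random.randn(50, 4)
y = X_bg @ np.array([1.0, -2.0, 0.5, 0.0])
model = LinearRegression().fit(X_bg, y)

explainer = shap.KernelExplainer(model.predict, X_bg)
values = explainer.shap_values(X_bg[:5], nsamples=200, l1_reg="num_features(4)")
print(np.asarray(values).shape)  # (5, 4): one SHAP value per sample and feature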
java
public UserManagedCacheBuilder<K, V, T> using(Service service) {
    UserManagedCacheBuilder<K, V, T> otherBuilder = new UserManagedCacheBuilder<>(this);
    if (service instanceof SizeOfEngineProvider) {
        removeAnySizeOfEngine(otherBuilder);
    }
    otherBuilder.services.add(service);
    return otherBuilder;
}
python
def _format_firewall_stdout(cmd_ret):
    '''
    Helper function to format the stdout from the get_firewall_status function.

    cmd_ret
        The return dictionary that comes from a cmd.run_all call.
    '''
    ret_dict = {'success': True,
                'rulesets': {}}
    for line in cmd_ret['stdout'].splitlines():
        if line.startswith('Name'):
            continue
        if line.startswith('---'):
            continue
        ruleset_status = line.split()
        ret_dict['rulesets'][ruleset_status[0]] = bool(ruleset_status[1])

    return ret_dict
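A hypothetical illustration of the parser above; the stdout sample mimics the tabular ruleset listing the function expects (the exact column layout is an assumption):

# Hypothetical cmd.run_all-style return value.
cmd_ret = {
    'retcode': 0,
    'stdout': (
        "Name           Enabled\n"
        "-------------  -------\n"
        "sshServer      true\n"
        "dhcp           true\n"
    ),
}

print(_format_firewall_stdout(cmd_ret))
# {'success': True, 'rulesets': {'sshServer': True, 'dhcp': True}}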
python
def is_opened(components):
    """
    Checks if all components are opened.

    To be checked, components must implement the [[IOpenable]] interface.
    If they don't, the call to this method returns true.

    :param components: a list of components that are to be checked.

    :return: true if all components are opened and false if at least one component is closed.
    """
    if components is None:
        return True

    result = True
    for component in components:
        result = result and Opener.is_opened_one(component)

    return result
python
def Iaax(mt, x, *args):
    """
    (Iä)x : Returns the present value of an annuity with payments at the
    beginning of each year, increasing linearly.
    Arithmetically increasing annuity-due (anticipatory).
    """
    return Sx(mt, x) / Dx(mt, x)
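For reference, the commutation-function identity behind the one-liner, in standard actuarial notation (a sketch; it assumes Dx, Nx and Sx follow the usual definitions used by the surrounding mortality-table helpers):

(I\ddot{a})_x = \sum_{k \ge 0} (k+1)\, v^{k}\, {}_{k}p_{x} = \frac{S_x}{D_x},
\qquad D_x = v^{x} l_x, \quad N_x = \sum_{k \ge 0} D_{x+k}, \quad S_x = \sum_{k \ge 0} N_{x+k}.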
python
def import_(module, objects=None, via=None):
    """
    :param module: py3-compatible module path
    :param objects: objects to import; it should be a list
    :param via: for some py2 modules, you should give the import path matching the
        objects you want to import
    :return: object or module
    """
    if PY3:
        mod = __import__(module, fromlist=['*'])
    else:
        path = modules_mapping.get(module)
        if not path:
            raise Exception("Can't find the module %s in mappings." % module)
        if isinstance(path, list):
            if not via:
                raise Exception("You should give a via parameter to enable import from py2.")
            path = via
        mod = __import__(path, fromlist=['*'])
    if objects:
        if not isinstance(objects, (list, tuple)):
            raise Exception("objects parameter should be a list or tuple.")
        r = []
        for x in objects:
            r.append(getattr(mod, x))
        if len(r) > 1:
            return tuple(r)
        else:
            return r[0]
    else:
        return mod
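A hypothetical usage sketch (it assumes the enclosing module defines PY3 and modules_mapping as the code implies, and that it runs on Python 3 so the py3 branch is taken):

# Import the whole module under its py3 path.
parse_mod = import_('urllib.parse')

# Import specific names; more than one name comes back as a tuple.
urlencode, quote = import_('urllib.parse', objects=['urlencode', 'quote'])
print(quote('a b'))  # 'a%20b'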
python
def application(cls, f):
    """Decorate a function as responder that accepts the request as first
    argument.  This works like the :func:`responder` decorator but the
    function is passed the request object as first argument and the
    request object will be closed automatically::

        @Request.application
        def my_wsgi_app(request):
            return Response('Hello World!')

    :param f: the WSGI callable to decorate
    :return: a new WSGI callable
    """
    #: return a callable that wraps the -2nd argument with the request
    #: and calls the function with all the arguments up to that one and
    #: the request.  The return value is then called with the latest
    #: two arguments.  This makes it possible to use this decorator for
    #: both methods and standalone WSGI functions.
    def application(*args):
        request = cls(args[-2])
        with request:
            return f(*args[:-2] + (request,))(*args[-2:])

    return update_wrapper(application, f)
python
def get_plate_stock(self, plate_code):
    """
    Get the list of stocks belonging to a specific plate (sector).

    :param plate_code: plate code, string, e.g. "SH.BK0001", "SH.BK0002"; use the
        sub-plate list query first to obtain sub-plate codes
    :return: (ret, data)

        ret == RET_OK: data is a pd.DataFrame with the columns below

        ret != RET_OK: data is an error string

        =====================   ===========   ==============================================================
        field                   type          description
        =====================   ===========   ==============================================================
        code                    str           stock code
        lot_size                int           number of shares per lot
        stock_name              str           stock name
        stock_owner             str           code of the underlying stock (for derivatives)
        stock_child_type        str           stock sub-type, see WrtType
        stock_type              str           stock type, see SecurityType
        list_time               str           listing time (US stocks default to US Eastern time,
                                              HK and A-share stocks default to Beijing time)
        stock_id                int           stock id
        =====================   ===========   ==============================================================
    """
    if plate_code is None or is_str(plate_code) is False:
        error_str = ERROR_STR_PREFIX + "the type of code is wrong"
        return RET_ERROR, error_str

    query_processor = self._get_sync_query_processor(
        PlateStockQuery.pack_req, PlateStockQuery.unpack_rsp)
    kargs = {
        "plate_code": plate_code,
        "conn_id": self.get_sync_conn_id()
    }

    ret_code, msg, plate_stock_list = query_processor(**kargs)
    if ret_code == RET_ERROR:
        return ret_code, msg

    col_list = [
        'code', 'lot_size', 'stock_name', 'stock_owner',
        'stock_child_type', 'stock_type', 'list_time', 'stock_id',
    ]
    plate_stock_table = pd.DataFrame(plate_stock_list, columns=col_list)

    return RET_OK, plate_stock_table
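A rough usage sketch, assuming this method lives on the quote-context class of the Futu/futuquant client and that a local OpenD gateway is listening on the default port; the import path and plate code are illustrative assumptions:

from futu import OpenQuoteContext, RET_OK  # import path is an assumption

quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
ret, data = quote_ctx.get_plate_stock('SH.BK0001')
if ret == RET_OK:
    print(data[['code', 'stock_name', 'lot_size']].head())
else:
    print('error:', data)
quote_ctx.close()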
python
def _get_cached_time(self):
    """Method that will allow for consistent modified and archived timestamps.

    Returns:
        self.Meta.datetime: This method will return a datetime that is
            compatible with the current class's datetime library.
    """
    if not self._cached_time:
        self._cached_time = self._meta.datetime.utcnow()

    return self._cached_time
python
def interm_size(self) -> Sequence[Sequence[int]]:
    '''The size of each intermediate fluent in canonical order.

    Returns:
        Sequence[Sequence[int]]: A tuple of tuple of integers
        representing the shape and size of each fluent.
    '''
    fluents = self.domain.intermediate_fluents
    ordering = self.domain.interm_fluent_ordering
    return self._fluent_size(fluents, ordering)
java
public void assertNonExisting(CryptoPath cleartextPath) throws FileAlreadyExistsException, IOException {
    try {
        CiphertextFileType type = getCiphertextFileType(cleartextPath);
        throw new FileAlreadyExistsException(cleartextPath.toString(), null, "For this path there is already a " + type.name());
    } catch (NoSuchFileException e) {
        // good!
    }
}
java
public void findInPackage(final Test test, String packageName) {
    packageName = packageName.replace('.', '/');
    final ClassLoader loader = getClassLoader();
    Enumeration<URL> urls;

    try {
        urls = loader.getResources(packageName);
    } catch (final IOException ioe) {
        LOGGER.warn("Could not read package: " + packageName, ioe);
        return;
    }

    while (urls.hasMoreElements()) {
        try {
            final URL url = urls.nextElement();
            final String urlPath = extractPath(url);

            LOGGER.info("Scanning for classes in [" + urlPath + "] matching criteria: " + test);
            // Check for a jar in a war in JBoss
            if (VFSZIP.equals(url.getProtocol())) {
                final String path = urlPath.substring(0, urlPath.length() - packageName.length() - 2);
                final URL newURL = new URL(url.getProtocol(), url.getHost(), path);
                @SuppressWarnings("resource")
                final JarInputStream stream = new JarInputStream(newURL.openStream());
                try {
                    loadImplementationsInJar(test, packageName, path, stream);
                } finally {
                    close(stream, newURL);
                }
            } else if (BUNDLE_RESOURCE.equals(url.getProtocol()) || BUNDLE.equals(url.getProtocol())) {
                loadImplementationsInBundle(test, packageName);
            } else {
                final File file = new File(urlPath);
                if (file.isDirectory()) {
                    loadImplementationsInDirectory(test, packageName, file);
                } else {
                    loadImplementationsInJar(test, packageName, file);
                }
            }
        } catch (final IOException ioe) {
            LOGGER.warn("could not read entries", ioe);
        } catch (final URISyntaxException e) {
            LOGGER.warn("could not read entries", e);
        }
    }
}
python
def sign_in(user, user_type=None, date=None, time_in=None):
    """Add a new entry to the timesheet.

    :param user: `models.User` object. The user to sign in.
    :param user_type: (optional) Specify whether user is signing in as a
        `'student'` or `'tutor'`.
    :param date: (optional) `datetime.date` object. Specify the entry date.
    :param time_in: (optional) `datetime.time` object. Specify the sign in time.
    :return: The new entry.
    """  # noqa
    now = datetime.today()
    if date is None:
        date = now.date()
    if time_in is None:
        time_in = now.time()

    if user_type is None:
        if user.is_student and user.is_tutor:
            raise AmbiguousUserType('User is both a student and a tutor.')
        elif user.is_student:
            user_type = 'student'
        elif user.is_tutor:
            user_type = 'tutor'
        else:
            raise ValueError('Unknown user type.')

    new_entry = Entry(
        uuid=str(uuid.uuid4()),
        date=date,
        time_in=time_in,
        time_out=None,
        user_id=user.user_id,
        user_type=user_type,
        user=user,
    )
    logger.info('{} ({}) signed in.'.format(new_entry.user_id, new_entry.user_type))
    return new_entry
java
@Override
public void mousePressed(MouseEvent event) {
    lastClick = event.getButton();
    clicks[lastClick] = true;

    final Integer key = Integer.valueOf(lastClick);
    if (actionsPressed.containsKey(key)) {
        final List<EventAction> actions = actionsPressed.get(key);
        for (final EventAction current : actions) {
            current.action();
        }
    }
}
python
def compute_node_deps():
    """
    Returns the full dependency graph of ALL ops and ALL tensors as a
    Map<string, list<string>> where key=node name, values=list of dependency names.

    If an Op takes in a placeholder tensor that is the output of a PythonOp,
    we need to replace that Placeholder with the PythonOp.
    """
    deps = {}
    g = tf.get_default_graph()
    for op in g.get_operations():
        d = set([i.name for i in op.control_inputs])
        for t in op.inputs:
            if is_htop_out(t):
                d.add(get_op(t).name)
            else:
                d.add(t.name)
        deps[op.name] = d
        for t in op.outputs:
            deps[t.name] = set([op.name])

    # do the same thing with HTOps
    for op in _ops.values():
        d = set()
        for t in op.inputs:
            if is_htop_out(t):
                d.add(get_op(t).name)
            else:
                d.add(t.name)
        deps[op.name] = d

    return deps
python
def register_mime(shortname, mime_types):
    """
    Register a new mime type.

    Usage example:
        mimerender.register_mime('svg', ('application/x-svg', 'application/svg+xml',))

    After this you can do:
        @mimerender.mimerender(svg=render_svg)
        def GET(...
            ...
    """
    if shortname in _MIME_TYPES:
        raise MimeRenderException('"%s" has already been registered' % shortname)
    _MIME_TYPES[shortname] = mime_types
java
public static nslimitidentifier_nslimitsessions_binding[] get_filtered(nitro_service service, String limitidentifier, filtervalue[] filter) throws Exception {
    nslimitidentifier_nslimitsessions_binding obj = new nslimitidentifier_nslimitsessions_binding();
    obj.set_limitidentifier(limitidentifier);
    options option = new options();
    option.set_filter(filter);
    nslimitidentifier_nslimitsessions_binding[] response = (nslimitidentifier_nslimitsessions_binding[]) obj.getfiltered(service, option);
    return response;
}
python
def transform_search_hit(self, pid, record_hit, links_factory=None):
    """Transform search result hit into an intermediate representation.

    :param pid: The :class:`invenio_pidstore.models.PersistentIdentifier`
        instance.
    :param record_hit: A dictionary containing a ``'_source'`` key with the
        record data.
    :param links_factory: The link factory. (Default: ``None``)
    :returns: The intermediate representation for the record.
    """
    return self.dump(self.preprocess_search_hit(
        pid, record_hit, links_factory=links_factory))