language | func_code_string
---|---
java
|
public synchronized static void assimilate()
{
if (assimilated) {
return;
}
assimilated = true;
// Assimilate java.util.logging
final Logger rootLogger = LogManager.getLogManager().getLogger("");
final Handler[] handlers = rootLogger.getHandlers();
if (handlers != null) {
for (Handler handler : handlers) {
rootLogger.removeHandler(handler);
}
}
SLF4JBridgeHandler.install();
Log.forClass(AssimilateForeignLogging.class).info("java.util.logging was assimilated.");
}
|
python
|
def _get_retro_id(cls, home_team_id, timestamp, game_number):
"""
get retro id
:param home_team_id: home team id
:param timestamp: game day
:param game_number: game number
:return: retro id
"""
return '{home_team_id}{year}{month}{day}{game_number}'.format(
**{
'home_team_id': home_team_id.upper(),
'year': timestamp.year,
'month': timestamp.strftime('%m'),
'day': timestamp.strftime('%d'),
'game_number': int(game_number)-1,
}
)
|
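A minimal usage sketch of the id-formatting logic above. The standalone helper below mirrors the snippet so it can run outside its class; the inputs are hypothetical:

from datetime import date

def retro_id(home_team_id, timestamp, game_number):
    # Same formatting as _get_retro_id: team code, zero-padded date, 0-based game number.
    return '{}{}{}{}{}'.format(
        home_team_id.upper(),
        timestamp.year,
        timestamp.strftime('%m'),
        timestamp.strftime('%d'),
        int(game_number) - 1,
    )

print(retro_id('nya', date(2017, 8, 15), 1))  # -> NYA201708150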
python
|
def reguid(zpool):
'''
Generates a new unique identifier for the pool
.. warning::
You must ensure that all devices in this pool are online and healthy
before performing this action.
zpool : string
name of storage pool
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' zpool.reguid myzpool
'''
## generate new GUID for pool
res = __salt__['cmd.run_all'](
__utils__['zfs.zpool_command'](
command='reguid',
target=zpool,
),
python_shell=False,
)
return __utils__['zfs.parse_command_result'](res, 'reguided')
|
python
|
def add_ipv4addr(self, ipv4addr):
"""Add an IPv4 address to the host.
:param str ipv4addr: The IP address to add.
:raises: ValueError
"""
for addr in self.ipv4addrs:
if ((isinstance(addr, dict) and addr['ipv4addr'] == ipv4addr) or
(isinstance(addr, HostIPv4) and addr.ipv4addr == ipv4addr)):
raise ValueError('IPv4 address {} already exists'.format(ipv4addr))
self.ipv4addrs.append({'ipv4addr': ipv4addr})
|
python
|
def search_line(line, search, searchtype):
    """Return True if the search term is found in the given line, False otherwise."""
    if searchtype == 're' or searchtype == 'word':
        return bool(re.search(search, line))
    elif searchtype == 'pos':
        return searcher.search_out(line, search)
    elif searchtype == 'hyper':
        return searcher.hypernym_search(line, search)
    # Unknown search types match nothing rather than silently returning None.
    return False
|
java
|
@Deprecated
public NearCacheConfig setEvictionPolicy(String evictionPolicy) {
this.evictionPolicy = checkNotNull(evictionPolicy, "Eviction policy cannot be null!");
this.evictionConfig.setEvictionPolicy(EvictionPolicy.valueOf(evictionPolicy));
this.evictionConfig.setMaximumSizePolicy(ENTRY_COUNT);
return this;
}
|
python
|
def PlistValueToPlainValue(plist):
"""Takes the plist contents generated by binplist and returns a plain dict.
binplist uses rich types to express some of the plist types. We need to
convert them to types that RDFValueArray will be able to transport.
Args:
plist: A plist to convert.
Returns:
A simple python type.
"""
if isinstance(plist, dict):
ret_value = dict()
for key, value in iteritems(plist):
ret_value[key] = PlistValueToPlainValue(value)
return ret_value
elif isinstance(plist, list):
return [PlistValueToPlainValue(value) for value in plist]
elif isinstance(plist, datetime.datetime):
return (calendar.timegm(plist.utctimetuple()) * 1000000) + plist.microsecond
return plist
|
java
|
public static int sum (int[] list)
{
int total = 0, lsize = list.length;
for (int ii = 0; ii < lsize; ii++) {
total += list[ii];
}
return total;
}
|
java
|
public List<CustomWindowStateType<PortletAppType<T>>> getAllCustomWindowState()
{
List<CustomWindowStateType<PortletAppType<T>>> list = new ArrayList<CustomWindowStateType<PortletAppType<T>>>();
List<Node> nodeList = childNode.get("custom-window-state");
for(Node node: nodeList)
{
CustomWindowStateType<PortletAppType<T>> type = new CustomWindowStateTypeImpl<PortletAppType<T>>(this, "custom-window-state", childNode, node);
list.add(type);
}
return list;
}
|
java
|
public static String extractRegexGroup(String fromContent, String regex, int groupNumber) throws Exception {
if (regex == null) {
throw new Exception("Cannot extract regex group because the provided regular expression is null.");
}
Pattern expectedPattern = Pattern.compile(regex);
return extractRegexGroup(fromContent, expectedPattern, groupNumber);
}
|
python
|
def netconf_state_statistics_in_rpcs(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
netconf_state = ET.SubElement(config, "netconf-state", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring")
statistics = ET.SubElement(netconf_state, "statistics")
in_rpcs = ET.SubElement(statistics, "in-rpcs")
in_rpcs.text = kwargs.pop('in_rpcs')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
java
|
private Object extractAttributeValueIfAttributeQueryConstant(String attributeName) {
if (KEY_ATTRIBUTE_NAME.value().equals(attributeName)) {
return getKey();
} else if (THIS_ATTRIBUTE_NAME.value().equals(attributeName)) {
return getValue();
}
return null;
}
|
java
|
public static JSONObject copy(final JSONObject src, final boolean deep) {
final JSONObject dest = new JSONObject();
Iterator<String> keys = src.keys();
while (keys.hasNext()) {
final String key = keys.next();
final Object value = src.opt(key);
if (deep) {
if (value instanceof JSONObject) {
safePut(dest, key, copy((JSONObject) value, deep));
} else if (value instanceof JSONArray) {
safePut(dest, key, copy((JSONArray) value, deep));
} else {
safePut(dest, key, value);
}
} else {
safePut(dest, key, value);
}
}
return dest;
}
|
python
|
def start(self):
"""
function to initialize thread for downloading
"""
global parallel
for self.i in range(0, self.length):
if parallel:
self.thread.append(myThread(self.url[self.i], self.directory, self.i,
self.min_file_size, self.max_file_size, self.no_redirects))
else:
# if not parallel whole url list is passed
self.thread.append(myThread(self.url, self.directory, self.i, self.min_file_size,
self.max_file_size, self.no_redirects))
self.progress[self.i]["value"] = 0
self.bytes[self.i] = 0
self.thread[self.i].start()
self.read_bytes()
|
java
|
private void initAttributes(Context context, AttributeSet attributeSet) {
TypedArray typedArray = context.obtainStyledAttributes(attributeSet, R.styleable.MotionView);
friction = typedArray.getFloat(R.styleable.MotionView_friction, 0.75f) * 1000;
typedArray.recycle();
}
|
python
|
def system_info(url, auth, verify_ssl):
"""Retrieve SDC system information.
Args:
url (str): the host url.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify the server's SSL certificate.
"""
sysinfo_response = requests.get(url + '/info', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
sysinfo_response.raise_for_status()
return sysinfo_response.json()
|
java
|
public static JSONObject toJSONObject(java.util.Properties properties) throws JSONException {
// can't use the new constructor for Android support
// JSONObject jo = new JSONObject(properties == null ? 0 : properties.size());
JSONObject jo = new JSONObject();
if (properties != null && !properties.isEmpty()) {
Enumeration<?> enumProperties = properties.propertyNames();
while(enumProperties.hasMoreElements()) {
String name = (String)enumProperties.nextElement();
jo.put(name, properties.getProperty(name));
}
}
return jo;
}
|
java
|
public String[] getPartitionKeys(String location, Job job) throws IOException
{
if (!usePartitionFilter)
return null;
List<ColumnDef> indexes = getIndexes();
String[] partitionKeys = new String[indexes.size()];
for (int i = 0; i < indexes.size(); i++)
{
partitionKeys[i] = new String(indexes.get(i).getName());
}
return partitionKeys;
}
|
python
|
def select(self, domain_or_name, query='', next_token=None,
consistent_read=False):
"""
Returns a set of Attributes for item names within domain_name that
match the query. The query must be expressed in using the SELECT
style syntax rather than the original SimpleDB query language.
Even though the select request does not require a domain object,
a domain object must be passed into this method so the Item objects
returned can point to the appropriate domain.
:type domain_or_name: string or :class:`boto.sdb.domain.Domain` object
:param domain_or_name: Either the name of a domain or a Domain object
:type query: string
:param query: The SimpleDB query to be performed.
:type next_token: str
:param next_token: Token to resume pagination from a previous Select result.
:type consistent_read: bool
:param consistent_read: When set to true, ensures that the most recent
data is returned.
:rtype: ResultSet
:return: An iterator containing the results.
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'SelectExpression' : query}
if consistent_read:
params['ConsistentRead'] = 'true'
if next_token:
params['NextToken'] = next_token
try:
return self.get_list('Select', params, [('Item', self.item_cls)],
parent=domain)
except SDBResponseError, e:
e.body = "Query: %s\n%s" % (query, e.body)
raise e
|
python
|
def number(input):
"""Convert the given input to a floating point or integer value.
In cases of ambiguity, integers will be preferred to floating point.
:param input: the value to convert to a number
:type input: any
:returns: converted numeric value
:rtype: float or int
"""
try:
return int(input)
except (TypeError, ValueError):
pass
try:
return float(input)
except (TypeError, ValueError):
raise ValueError("Unable to convert {0!r} to a number.".format(input))
|
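A quick demonstration of the integer-over-float preference described in the docstring, assuming `number` from the snippet above is in scope:

print(number('42'))     # -> 42 (int() succeeds, so int wins)
print(number('42.5'))   # -> 42.5 (int() fails, float() succeeds)
print(number(3.0))      # -> 3 (int(3.0) succeeds, so the float collapses to int)
try:
    number('forty-two')
except ValueError as err:
    print(err)          # -> Unable to convert 'forty-two' to a number.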
java
|
public TrackInfos getTrackInfos(final Integer msgId, Application app)
throws UnknownMessageIdException {
logger.info("WS SendTrack receives the client request with parameter msgId = " + msgId);
TrackInfos infos = new TrackInfos();
infos.setNbDestTotal(daoService.getNbDest(msgId, app));
if (infos.getNbDestTotal() == 0) {
throw new UnknownMessageIdException();
}
infos.setNbDestBlackList(daoService.getNbSmsWithState(msgId, app, blacklistStatuses()));
infos.setNbSentSMS(daoService.getNbSentSMS(msgId, app));
infos.setNbProgressSMS(daoService.getNbProgressSMS(msgId, app));
infos.setNbErrorSMS(daoService.getNbErrorSMS(msgId, app, errorStatuses()));
infos.setListNumErreur(sms2phones(daoService.getListNumErreur(msgId, app, errorStatuses())));
logger.info("Response TrackInfos object, for the client of WS SendTrack : " +
"TrackInfos.NbDestTotal : " + infos.getNbDestTotal().toString() +
"TrackInfos.NbSentSMS : " + infos.getNbSentSMS().toString() +
"TrackInfos.NbProgressSMS : " + infos.getNbProgressSMS().toString() +
"TrackInfos.NbDestBlackList :" + infos.getNbDestBlackList().toString() +
"TrackInfos.NbErrorSMS : " + infos.getNbErrorSMS().toString());
Set<String> listnums = infos.getListNumErreur();
for (String phone : listnums) {
logger.info("TrackInfos.NumErreur : " + phone);
}
return infos;
}
|
python
|
def tty_create_child(args):
"""
Return a file descriptor connected to the master end of a pseudo-terminal,
whose slave end is connected to stdin/stdout/stderr of a new child process.
The child is created such that the pseudo-terminal becomes its controlling
TTY, ensuring access to /dev/tty returns a new file descriptor open on the
slave end.
:param list args:
:py:func:`os.execl` argument list.
:returns:
`(pid, tty_fd, None)`
"""
master_fd, slave_fd = openpty()
try:
mitogen.core.set_block(slave_fd)
disable_echo(master_fd)
disable_echo(slave_fd)
pid = detach_popen(
args=args,
stdin=slave_fd,
stdout=slave_fd,
stderr=slave_fd,
preexec_fn=_acquire_controlling_tty,
close_fds=True,
)
except Exception:
os.close(master_fd)
os.close(slave_fd)
raise
os.close(slave_fd)
LOG.debug('tty_create_child() child %d fd %d, parent %d, cmd: %s',
pid, master_fd, os.getpid(), Argv(args))
return pid, master_fd, None
|
java
|
public void init() {
nodeState = StateHelper.getStateImpl();
try {
nodeState.load();
} catch (Exception ex) {
throw new RuntimeException(ex);
}
nodesSet = nodeState.getNodes();
Iterator it = nodesSet.iterator();
allNodes = new Node[nodesSet.size()];
int i = 0;
while (it.hasNext()) {
allNodes[i++] = (Node) it.next();
}
isInit = true;
}
|
java
|
public static InternationalFixedDate now(Clock clock) {
LocalDate now = LocalDate.now(clock);
return InternationalFixedDate.ofEpochDay(now.toEpochDay());
}
|
python
|
def simulate_experiment(self, modelparams, expparams, repeat=1):
"""
Produces data according to the given model parameters and experimental
parameters, structured as a NumPy array.
:param np.ndarray modelparams: A shape ``(n_models, n_modelparams)``
array of model parameter vectors describing the hypotheses under
which data should be simulated.
:param np.ndarray expparams: A shape ``(n_experiments, )`` array of
experimental control settings, with ``dtype`` given by
:attr:`~qinfer.Model.expparams_dtype`, describing the
experiments whose outcomes should be simulated.
:param int repeat: How many times the specified experiment should
be repeated.
:rtype: np.ndarray
:return: A three-index tensor ``data[i, j, k]``, where ``i`` is the repetition,
``j`` indexes which vector of model parameters was used, and where
``k`` indexes which experimental parameters were used. If ``repeat == 1``,
``len(modelparams) == 1`` and ``len(expparams) == 1``, then a scalar
datum is returned instead.
"""
self._sim_count += modelparams.shape[0] * expparams.shape[0] * repeat
assert(self.are_expparam_dtypes_consistent(expparams))
|
python
|
def column_exists(cr, table, column):
""" Check whether a certain column exists """
cr.execute(
'SELECT count(attname) FROM pg_attribute '
'WHERE attrelid = '
'( SELECT oid FROM pg_class WHERE relname = %s ) '
'AND attname = %s',
(table, column))
return cr.fetchone()[0] == 1
|
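A usage sketch for `column_exists`; it only assumes a DB-API cursor on a PostgreSQL database (the psycopg2 connection below is hypothetical):

import psycopg2

conn = psycopg2.connect(dbname='mydb')  # hypothetical database
cr = conn.cursor()
if not column_exists(cr, 'res_partner', 'legacy_code'):
    print('column missing; a migration step would add it here')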
java
|
public void add(final Condition condition) {
if (andConditions.isEmpty()) {
andConditions.add(new AndCondition());
}
andConditions.get(0).getConditions().add(condition);
}
|
python
|
def load_cities(self, filename):
"""
Load up all cities in lowercase for easier matching. The file should have one city per line, with no extra
characters. This isn't strictly required, but will vastly increase the accuracy.
"""
with open(filename, 'r') as f:
for line in f:
self.cities.append(line.strip().lower())
|
python
|
def register_shipping_query_handler(self, callback, *custom_filters, state=None, run_task=None,
**kwargs):
"""
Register handler for shipping query
Example:
.. code-block:: python3
dp.register_shipping_query_handler(some_shipping_query_handler, lambda shipping_query: True)
:param callback:
:param state:
:param custom_filters:
:param run_task: run callback in task (no wait results)
:param kwargs:
"""
filters_set = self.filters_factory.resolve(self.shipping_query_handlers,
*custom_filters,
state=state,
**kwargs)
self.shipping_query_handlers.register(self._wrap_async_task(callback, run_task), filters_set)
|
java
|
protected String getArtifactId(String artifactId) {
return this.artifactId == null || this.artifactId.length() == 0
? artifactId
: this.artifactId;
}
|
python
|
def set(self, instance, value, **kwargs):
"""writes the value to the same named field on the proxy object
"""
# Retrieve the proxy object
proxy_object = self.get_proxy(instance)
# Return None if we could not find a proxied object, e.g. through
# the proxy expression 'context.getSample()' on an AR
if not proxy_object:
logger.debug("Expression '{}' did not return a valid Proxy Object on {}"
.format(self.proxy, instance))
return None
# Lookup the proxied field by name
field_name = self.getName()
field = proxy_object.getField(field_name)
# Bail out if the proxy object has no identical named field.
if field is None:
raise KeyError("Object '{}' with id '{}' has no field named '{}'".format(
proxy_object.portal_type, proxy_object.getId(), field_name))
# set the value on the proxy object
field.set(proxy_object, value, **kwargs)
# get the current time
now = DateTime.DateTime()
# update the modification date of the proxied object
proxy_object.setModificationDate(now)
# update the modification date of the holding object
instance.setModificationDate(now)
|
java
|
public com.google.api.ads.admanager.axis.v201902.DateRange getTimeSeriesDateRange() {
return timeSeriesDateRange;
}
|
python
|
def import_name(app, name):
"""Import the given name and return name, obj, parent, mod_name
:param name: name to import
:type name: str
:returns: the imported object or None
:rtype: object | None
:raises: None
"""
try:
logger.debug('Importing %r', name)
name, obj = autosummary.import_by_name(name)[:2]
logger.debug('Imported %s', obj)
return obj
except ImportError as e:
logger.warn("Jinjapidoc failed to import %r: %s", name, e)
|
java
|
private CodeableConcept makeUnits(String text, String ucum) {
if (Utilities.noString(text) && Utilities.noString(ucum))
return null;
CodeableConcept cc = new CodeableConcept();
cc.setText(text);
cc.getCoding().add(new Coding().setCode(ucum).setSystem("http://unitsofmeasure.org"));
return cc;
}
|
python
|
def execstr(self, local_name):
"""returns a string which when evaluated will
add the stored variables to the current namespace
localname is the name of the variable in the current scope
* use locals().update(dyn.to_dict()) instead
"""
execstr = ''
for (key, val) in six.iteritems(self.__dict__):
if key not in self._printable_exclude:
execstr += key + ' = ' + local_name + '.' + key + '\n'
return execstr
|
python
|
def get_learning_curves(self, lc_extractor=extract_HBS_learning_curves, config_ids=None):
"""
extracts all learning curves from all run configurations
Parameters
----------
lc_extractor: callable
a function returning a list of learning curves.
defaults to hpbandster.HB_result.extract_HBS_learning_curves
config_ids: list of valid config ids
if only a subset of the config ids is wanted
Returns
-------
dict
a dictionary with the config_ids as keys and the
learning curves as values
"""
config_ids = self.data.keys() if config_ids is None else config_ids
lc_dict = {}
for config_id in config_ids:
runs = self.get_runs_by_id(config_id)
lc_dict[config_id] = lc_extractor(runs)
return lc_dict
|
java
|
private void restoreWithRescaling(Collection<KeyedStateHandle> restoreStateHandles) throws Exception {
// Prepare for restore with rescaling
KeyedStateHandle initialHandle = RocksDBIncrementalCheckpointUtils.chooseTheBestStateHandleForInitial(
restoreStateHandles, keyGroupRange);
// Init base DB instance
if (initialHandle != null) {
restoreStateHandles.remove(initialHandle);
initDBWithRescaling(initialHandle);
} else {
openDB();
}
// Transfer remaining key-groups from temporary instance into base DB
byte[] startKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
RocksDBKeySerializationUtils.serializeKeyGroup(keyGroupRange.getStartKeyGroup(), startKeyGroupPrefixBytes);
byte[] stopKeyGroupPrefixBytes = new byte[keyGroupPrefixBytes];
RocksDBKeySerializationUtils.serializeKeyGroup(keyGroupRange.getEndKeyGroup() + 1, stopKeyGroupPrefixBytes);
for (KeyedStateHandle rawStateHandle : restoreStateHandles) {
if (!(rawStateHandle instanceof IncrementalRemoteKeyedStateHandle)) {
throw new IllegalStateException("Unexpected state handle type, " +
"expected " + IncrementalRemoteKeyedStateHandle.class +
", but found " + rawStateHandle.getClass());
}
Path temporaryRestoreInstancePath = new Path(instanceBasePath.getAbsolutePath() + UUID.randomUUID().toString());
try (RestoredDBInstance tmpRestoreDBInfo = restoreDBInstanceFromStateHandle(
(IncrementalRemoteKeyedStateHandle) rawStateHandle,
temporaryRestoreInstancePath);
RocksDBWriteBatchWrapper writeBatchWrapper = new RocksDBWriteBatchWrapper(this.db)) {
List<ColumnFamilyDescriptor> tmpColumnFamilyDescriptors = tmpRestoreDBInfo.columnFamilyDescriptors;
List<ColumnFamilyHandle> tmpColumnFamilyHandles = tmpRestoreDBInfo.columnFamilyHandles;
// iterating only the requested descriptors automatically skips the default column family handle
for (int i = 0; i < tmpColumnFamilyDescriptors.size(); ++i) {
ColumnFamilyHandle tmpColumnFamilyHandle = tmpColumnFamilyHandles.get(i);
ColumnFamilyHandle targetColumnFamilyHandle = getOrRegisterStateColumnFamilyHandle(
null, tmpRestoreDBInfo.stateMetaInfoSnapshots.get(i))
.columnFamilyHandle;
try (RocksIteratorWrapper iterator = RocksDBOperationUtils.getRocksIterator(tmpRestoreDBInfo.db, tmpColumnFamilyHandle)) {
iterator.seek(startKeyGroupPrefixBytes);
while (iterator.isValid()) {
if (RocksDBIncrementalCheckpointUtils.beforeThePrefixBytes(iterator.key(), stopKeyGroupPrefixBytes)) {
writeBatchWrapper.put(targetColumnFamilyHandle, iterator.key(), iterator.value());
} else {
// Since the iterator will visit the record according to the sorted order,
// we can just break here.
break;
}
iterator.next();
}
} // releases native iterator resources
}
} finally {
cleanUpPathQuietly(temporaryRestoreInstancePath);
}
}
}
|
python
|
def loadFile(self, filePath=None):
"""Load the specified file, or the last opened file if None."""
self.resetState()
self.canvas.setEnabled(False)
if filePath is None:
filePath = self.settings.get(SETTING_FILENAME)
# Make sure that filePath is a regular python string, rather than QString
filePath = ustr(filePath)
unicodeFilePath = ustr(filePath)
# Tzutalin 20160906 : Add file list and dock to move faster
# Highlight the file item
if unicodeFilePath and self.fileListWidget.count() > 0:
index = self.mImgList.index(unicodeFilePath)
fileWidgetItem = self.fileListWidget.item(index)
fileWidgetItem.setSelected(True)
if unicodeFilePath and os.path.exists(unicodeFilePath):
if LabelFile.isLabelFile(unicodeFilePath):
try:
self.labelFile = LabelFile(unicodeFilePath)
except LabelFileError as e:
self.errorMessage(u'Error opening file',
(u"<p><b>%s</b></p>"
u"<p>Make sure <i>%s</i> is a valid label file.")
% (e, unicodeFilePath))
self.status("Error reading %s" % unicodeFilePath)
return False
self.imageData = self.labelFile.imageData
self.lineColor = QColor(*self.labelFile.lineColor)
self.fillColor = QColor(*self.labelFile.fillColor)
self.canvas.verified = self.labelFile.verified
else:
# Load image:
# read data first and store for saving into label file.
self.imageData = read(unicodeFilePath, None)
self.labelFile = None
self.canvas.verified = False
image = QImage.fromData(self.imageData)
if image.isNull():
self.errorMessage(u'Error opening file',
u"<p>Make sure <i>%s</i> is a valid image file." % unicodeFilePath)
self.status("Error reading %s" % unicodeFilePath)
return False
self.status("Loaded %s" % os.path.basename(unicodeFilePath))
self.image = image
self.filePath = unicodeFilePath
self.canvas.loadPixmap(QPixmap.fromImage(image))
if self.labelFile:
self.loadLabels(self.labelFile.shapes)
self.setClean()
self.canvas.setEnabled(True)
self.adjustScale(initial=True)
self.paintCanvas()
self.addRecentFile(self.filePath)
self.toggleActions(True)
# Label xml file and show bound box according to its filename
# if self.usingPascalVocFormat is True:
if self.defaultSaveDir is not None:
basename = os.path.basename(
os.path.splitext(self.filePath)[0])
xmlPath = os.path.join(self.defaultSaveDir, basename + XML_EXT)
txtPath = os.path.join(self.defaultSaveDir, basename + TXT_EXT)
"""Annotation file priority:
PascalXML > YOLO
"""
if os.path.isfile(xmlPath):
self.loadPascalXMLByFilename(xmlPath)
elif os.path.isfile(txtPath):
self.loadYOLOTXTByFilename(txtPath)
else:
xmlPath = os.path.splitext(filePath)[0] + XML_EXT
txtPath = os.path.splitext(filePath)[0] + TXT_EXT
if os.path.isfile(xmlPath):
self.loadPascalXMLByFilename(xmlPath)
elif os.path.isfile(txtPath):
self.loadYOLOTXTByFilename(txtPath)
self.setWindowTitle(__appname__ + ' ' + filePath)
# Default : select last item if there is at least one item
if self.labelList.count():
self.labelList.setCurrentItem(self.labelList.item(self.labelList.count()-1))
self.labelList.item(self.labelList.count()-1).setSelected(True)
self.canvas.setFocus(True)
return True
return False
|
java
|
public Quaterniond rotationYXZ(double angleY, double angleX, double angleZ) {
double sx = Math.sin(angleX * 0.5);
double cx = Math.cosFromSin(sx, angleX * 0.5);
double sy = Math.sin(angleY * 0.5);
double cy = Math.cosFromSin(sy, angleY * 0.5);
double sz = Math.sin(angleZ * 0.5);
double cz = Math.cosFromSin(sz, angleZ * 0.5);
double x = cy * sx;
double y = sy * cx;
double z = sy * sx;
double w = cy * cx;
this.x = x * cz + y * sz;
this.y = y * cz - x * sz;
this.z = w * sz - z * cz;
this.w = w * cz + z * sz;
return this;
}
|
python
|
def contents_equal(self, other, **kwargs):
"""
Test the equality of the fileset contents with another fileset. If the
fileset's format implements a 'contents_equal' method then that is used
to determine the equality, otherwise a straight comparison of the
checksums is used.
Parameters
----------
other : Fileset
The other fileset to compare to
"""
if hasattr(self.format, 'contents_equal'):
equal = self.format.contents_equal(self, other, **kwargs)
else:
equal = (self.checksums == other.checksums)
return equal
|
python
|
def get_adjacent_index(I, shape, size):
"""
Find indices 2d-adjacent to those in I. Helper function for get_border*.
Parameters
----------
I : np.ndarray(dtype=int)
indices in the flattened region
shape : tuple(int, int)
region shape
size : int
region size (technically computable from shape)
Returns
-------
J : np.ndarray(dtype=int)
indices orthogonally and diagonally adjacent to I
"""
m, n = shape
In = I % n
bL = In != 0
bR = In != n-1
J = np.concatenate([
# orthogonally adjacent
I - n,
I[bL] - 1,
I[bR] + 1,
I + n,
# diagonally adjacent
I[bL] - n-1,
I[bR] - n+1,
I[bL] + n-1,
I[bR] + n+1])
# remove indices outside the array
J = J[(J>=0) & (J<size)]
return J
|
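A small worked example on a 3x3 grid, assuming numpy and the function above: the centre cell (flat index 4) should yield all eight neighbours.

import numpy as np

I = np.array([4])  # centre of a 3x3 grid, flattened row-major
J = get_adjacent_index(I, shape=(3, 3), size=9)
print(sorted(J))   # -> [0, 1, 2, 3, 5, 6, 7, 8]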
python
|
def _ctrl_meas(self):
"""Value to be written to the device's ctrl_meas register """
ctrl_meas = (self.overscan_temperature << 5)
ctrl_meas += (self.overscan_pressure << 2)
ctrl_meas += self.mode
return ctrl_meas
|
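The bit packing above is easier to see with concrete values. This standalone sketch mirrors the layout (temperature oversampling in bits 7-5, pressure oversampling in bits 4-2, mode in bits 1-0, as on BMP280-style sensors); the sample values are arbitrary:

overscan_temperature = 0b010  # example oversampling setting
overscan_pressure = 0b101     # example oversampling setting
mode = 0b11                   # example mode bits

ctrl_meas = (overscan_temperature << 5) + (overscan_pressure << 2) + mode
print(bin(ctrl_meas))         # -> 0b1010111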
java
|
public Map<String, String> getCellValuesAsMap(String regex, int groupIdx,
List<ReplacePattern> replaces) {
Map<String, String> map = new TreeMap<String, String>();
Pattern pattern = Pattern.compile(regex);
for (Entry<String, String> entry : getData().entrySet()) {
Matcher matcher = pattern.matcher(entry.getKey());
if (matcher.matches()) {
String value = value(entry.getValue(), replaces);
map.put(matcher.group(groupIdx), value);
}
}
return map;
}
|
java
|
@Override
public RandomVariable getBrownianIncrement(int timeIndex, int factor) {
RandomVariable brownianIncrement = new RandomVariableFromDoubleArray(0.0);
for(int factorIndex=0; factorIndex<factorLoadings[factor].length; factorIndex++) {
if(factorLoadings[factor][factorIndex] != 0) {
RandomVariable independentFactor = uncollelatedFactors.getBrownianIncrement(timeIndex, factorIndex);
brownianIncrement = brownianIncrement.addProduct(independentFactor, factorLoadings[factor][factorIndex]);
}
}
return brownianIncrement;
}
|
python
|
def has_pkgs_signed_with(self, allowed_keys):
"""
Check signature of packages installed in image.
Raises exception when
* rpm binary is not installed in image
* parsing of rpm fails
* there are packages in image that are not signed with one of allowed keys
:param allowed_keys: list of allowed keys
:return: bool
"""
if not allowed_keys or not isinstance(allowed_keys, list):
raise ConuException("allowed_keys must be a list")
command = ['rpm', '-qa', '--qf', '%{name} %{SIGPGP:pgpsig}\n']
cont = self.run_via_binary(command=command)
try:
out = cont.logs_unicode()[:-1].split('\n')
check_signatures(out, allowed_keys)
finally:
cont.stop()
cont.delete()
return True
|
python
|
def read_tf_checkpoint(path):
"""read tensorflow checkpoint"""
from tensorflow.python import pywrap_tensorflow
tensors = {}
reader = pywrap_tensorflow.NewCheckpointReader(path)
var_to_shape_map = reader.get_variable_to_shape_map()
for key in sorted(var_to_shape_map):
tensor = reader.get_tensor(key)
tensors[key] = tensor
return tensors
|
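A usage sketch for `read_tf_checkpoint`; the checkpoint path is hypothetical, and the helper needs the TF1-era `pywrap_tensorflow` module to be available:

tensors = read_tf_checkpoint('/path/to/model.ckpt')  # hypothetical path
for name, arr in sorted(tensors.items()):
    print(name, arr.shape)  # each value is a numpy array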
java
|
private void resolve(Collection<Tile> resolved, Collection<Tile> toResolve, Tile tile)
{
updateTile(resolved, toResolve, tile, -1, 0);
updateTile(resolved, toResolve, tile, 1, 0);
updateTile(resolved, toResolve, tile, 0, 1);
updateTile(resolved, toResolve, tile, 0, -1);
updateTile(resolved, toResolve, tile, -1, 1);
updateTile(resolved, toResolve, tile, 1, 1);
updateTile(resolved, toResolve, tile, -1, -1);
updateTile(resolved, toResolve, tile, 1, -1);
}
|
python
|
def _parse_all_merged_entities(self):
"""set self._all_merged_entities to the longest possible(wrapping)
tokens including non-entity tokens
"""
self._all_merged_entities = list(filterfalse(
lambda token: self._is_wrapped(token, self.all_entities),
self.all_entities))
|
python
|
def fabs(x):
"""
Absolute value function
"""
if isinstance(x, UncertainFunction):
mcpts = np.fabs(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.fabs(x)
|
python
|
def get_scorer(scoring, compute=True):
"""Get a scorer from string
Parameters
----------
scoring : str | callable
scoring method as string. If callable it is returned as is.
compute : bool, optional
forwarded to the scorer as ``compute`` (default True).
Returns
-------
scorer : callable
The scorer.
"""
# This is the same as sklearn's, only we use our SCORERS dict,
# and don't have back-compat code
if isinstance(scoring, six.string_types):
try:
scorer, kwargs = SCORERS[scoring]
except KeyError:
raise ValueError(
"{} is not a valid scoring value. "
"Valid options are {}".format(scoring, sorted(SCORERS))
)
else:
scorer = scoring
kwargs = {}
kwargs["compute"] = compute
return make_scorer(scorer, **kwargs)
|
python
|
def build(self):
"""
Build the schema (object graph) using the root node
using the factory.
- Build the graph.
- Collate the children.
"""
self.children = BasicFactory.build(self.root, self)
collated = BasicFactory.collate(self.children)
self.children = collated[0]
self.attributes = collated[2]
self.imports = collated[1]
self.elements = collated[3]
self.types = collated[4]
self.groups = collated[5]
self.agrps = collated[6]
|
java
|
public void readExternal(PofReader reader)
throws IOException {
super.readExternal(reader);
numInc = (Number) reader.readObject(10);
fPostInc = reader.readBoolean(11);
}
|
python
|
def show_window_options(self, option=None, g=False):
"""
Return a dict of options for the window.
For familiarity with tmux, the option ``option`` param forwards to
pick a single option, forwarding to :meth:`Window.show_window_option`.
Parameters
----------
option : str, optional
show a single option.
g : bool, optional
Pass ``-g`` flag for global variable, default False.
Returns
-------
dict
"""
tmux_args = tuple()
if g:
tmux_args += ('-g',)
if option:
return self.show_window_option(option, g=g)
else:
tmux_args += ('show-window-options',)
cmd = self.cmd(*tmux_args).stdout
# The shlex.split function splits the args at spaces, while also
# retaining quoted sub-strings.
# shlex.split('this is "a test"') => ['this', 'is', 'a test']
cmd = [tuple(shlex.split(item)) for item in cmd]
window_options = dict(cmd)
for key, value in window_options.items():
if value.isdigit():
window_options[key] = int(value)
return window_options
|
java
|
public static <T extends IHasIntegerId> void insertFlatList( List<T> list, TableCollectionManager<T> mng, Func1<T, Integer> parentIdGetter )
{
HashSet<Integer> inserted = new HashSet<>();
List<T> toInsert = new ArrayList<>( list );
List<T> postPoned = new ArrayList<>();
List<T> inOrder = new ArrayList<>();
int nbInserted;
while( ! toInsert.isEmpty() )
{
nbInserted = 0;
while( ! toInsert.isEmpty() )
{
T a = toInsert.remove( 0 );
Integer parentId = parentIdGetter.exec( a );
if( parentId==null || parentId<=0 || inserted.contains( parentId ) || mng.getRowForRecordId( parentId )!=null )
{
inOrder.add( a );
inserted.add( a.getId() );
nbInserted++;
}
else
{
postPoned.add( a );
}
}
toInsert = postPoned;
postPoned = new ArrayList<>();
if( nbInserted == 0 && ! toInsert.isEmpty() )
{
GWT.log("Cannot construct full tree !");
throw new RuntimeException( "Cannot construct full tree !" );
}
}
for( T t : inOrder )
mng.getDataPlug().updated( t );
}
|
python
|
def open(self, _file, target=DEFAULT_TARGET):
"""
Open the existing file for reading.
@param _file : A filename of file descriptor.
@param target: A user-specific BFD target name.
@return : None
"""
# Close any existing BFD structure instance.
self.close()
#
# STEP 1. Open the BFD pointer.
#
# Determine if the user passed a file-descriptor or a _file and
# proceed accordingly.
if type(_file) is FileType:
# The user specified a file descriptor.
filename = _file.name
if islink(filename):
raise BfdException("Symlinks file-descriptors are not valid")
try:
self._ptr = _bfd.fdopenr(filename, target, dup(_file.fileno()))
except Exception, err:
raise BfdException(
"Unable to open file-descriptor %s : %s" % (filename, err))
elif type(_file) is StringType:
# The user specified a filename so first check if the file exists.
filename = _file
try:
with open(_file): pass
except IOError:
raise BfdException("File %s does not exist." % filename)
#
# Proceed to open the specified file and create a new BFD.
#
try:
self._ptr = _bfd.openr(filename, target)
except (TypeError, IOError), err:
raise BfdException(
"Unable to open file %s : %s" % (filename, err))
elif type(_file) is IntType:
# The user specified an already-open BFD pointer so we avoid any
# further open operation and move on to file format recognition.
self._ptr = _file
else:
raise BfdException(
"Invalid file type specified for open operation (%r)" % _file)
#
# STEP 2. Determine file format of the BFD.
#
# Now that the BFD is open we'll proceed to determine its file format.
# We'll use the objdump logic to determine it and raise an error in
# case we were unable to get it right.
#
try:
# Try opening it as an archive and if it succeeds then check
# subfiles.
if _bfd.check_format(self._ptr, BfdFormat.ARCHIVE):
# Set current format and store the inner file list.
self.file_format = BfdFormat.ARCHIVE
self.__populate_archive_files()
else:
# DO NOT USE bfd_check_format_matches() because it's not tested.
# An implementation example is in objdump.c at function
# display_bfd().
if _bfd.check_format(self._ptr, BfdFormat.OBJECT):
self.file_format = BfdFormat.OBJECT
elif _bfd.check_format(self._ptr, BfdFormat.CORE):
self.file_format = BfdFormat.CORE
else:
    # Neither object nor core format matched; report the BFD error.
    raise BfdException(_bfd.get_last_error_message())
except TypeError, err:
raise BfdException(
"Unable to initialize file format : %s" % err)
#
# STEP 3. Extract inner sections and symbolic information.
#
if self._ptr is not None:
# If the file is a valid BFD file format but not an archive then
# get its sections and symbolic information (if any).
if self.file_format in [BfdFormat.OBJECT, BfdFormat.CORE]:
self.__populate_sections()
self.__populate_symbols()
|
java
|
public AttributeDataset numericDataset(String responseColName, String... variablesColNames) {
return dataset(table.numberColumn(responseColName), AttributeType.NUMERIC, table.columns(variablesColNames));
}
|
python
|
def intent_path(cls, project, intent):
"""Return a fully-qualified intent string."""
return google.api_core.path_template.expand(
'projects/{project}/agent/intents/{intent}',
project=project,
intent=intent,
)
|
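Expanding the template by hand shows what the classmethod returns; the ids below are hypothetical:

# Plain-string equivalent of the path template used above.
project, intent = 'my-project', '1234-abcd'
print('projects/{project}/agent/intents/{intent}'.format(
    project=project, intent=intent))
# -> projects/my-project/agent/intents/1234-abcd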
python
|
def assign_value(self, comp_def, value, src_ref):
"""
Set both alias and actual value
"""
super().assign_value(comp_def, value, src_ref)
comp_def.properties['incrthreshold'] = value
|
java
|
public static double getRMSD(MultipleAlignment alignment) {
List<Atom[]> trans = MultipleAlignmentTools.transformAtoms(alignment);
return getRMSD(trans);
}
|
python
|
def return_markers(self):
"""Return all the markers (also called triggers or events).
Returns
-------
list of dict
where each dict contains 'name' as str, 'start' and 'end' as float
in seconds from the start of the recordings, and 'chan' as list of
str with the channels involved (if not of relevance, it's None).
Raises
------
FileNotFoundError
when it cannot read the events for some reason (don't use other
exceptions).
"""
markers = []
triggers = self._triggers
DTYPE_MAX = iinfo(triggers.dtype['sample']).max
triggers = triggers[triggers['sample'] != DTYPE_MAX]
for trig in triggers:
markers.append(
{'name': str(trig['code']),
'start': trig['sample'] / self._s_freq,
'end': trig['sample'] / self._s_freq,
})
return markers
|
python
|
def sort(self, callback=None):
"""
Sort through each item with a callback.
:param callback: The callback
:type callback: callable or None
:rtype: Collection
"""
items = self.items
if callback:
return self.__class__(sorted(items, key=callback))
else:
return self.__class__(sorted(items))
|
java
|
public synchronized void push(@Nullable T item) {
if( waiting == null ) {
throw new IllegalStateException("Invalid attempt to add an item to a completed list.");
}
if( filter != null ) {
try {
if( !filter.filter(item) ) {
return;
}
}
catch( Throwable t ) {
logger.error("[" + this + "] Error filtering " + item + ": " + t.getMessage());
Exception e;
if( t instanceof Exception ) {
e = (Exception)t;
}
else {
e = new RuntimeException(t);
}
setLoadException(e);
return;
}
}
waiting.add(item);
lastTouch = System.currentTimeMillis();
notifyAll();
}
|
java
|
public Swagger2MarkupConfigBuilder withMarkupLanguage(MarkupLanguage markupLanguage) {
Validate.notNull(markupLanguage, "%s must not be null", "markupLanguage");
config.markupLanguage = markupLanguage;
return this;
}
|
python
|
def compute_key_composite(password=None, keyfile=None):
"""Compute composite key.
Used in header verification and payload decryption."""
# hash the password
if password:
password_composite = hashlib.sha256(password.encode('utf-8')).digest()
else:
password_composite = b''
# hash the keyfile
if keyfile:
# try to read XML keyfile
try:
with open(keyfile, 'r') as f:
tree = etree.parse(f).getroot()
keyfile_composite = base64.b64decode(tree.find('Key/Data').text)
# otherwise, try to read plain keyfile
except (etree.XMLSyntaxError, UnicodeDecodeError):
try:
with open(keyfile, 'rb') as f:
key = f.read()
try:
int(key, 16)
is_hex = True
except ValueError:
is_hex = False
# if the length is 32 bytes we assume it is the key
if len(key) == 32:
keyfile_composite = key
# if the length is 64 bytes we assume the key is hex encoded
elif len(key) == 64 and is_hex:
keyfile_composite = codecs.decode(key, 'hex')
# anything else may be a file to hash for the key
else:
keyfile_composite = hashlib.sha256(key).digest()
except Exception:
raise IOError('Could not read keyfile')
else:
keyfile_composite = b''
# create composite key from password and keyfile composites
return hashlib.sha256(password_composite + keyfile_composite).digest()
|
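In the password-only case the composite reduces to a double SHA-256, which follows directly from the code above (assuming the function is in scope; the password is hypothetical):

import hashlib

password = 'correct horse'
inner = hashlib.sha256(password.encode('utf-8')).digest()
assert compute_key_composite(password=password) == hashlib.sha256(inner).digest()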
python
|
def sinkhorn_knopp(a, b, M, reg, numItermax=1000,
stopThr=1e-9, verbose=False, log=False, **kwargs):
"""
Solve the entropic regularization optimal transport problem and return the OT matrix
The function solves the following optimization problem:
.. math::
\gamma = arg\min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma)
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- M is the (ns,nt) metric cost matrix
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- a and b are source and target weights (sum to 1)
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [2]_
Parameters
----------
a : np.ndarray (ns,)
samples weights in the source domain
b : np.ndarray (nt,) or np.ndarray (nt,nbb)
samples in the target domain, compute sinkhorn with multiple targets
and fixed M if b is a matrix (return OT loss + dual variables in log)
M : np.ndarray (ns,nt)
loss matrix
reg : float
Regularization term >0
numItermax : int, optional
Max number of iterations
stopThr : float, optional
Stop threshold on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
gamma : (ns x nt) ndarray
Optimal transportation matrix for the given parameters
log : dict
log dictionary return only if log==True in parameters
Examples
--------
>>> import ot
>>> a=[.5,.5]
>>> b=[.5,.5]
>>> M=[[0.,1.],[1.,0.]]
>>> ot.sinkhorn(a,b,M,1)
array([[ 0.36552929, 0.13447071],
[ 0.13447071, 0.36552929]])
References
----------
.. [2] M. Cuturi, Sinkhorn Distances : Lightspeed Computation of Optimal Transport, Advances in Neural Information Processing Systems (NIPS) 26, 2013
See Also
--------
ot.lp.emd : Unregularized OT
ot.optim.cg : General regularized OT
"""
a = np.asarray(a, dtype=np.float64)
b = np.asarray(b, dtype=np.float64)
M = np.asarray(M, dtype=np.float64)
if len(a) == 0:
a = np.ones((M.shape[0],), dtype=np.float64) / M.shape[0]
if len(b) == 0:
b = np.ones((M.shape[1],), dtype=np.float64) / M.shape[1]
# init data
Nini = len(a)
Nfin = len(b)
if len(b.shape) > 1:
nbb = b.shape[1]
else:
nbb = 0
if log:
log = {'err': []}
# we assume that no distances are null except those of the diagonal of
# distances
if nbb:
u = np.ones((Nini, nbb)) / Nini
v = np.ones((Nfin, nbb)) / Nfin
else:
u = np.ones(Nini) / Nini
v = np.ones(Nfin) / Nfin
# print(reg)
# Next 3 lines equivalent to K= np.exp(-M/reg), but faster to compute
K = np.empty(M.shape, dtype=M.dtype)
np.divide(M, -reg, out=K)
np.exp(K, out=K)
# print(np.min(K))
tmp2 = np.empty(b.shape, dtype=M.dtype)
Kp = (1 / a).reshape(-1, 1) * K
cpt = 0
err = 1
while (err > stopThr and cpt < numItermax):
uprev = u
vprev = v
KtransposeU = np.dot(K.T, u)
v = np.divide(b, KtransposeU)
u = 1. / np.dot(Kp, v)
if (np.any(KtransposeU == 0)
or np.any(np.isnan(u)) or np.any(np.isnan(v))
or np.any(np.isinf(u)) or np.any(np.isinf(v))):
# we have reached the machine precision
# come back to previous solution and quit loop
print('Warning: numerical errors at iteration', cpt)
u = uprev
v = vprev
break
if cpt % 10 == 0:
# we can speed up the process by checking for the error only all
# the 10th iterations
if nbb:
err = np.sum((u - uprev)**2) / np.sum((u)**2) + \
np.sum((v - vprev)**2) / np.sum((v)**2)
else:
# compute right marginal tmp2= (diag(u)Kdiag(v))^T1
np.einsum('i,ij,j->j', u, K, v, out=tmp2)
err = np.linalg.norm(tmp2 - b)**2 # violation of marginal
if log:
log['err'].append(err)
if verbose:
if cpt % 200 == 0:
print(
'{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19)
print('{:5d}|{:8e}|'.format(cpt, err))
cpt = cpt + 1
if log:
log['u'] = u
log['v'] = v
if nbb: # return only loss
res = np.einsum('ik,ij,jk,ij->k', u, K, v, M)
if log:
return res, log
else:
return res
else: # return OT matrix
if log:
return u.reshape((-1, 1)) * K * v.reshape((1, -1)), log
else:
return u.reshape((-1, 1)) * K * v.reshape((1, -1))
|
python
|
def batch_remove_retrain(nmask_train, nmask_test, X_train, y_train, X_test, y_test, attr_train, attr_test, model_generator, metric):
""" An approximation of holdout that only retraines the model once.
This is alse called ROAR (RemOve And Retrain) in work by Google. It is much more computationally
efficient that the holdout method because it masks the most important features in every sample
and then retrains the model once, instead of retraining the model for every test sample like
the holdout metric.
"""
warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# mask nmask top features for each explanation
X_train_tmp = X_train.copy()
X_train_mean = X_train.mean(0)
tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
for i in range(len(y_train)):
if nmask_train[i] > 0:
ordering = np.argsort(-attr_train[i, :] + tie_breaking_noise)
X_train_tmp[i, ordering[:nmask_train[i]]] = X_train_mean[ordering[:nmask_train[i]]]
X_test_tmp = X_test.copy()
for i in range(len(y_test)):
if nmask_test[i] > 0:
ordering = np.argsort(-attr_test[i, :] + tie_breaking_noise)
X_test_tmp[i, ordering[:nmask_test[i]]] = X_train_mean[ordering[:nmask_test[i]]]
# train the model with all the given features masked
model_masked = model_generator()
model_masked.fit(X_train_tmp, y_train)
yp_test_masked = model_masked.predict(X_test_tmp)
return metric(y_test, yp_test_masked)
|
python
|
def one_hot(indices, output_dim, on_value=1.0,
off_value=0.0, dtype=tf.float32, name=None):
"""One hot operation.
TODO(noam): Is there a good reason we need a special mtf.Operation here?
We could just use some code like this:
cast(equal(indices, mtf_range(indices.mesh, output_dim, dtype=indices.dtype)),
dtype)
Args:
indices: a Tensor
output_dim: a Dimension
on_value: Value taken when indices are on at a location, default 1
off_value: Value taken when indices are off at a location, default 0
dtype: a tf.DType
name: an optional string
Returns:
a Tensor with shape extended by output_dim for the last axis.
"""
return OneHotOperation(
indices, output_dim, on_value, off_value, dtype, name=name).outputs[0]
|
python
|
def drop_tips(self, names=None, wildcard=None, regex=None):
"""
Returns a copy of the tree with the selected tips removed. The entered
value can be a name or list of names. To prune on an internal node to
create a subtree see the .prune() function instead.
Parameters:
names: list of tip names to drop.
wildcard: substring used to fuzzy-match tip names.
regex: regular expression used to match tip names.
# example:
ptre = tre.drop_tips(names=['a', 'b'])
"""
# make a deepcopy of the tree
nself = self.copy()
# return if nothing to drop
if not any([names, wildcard, regex]):
return nself
# get matching names list with fuzzy match
tipnames = fuzzy_match_tipnames(
ttree=nself,
names=names,
wildcard=wildcard,
regex=regex,
mrca=False,
mono=False,
)
if len(tipnames) == len(nself):
raise ToytreeError("You cannot drop all tips from the tree.")
if not tipnames:
raise ToytreeError("No tips selected.")
keeptips = [i for i in nself.get_tip_labels() if i not in tipnames]
nself.treenode.prune(keeptips, preserve_branch_length=True)
nself._coords.update()
return nself
|
java
|
private float[] LABtoXYZ(float L, float a, float b, float[] xyzResult) {
// Significant speedup: Removing Math.pow
float y = (L + 16.0f) / 116.0f;
float y3 = y * y * y; // Math.pow(y, 3.0);
float x = (a / 500.0f) + y;
float x3 = x * x * x; // Math.pow(x, 3.0);
float z = y - (b / 200.0f);
float z3 = z * z * z; // Math.pow(z, 3.0);
if (y3 > 0.008856f) {
y = y3;
}
else {
y = (y - (16.0f / 116.0f)) / 7.787f;
}
if (x3 > 0.008856f) {
x = x3;
}
else {
x = (x - (16.0f / 116.0f)) / 7.787f;
}
if (z3 > 0.008856f) {
z = z3;
}
else {
z = (z - (16.0f / 116.0f)) / 7.787f;
}
xyzResult[0] = x * whitePoint[0];
xyzResult[1] = y * whitePoint[1];
xyzResult[2] = z * whitePoint[2];
return xyzResult;
}
|
python
|
def _gridmake2(x1, x2):
"""
Expands two vectors (or matrices) into a matrix where rows span the
cartesian product of combinations of the input arrays. Each column of the
input arrays will correspond to one column of the output matrix.
Parameters
----------
x1 : np.ndarray
First vector to be expanded.
x2 : np.ndarray
Second vector to be expanded.
Returns
-------
out : np.ndarray
The cartesian product of combinations of the input arrays.
Notes
-----
Based on the original function ``gridmake2`` in the CompEcon toolbox by
Miranda and Fackler.
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational Economics
and Finance, MIT Press, 2002.
"""
if x1.ndim == 1 and x2.ndim == 1:
return np.column_stack([np.tile(x1, x2.shape[0]),
np.repeat(x2, x1.shape[0])])
elif x1.ndim > 1 and x2.ndim == 1:
first = np.tile(x1, (x2.shape[0], 1))
second = np.repeat(x2, x1.shape[0])
return np.column_stack([first, second])
else:
raise NotImplementedError("Come back here")
|
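A worked example of the 1-D/1-D branch, assuming numpy and the function above:

import numpy as np

x1 = np.array([1, 2])
x2 = np.array([3, 4])
print(_gridmake2(x1, x2))
# -> [[1 3]
#     [2 3]
#     [1 4]
#     [2 4]]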
java
|
protected void createSamplePoints(int numSamples) {
for( int y = 0; y < numSamples; y++ ) {
float regionY = (y/(numSamples-1.0f) - 0.5f);
for( int x = 0; x < numSamples; x++ ) {
float regionX = (x/(numSamples-1.0f) - 0.5f);
samplePts.add( new Point2D_F32(regionX,regionY));
}
}
}
|
java
|
public void getPresenter(final Consumer<Object> presenterConsumer) {
presenterProperty.addListener(
(final ObservableValue<? extends Object> o, final Object oldValue, final Object newValue) -> {
presenterConsumer.accept(newValue);
});
}
|
java
|
public synchronized FhirValidator setValidateAgainstStandardSchematron(boolean theValidateAgainstStandardSchematron) {
if (theValidateAgainstStandardSchematron && !ourPhPresentOnClasspath) {
throw new IllegalArgumentException(myContext.getLocalizer().getMessage(I18N_KEY_NO_PH_ERROR));
}
if (!theValidateAgainstStandardSchematron && !ourPhPresentOnClasspath) {
return this;
}
Class<? extends IValidatorModule> cls = SchematronProvider.getSchematronValidatorClass();
IValidatorModule instance = SchematronProvider.getSchematronValidatorInstance(myContext);
addOrRemoveValidator(theValidateAgainstStandardSchematron, cls, instance);
return this;
}
|
python
|
def upload_file(self, filepath, key):
"""Uploads a file using the passed S3 key
This method uploads a file specified by the filepath to S3
using the provided S3 key.
:param filepath: (str) Full path to the file to be uploaded
:param key: (str) S3 key to be set for the upload
:return: True if upload is successful, False otherwise.
"""
log = logging.getLogger(self.cls_logger + '.upload_file')
log.info('Attempting to upload file %s to S3 bucket %s as key %s...',
filepath, self.bucket_name, key)
if not isinstance(filepath, basestring):
log.error('filepath argument is not a string')
return False
if not isinstance(key, basestring):
log.error('key argument is not a string')
return False
if not os.path.isfile(filepath):
log.error('File not found on file system: %s', filepath)
return False
try:
self.s3client.upload_file(
Filename=filepath, Bucket=self.bucket_name, Key=key)
except ClientError as e:
log.error('Unable to upload file %s to bucket %s as key %s:\n%s',
filepath, self.bucket_name, key, e)
return False
else:
log.info('Successfully uploaded file to S3 bucket %s as key %s',
self.bucket_name, key)
return True
|
python
|
def chk_qualifiers(self):
"""Check format of qualifier"""
if self.name == 'id2gos':
return
for ntd in self.associations:
# print(ntd)
qual = ntd.Qualifier
assert isinstance(qual, set), '{NAME}: QUALIFIER MUST BE A SET: {NT}'.format(
NAME=self.name, NT=ntd)
assert qual != set(['']), ntd
assert qual != set(['-']), ntd
assert 'always' not in qual, 'SPEC SAID IT WOULD BE THERE'
|
python
|
def lfu_cache(max_size=128):
"""
Least Frequently Used cache decorator, implementing :class:`faste.caches.LFUCache`
:keyword max_size: max cache size
"""
def actual_decorator(func):
return _cached_func(func, caches.LFUCache, max_size)
return actual_decorator
|
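A usage sketch for the decorator; `_cached_func` and `caches.LFUCache` come from the `faste` package, so outside it this only illustrates the calling convention:

@lfu_cache(max_size=2)
def square(x):
    return x * x

square(2)  # computed, then cached
square(2)  # served from the LFU cache; least-frequently-used entries are evicted first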
python
|
def read_list_from_csv(filepath, dict_form=False, headers=None, **kwargs):
# type: (str, bool, Union[int, List[int], List[str], None], Any) -> List[Union[Dict, List]]
"""Read a list of rows in dict or list form from a csv. (The headers argument is either a row
number or list of row numbers (in case of multi-line headers) to be considered as headers
(rows start counting at 1), or the actual headers defined as a list of strings. If not set,
all rows will be treated as containing values.)
Args:
filepath (str): Path to read from
dict_form (bool): Return in dict form. Defaults to False.
headers (Union[int, List[int], List[str], None]): Row number of headers. Defaults to None.
**kwargs: Other arguments to pass to Tabulator Stream
Returns:
List[Union[Dict, List]]: List of rows in dict or list form
"""
stream = Stream(filepath, headers=headers, **kwargs)
stream.open()
result = stream.read(keyed=dict_form)
stream.close()
return result
|
python
|
def Rz(rads: Union[float, sympy.Basic]) -> ZPowGate:
"""Returns a gate with the matrix e^{-i Z rads / 2}."""
pi = sympy.pi if protocols.is_parameterized(rads) else np.pi
return ZPowGate(exponent=rads / pi, global_shift=-0.5)
|
python
|
def QA_SU_save_option_min(client=DATABASE, ui_log=None, ui_progress=None):
'''
Save 50ETF option minute bars into MongoDB.
:param client: pymongo database handle
:return: None
'''
option_contract_list = QA_fetch_get_option_contract_time_to_market()
coll_option_min = client.option_day_min
coll_option_min.create_index(
[("code",
pymongo.ASCENDING),
("date_stamp",
pymongo.ASCENDING)]
)
err = []
# indexed by code
def __saving_work(code, coll):
QA_util_log_info(
'##JOB13 Now Saving Option 50ETF MIN ==== {}'.format(str(code)),
ui_log=ui_log
)
try:
for type in ['1min', '5min', '15min', '30min', '60min']:
ref_ = coll.find({'code': str(code)[0:8], 'type': type})
end_time = str(now_time())[0:19]
if ref_.count() > 0:
start_time = ref_[ref_.count() - 1]['datetime']
QA_util_log_info(
'##JOB13.{} Now Saving Option 50ETF {} from {} to {} =={} '
.format(
['1min',
'5min',
'15min',
'30min',
'60min'].index(type),
str(code),
start_time,
end_time,
type
),
ui_log=ui_log
)
if start_time != end_time:
__data = QA_fetch_get_future_min(
str(code),
start_time,
end_time,
type
)
if len(__data) > 1:
QA_util_log_info(
" 写入 新增历史合约记录数 {} ".format(len(__data))
)
coll.insert_many(
QA_util_to_json_from_pandas(__data[1::])
)
else:
start_time = '2015-01-01'
QA_util_log_info(
'##JOB13.{} Now Option 50ETF {} from {} to {} =={} '
.format(
['1min',
'5min',
'15min',
'30min',
'60min'].index(type),
str(code),
start_time,
end_time,
type
),
ui_log=ui_log
)
if start_time != end_time:
__data = QA_fetch_get_future_min(
str(code),
start_time,
end_time,
type
)
if len(__data) > 1:
QA_util_log_info(
" 写入 新增合约记录数 {} ".format(len(__data))
)
coll.insert_many(
QA_util_to_json_from_pandas(__data)
)
except Exception:
err.append(code)
executor = ThreadPoolExecutor(max_workers=4)
res = {
executor.submit(
__saving_work,
option_contract_list[i_]["code"],
coll_option_min
)
for i_ in range(len(option_contract_list))
} # one saving task per contract
count = 0
for i_ in concurrent.futures.as_completed(res):
QA_util_log_info(
'The {} of Total {}'.format(count,
len(option_contract_list)),
ui_log=ui_log
)
strLogProgress = 'DOWNLOAD PROGRESS {} '.format(
str(float(count / len(option_contract_list) * 100))[0:4] + '%'
)
intLogProgress = int(float(count / len(option_contract_list) * 10000.0))
QA_util_log_info(
strLogProgress,
ui_log=ui_log,
ui_progress=ui_progress,
ui_progress_int_value=intLogProgress
)
count = count + 1
if len(err) < 1:
QA_util_log_info('SUCCESS', ui_log=ui_log)
else:
QA_util_log_info(' ERROR CODE \n ', ui_log=ui_log)
QA_util_log_info(err, ui_log=ui_log)
|
java
|
int at(int n) throws
IOException
{
Mark mark = mark();
List<Integer> cbuf = Lists.newArrayList();
for (int i = 0; i < n; i++)
{
cbuf.add(next());
if (curChar == -1)
{
break;
}
}
unread(cbuf, mark);
return cbuf.get(cbuf.size() - 1);
}
|
java
|
public static MutableDoubleTuple arithmeticMean(
Collection<? extends DoubleTuple> tuples,
MutableDoubleTuple result)
{
if (tuples.isEmpty())
{
return null;
}
result = add(tuples, result);
return DoubleTuples.multiply(result, 1.0 / tuples.size(), result);
}
|
java
|
@Override
public InetAddress getHostAddress(Object endPoint)
{
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.entry(this, tc, "getHostAddress", endPoint);
InetAddress address = null;
if (endPoint instanceof CFEndPoint)
{
address = ((CFEndPoint) endPoint).getAddress();
}
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.exit(this, tc, "getHostAddress", address);
return address;
}
|
python
|
def sto(zeta,N=1,L=0,M=0,origin=(0,0,0)):
"""
Use Stewart's STO-6G fits to create a contracted Gaussian approximation to a
Slater function. Fits of other expansion lengths (1G, 3G, etc.) are in the paper.
Reference: RF Stewart, JCP 52, 431 (1970)
>>> s = sto(1)
>>> np.isclose(s(0,0,0),0.530121)
True
"""
nlm2powers = {
(1,0,0) : (0,0,0,0), # x,y,z,r
(2,0,0) : (0,0,0,1),
(3,0,0) : (0,0,0,2),
(2,1,0) : (1,0,0,0),
(2,1,1) : (0,1,0,0),
(2,1,-1) : (0,0,1,0),
(3,1,0) : (1,0,0,1),
(3,1,1) : (0,1,0,1),
(3,1,-1) : (0,0,1,1)
}
gexps_1s = [2.310303149e01,4.235915534e00,1.185056519e00,
4.070988982e-01,1.580884151e-01,6.510953954e-02]
gcoefs_1s = [9.163596280e-03,4.936149294e-02,1.685383049e-01,
3.705627997e-01,4.164915298e-01,1.303340841e-01]
gexps_2s = [2.768496241e01,5.077140627e00,1.426786050e00,
2.040335729e-01,9.260298399e-02,4.416183978e-02]
gcoefs_2s = [-4.151277819e-03,-2.067024148e-02,-5.150303337e-02,
3.346271174e-01,5.621061301e-01,1.712994697e-01]
gexps_2p = [5.868285913e00,1.530329631e00,5.475665231e-01,
2.288932733e-01,1.046655969e-01,4.948220127e-02]
gcoefs_2p = [7.924233646e-03,5.144104825e-02,1.898400060e-01,
4.049863191e-01,4.012362861e-01,1.051855189e-01]
gexps_3s = [3.273031938e00,9.200611311e-01,3.593349765e-01,
8.636686991e-02,4.797373812e-02,2.724741144e-02]
gcoefs_3s = [-6.775596947e-03,-5.639325779e-02,-1.587856086e-01,
5.534527651e-01,5.015351020e-01,7.223633674e-02]
gexps_3p = [5.077973607e00,1.340786940e00,2.248434849e-01,
1.131741848e-01,6.076408893e-02,3.315424265e-02]
gcoefs_3p = [-3.329929840e-03,-1.419488340e-02,1.639395770e-01,
4.485358256e-01,3.908813050e-01,7.411456232e-02]
gexps_3d = [2.488296923,7.981487853e-1,3.311327490e-1,
1.559114463e-1,7.877734732e-2,4.058484363e-2]
gcoefs_3d = [7.283828112e-3,5.386799363e-2,2.072139149e-1,
4.266269092e-1,3.843100204e-1,8.902827546e-2]
gexps_4s = [3.232838646,3.605788802e-1,1.717902487e-1,
5.277666487e-2,3.163400284e-2,1.874093091e-2]
gcoefs_4s = [1.374817488e-3,-8.666390043e-2,-3.130627309e-1,
7.812787397e-1,4.389247988e-1,2.487178756e-2]
gexps_4p = [2.389722618, 7.960947826e-1,3.415541380e-1,
8.847434525e-2,4.958248334e-2,2.816929784e-2]
gcoefs_4p = [-1.665913575e-3,-1.657464971e-2,-5.958513378e-2,
4.053115554e-1,5.433958189e-1,1.20970491e-1]
gexps = { # indexed by N,s_or_p:
(1,0) : gexps_1s,
(2,0) : gexps_2s,
(2,1) : gexps_2p,
(3,0) : gexps_3s,
(3,1) : gexps_3p
}
gcoefs = { # indexed by N,s_or_p:
(1,0) : gcoefs_1s,
(2,0) : gcoefs_2s,
(2,1) : gcoefs_2p,
(3,0) : gcoefs_3s,
(3,1) : gcoefs_3p
}
I,J,K,R = nlm2powers[(N,L,M)]
exps = [zeta**2*expn for expn in gexps[(N,L)]]
coefs = gcoefs[N,L]
return cgbf(origin,(I,J,K),exps,coefs)
|
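A minimal usage sketch, assuming pyquante2 (where this helper and `cgbf` live) is installed; it mirrors the doctest above:

s = sto(1.0)             # 1s Slater with zeta = 1, as an STO-6G contraction
print(s(0.0, 0.0, 0.0))  # approximately 0.530121, per the doctest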
python
|
def createLabels2D(self):
""" 2D labeling at zmax """
logger.debug(" Creating 2D labels...")
self.zmax = np.argmax(self.values,axis=1)
self.vmax = self.values[np.arange(len(self.pixels),dtype=int),self.zmax]
kwargs=dict(pixels=self.pixels,values=self.vmax,nside=self.nside,
threshold=self.threshold,xsize=self.xsize)
labels,nlabels = CandidateSearch.labelHealpix(**kwargs)
self.nlabels = nlabels
self.labels = np.repeat(labels,len(self.distances)).reshape(len(labels),len(self.distances))
return self.labels, self.nlabels
|
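The key step above is numpy fancy indexing: `zmax` picks each pixel's maximizing slice, and indexing with `(arange(n), zmax)` pulls out the corresponding values. A self-contained illustration:

import numpy as np

values = np.array([[0.1, 0.9, 0.3],
                   [0.7, 0.2, 0.4]])
zmax = np.argmax(values, axis=1)             # maximizing slice per row -> [1, 0]
vmax = values[np.arange(len(values)), zmax]  # value at that slice     -> [0.9, 0.7]
print(zmax, vmax)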
python
|
def lookup_controller(obj, remainder, request=None):
'''
Traverses the requested url path and returns the appropriate controller
object, including default routes.
Handles common errors gracefully.
'''
if request is None:
warnings.warn(
(
"The function signature for %s.lookup_controller is changing "
"in the next version of pecan.\nPlease update to: "
"`lookup_controller(self, obj, remainder, request)`." % (
__name__,
)
),
DeprecationWarning
)
notfound_handlers = []
while True:
try:
obj, remainder = find_object(obj, remainder, notfound_handlers,
request)
handle_security(obj)
return obj, remainder
except (exc.HTTPNotFound, exc.HTTPMethodNotAllowed,
PecanNotFound) as e:
if isinstance(e, PecanNotFound):
e = exc.HTTPNotFound()
while notfound_handlers:
name, obj, remainder = notfound_handlers.pop()
if name == '_default':
# Notfound handler is, in fact, a controller, so stop
# traversal
return obj, remainder
else:
# Notfound handler is an internal redirect, so continue
# traversal
result = handle_lookup_traversal(obj, remainder)
if result:
# If no arguments are passed to the _lookup, yet the
# argspec requires at least one, raise a 404
if (
remainder == [''] and
len(obj._pecan['argspec'].args) > 1
):
raise e
obj_, remainder_ = result
return lookup_controller(obj_, remainder_, request)
else:
raise e
|
java
|
public void marshall(DestinationSettings destinationSettings, ProtocolMarshaller protocolMarshaller) {
if (destinationSettings == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(destinationSettings.getS3Settings(), S3SETTINGS_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
}
|
java
|
@Check(CheckType.FAST)
public void checkDiscouragedCalls(XAbstractFeatureCall expression) {
if (!isIgnored(DISCOURAGED_REFERENCE)
&& this.featureCallValidator.isDiscouragedCall(expression)) {
addIssue(
MessageFormat.format(Messages.SARLValidator_37,
expression.getConcreteSyntaxFeatureName()),
expression,
DISCOURAGED_REFERENCE);
}
}
|
python
|
def new_metric(self, meta):
"""
Create and register metric,
find subscribers for this metric (using meta as filter) and subscribe
Return:
metric (available_metrics[0]): one of Metric
"""
type_ = meta.get('type')
if not type_:
raise ValueError('Metric type should be defined.')
if type_ in available_metrics:
metric_obj = available_metrics[type_](meta, self.routing_queue) # create metric object
metric_meta = pd.DataFrame({metric_obj.local_id: meta}).T # create metric meta
self.metrics_meta = pd.concat([self.metrics_meta, metric_meta]) # register metric meta (DataFrame.append is gone in pandas >= 2)
self.metrics[metric_obj.local_id] = metric_obj # register metric object
# find subscribers for this metric
this_metric_subscribers = self.__reversed_filter(self.subscribers, meta)
if this_metric_subscribers.empty:
logger.debug('subscriber for metric %s not found', metric_obj.local_id)
else:
logger.debug('Found subscribers for this metric, subscribing...: %s', this_metric_subscribers)
# attach this metric id to discovered subscribers and select id <-> callbacks
this_metric_subscribers['id'] = metric_obj.local_id
found_callbacks = this_metric_subscribers[['id', 'callback']].set_index('id')
# add this metric callbacks to DataManager's callbacks
self.callbacks = pd.concat([self.callbacks, found_callbacks])  # add this metric's callbacks (pandas >= 2 compatible)
return metric_obj
else:
raise NotImplementedError('Unknown metric type: %s' % type_)
|
python
|
def locate_arcgis():
'''
Find the path to the ArcGIS Desktop installation.
Keys to check:
HKLM/SOFTWARE/ESRI/ArcGIS 'RealVersion' - will give the version, then we can use
that to go to
HKLM/SOFTWARE/ESRI/DesktopXX.X 'InstallDir'. Where XX.X is the version
We may need to check HKLM/SOFTWARE/Wow6432Node/ESRI instead
'''
try:
    import winreg as _winreg  # Python 3 name
except ImportError:
    import _winreg  # Python 2
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
'SOFTWARE\\Wow6432Node\\ESRI\\ArcGIS', 0)
version = _winreg.QueryValueEx(key, "RealVersion")[0][:4]
key_string = "SOFTWARE\\Wow6432Node\\ESRI\\Desktop{0}".format(version)
desktop_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
key_string, 0)
install_dir = _winreg.QueryValueEx(desktop_key, "InstallDir")[0]
return install_dir
except WindowsError:
raise ImportError("Could not locate the ArcGIS directory on this machine")
|
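A minimal usage sketch; on a non-Windows machine, or one without ArcGIS Desktop, the function raises ImportError as coded above:

try:
    print(locate_arcgis())
except ImportError as err:
    print(err)  # e.g. "Could not locate the ArcGIS directory on this machine"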
python
|
def compress(self, setup):
"""
Returns the compressed graph according to the given experimental setup
Parameters
----------
setup : :class:`caspo.core.setup.Setup`
Experimental setup used to compress the graph
Returns
-------
caspo.core.graph.Graph
Compressed graph
"""
designated = set(setup.nodes)
zipped = self.copy()
marked = [(n, d) for n, d in self.nodes(data=True) if n not in designated and not d.get('compressed', False)]
while marked:
for node, _ in sorted(marked):
backward = list(zipped.predecessors(node))  # predecessors/successors return iterators in networkx >= 2
forward = list(zipped.successors(node))
if not backward or (len(backward) == 1 and backward[0] not in forward):
self.__merge_source_targets(node, zipped)
elif not forward or (len(forward) == 1 and forward[0] not in backward):
self.__merge_target_sources(node, zipped)
else:
designated.add(node)
marked = [(n, d) for n, d in self.nodes(data=True) if n not in designated and not d.get('compressed', False)]
not_compressed = [(n, d) for n, d in zipped.nodes(data=True) if not d.get('compressed', False)]
return zipped.subgraph([n for n, _ in not_compressed])
|
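A toy illustration of the compression rule, not the caspo implementation: a non-designated node with exactly one predecessor (or one successor) can be bypassed, shortening paths while preserving reachability between designated nodes. Sketched here with networkx:

import networkx as nx

g = nx.DiGraph([("a", "x"), ("x", "b")])   # "x" is a pass-through node
designated = {"a", "b"}
for n in [n for n in list(g) if n not in designated]:
    preds = list(g.predecessors(n))
    succs = list(g.successors(n))
    if len(preds) == 1 and len(succs) == 1:
        g.add_edge(preds[0], succs[0])     # bypass the node, keep the path
        g.remove_node(n)
print(list(g.edges()))  # -> [('a', 'b')]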
python
|
def api_token_required(f, *args, **kwargs):
"""
Decorator helper function to ensure some methods aren't needlessly called
without an api_token configured.
"""
try:
    # args[0] is the decorated method's instance; a missing attribute and an
    # explicit None both surface as the same AttributeError below
    if args[0].api_token is None:
        raise AttributeError('Parameter api_token is required.')
except AttributeError:
    raise AttributeError('Parameter api_token is required.')
return f(*args, **kwargs)
|
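The `(f, *args, **kwargs)` signature matches the convention of the `decorator` package rather than a plain `@`-style decorator. A hand-wired usage sketch with a hypothetical `Client` class and `guarded` wrapper:

import functools

class Client:
    def __init__(self, api_token=None):
        self.api_token = api_token

    def whoami(self):
        return 'ok'

def guarded(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        return api_token_required(f, *args, **kwargs)
    return wrapper

Client.whoami = guarded(Client.whoami)
print(Client(api_token='secret').whoami())  # -> 'ok'
# Client().whoami() raises AttributeError('Parameter api_token is required.')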
java
|
/**
 * Lower-cases {@code param}, escapes the SQL LIKE wildcards '_' and '%',
 * and appends '%' so the result matches any string with that prefix.
 */
public static String likePrefix(String param) {
return param != null ? param.toLowerCase().replace("_", "\\_").replace("%", "\\%") + "%" : null;
}
|
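A rough Python analogue of the same escaping, for comparison (a hypothetical `like_prefix` helper): neutralize the SQL LIKE wildcards in user input, then append '%' for prefix matching:

def like_prefix(param):
    if param is None:
        return None
    # escape LIKE wildcards, then add the trailing wildcard for prefix matching
    return param.lower().replace('_', '\\_').replace('%', '\\%') + '%'

print(like_prefix('50%_OFF'))  # -> 50\%\_off%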
java
|
@Override
final public void putAll(Map map) {
ValidateUtility.objectNotNull(map, "map");
Iterator it = map.entrySet().iterator();
while (it.hasNext()) {
Map.Entry e = (Map.Entry) it.next();
put(e.getKey(), e.getValue());
}
}
|
java
|
public void setFleetDetails(java.util.Collection<Fleet> fleetDetails) {
if (fleetDetails == null) {
this.fleetDetails = null;
return;
}
this.fleetDetails = new java.util.ArrayList<Fleet>(fleetDetails);
}
|
java
|
private BitSet parseQuery(FilterQuery filterQuery, DataSchema schema) {
BitSet queryBitSet = new BitSet();
if(filterQuery instanceof MatchAllQuery || filterQuery.getFilterQueries().size() < 1) return queryBitSet;
//facet drill down happens here
for (String filterField : filterQuery.getFilterQueries().keySet()) {
final int columnIndex = schema.getColumnIndex(filterField);
final Map<String, BitSet> map = fieldInvertedIndex.get(columnIndex);
final FilterQuery.FilterQueryElement filterQueryElement = filterQuery.getFilterQueries().get(filterField);
DataType type = schema.getType(columnIndex);
switch (type) {
case CoordinateLat:
case CoordinateLon:
case String:
case Boolean:
case Integer:
// if range query
if (filterQueryElement instanceof FilterQuery.FilterFieldRange) {
queryBitSet = processRangeQueryIntegerTypes(queryBitSet, map, filterQueryElement, type);
} else if (filterQueryElement instanceof FilterQuery.FilterFieldGreaterThanInteger) {
queryBitSet = processIntegerGreaterThanDefaultTypes(queryBitSet, map, filterQueryElement, type);
} else {
// does most of the commons multi value types
queryBitSet = processMultiValueQueryDefaultTypes(queryBitSet, map, filterQueryElement, type);
}
break;
case Date:
if (filterQueryElement instanceof FilterQuery.FilterFieldRangeDate) {
queryBitSet = processRangeQueryISODateTypes(queryBitSet, map, filterQueryElement, type);
} else {
queryBitSet = processRangeQueryMultiValueQueryDateYear(queryBitSet, map, filterQueryElement, type);
}
break;
case DateYear:
// if range query
if (filterQueryElement instanceof FilterQuery.FilterFieldRange) {
queryBitSet = processRangeQueryDateYearType(queryBitSet, map, filterQueryElement, type);
} else {
queryBitSet = processRangeQueryMultiValueQueryDateYear(queryBitSet, map, filterQueryElement, type);
}
break;
}
}
return queryBitSet;
}
|
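The method above answers filters by intersecting per-value bitsets from a field-level inverted index. A toy Python sketch of the same idea, using sets in place of BitSet:

from collections import defaultdict

rows = [
    {"lang": "python", "year": 2019},
    {"lang": "java", "year": 2020},
    {"lang": "python", "year": 2020},
]

# field -> value -> set of row ids containing that value
index = defaultdict(lambda: defaultdict(set))
for rid, row in enumerate(rows):
    for field, value in row.items():
        index[field][value].add(rid)

# "lang == python AND year == 2020" as an intersection of posting sets
hits = index["lang"]["python"] & index["year"][2020]
print(sorted(hits))  # -> [2]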
java
|
public static String serializeAsString(Object o) throws IOException {
try {
JsonStructure builtJsonObject = findFieldsToSerialize(o).mainObject;
return builtJsonObject.toString();
} catch (IllegalStateException ise) {
// the reflective attempt to build the object failed.
throw new IOException("Unable to build JSON for Object", ise);
} catch (JsonException e) {
throw new IOException("Unable to build JSON for Object", e);
}
}
|
python
|
def on_okButton(self, event):
"""
Complies information input in GUI into a kwargs dictionary which can
be passed into the utrecht_magic script and run to output magic files
"""
os.chdir(self.WD)
options_dict = {}
wd = self.WD
options_dict['dir_path'] = wd
full_file = self.bSizer0.return_value()
if not full_file:
pw.simple_warning('You must provide a Utrecht format file')
return False
input_directory, Utrecht_file = os.path.split(full_file)
options_dict['mag_file'] = Utrecht_file
options_dict['input_dir_path'] = input_directory
if input_directory:
ID = "-ID " + input_directory
else:
ID = ''
outfile = Utrecht_file + ".magic"
options_dict['meas_file'] = outfile
samp_outfile = Utrecht_file[:Utrecht_file.find('.')] + "_er_samples.txt"
options_dict['samp_file'] = samp_outfile
spec_outfile = Utrecht_file[:Utrecht_file.find('.')] + "_er_specimens.txt"
options_dict['spec_file'] = spec_outfile
site_outfile = Utrecht_file[:Utrecht_file.find('.')] + "_er_sites.txt"
options_dict['site_file'] = site_outfile
dc_flag,dc_params = '',''
if self.bSizer6.return_value() != '':
dc_params = list(map(float,self.bSizer6.return_value().split()))
options_dict['dc_params'] = dc_params
dc_flag = '-dc'
spec_num = self.bSizer3.return_value()
options_dict['specnum'] = spec_num
if spec_num:
spec_num = "-spc " + str(spec_num)
else:
spec_num = "-spc 0" # defaults to 0 if user doesn't choose number
loc_name = self.bSizer4.return_value()
options_dict['location_name'] = loc_name
if loc_name:
loc_name = "-loc " + loc_name
ncn = self.bSizer2.return_value()
options_dict['samp_con'] = ncn
particulars = self.bSizer1.return_value()
options_dict['meth_code'] = particulars
if particulars:
particulars = "-mcd " + particulars
euro_date = self.bSizer7.return_value()
if euro_date:
    options_dict['dmy_flag'] = True
    dmy_flag = '-dmy'
else:
    options_dict['dmy_flag'] = False
    dmy_flag = ''
try:
    site_lat, site_lon = self.bSizer8.return_value().split()
except ValueError:
    site_lat, site_lon = '', ''
options_dict['site_lat'] = site_lat
options_dict['site_lon'] = site_lon
replicate = self.bSizer5.return_value()
if replicate:
    options_dict['avg'] = False
    replicate = ''
else:
    options_dict['avg'] = True
    replicate = '-A'
# command string shown to the user on success; mirrors the utrecht_magic call below
COMMAND = "utrecht_magic.py -WD {} -f {} -F {} {} {} {} -ncn {} {} -Fsp {} -Fsi {} -Fsa {} {} {} {} {} -lat {} -lon {}".format(wd, Utrecht_file, outfile, particulars, spec_num, loc_name, ncn, ID, spec_outfile, site_outfile, samp_outfile, replicate, dc_flag, dc_params, dmy_flag, site_lat, site_lon)
# to run as module:
program_ran, error_message = utrecht_magic.main(command_line=False, **options_dict)
if program_ran:
pw.close_window(self, COMMAND, outfile)
else:
pw.simple_warning(error_message)
|
java
|
public Matrix3x2f rotateTo(Vector2fc fromDir, Vector2fc toDir, Matrix3x2f dest) {
float dot = fromDir.x() * toDir.x() + fromDir.y() * toDir.y();
float det = fromDir.x() * toDir.y() - fromDir.y() * toDir.x();
float rm00 = dot;
float rm01 = det;
float rm10 = -det;
float rm11 = dot;
float nm00 = m00 * rm00 + m10 * rm01;
float nm01 = m01 * rm00 + m11 * rm01;
dest.m10 = m00 * rm10 + m10 * rm11;
dest.m11 = m01 * rm10 + m11 * rm11;
dest.m00 = nm00;
dest.m01 = nm01;
dest.m20 = m20;
dest.m21 = m21;
return dest;
}
|
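The rotation above is built directly from the dot and 2D cross products of the two unit direction vectors: the matrix [[dot, -det], [det, dot]] rotates `fromDir` onto `toDir`. A quick numpy check of that construction:

import numpy as np

theta = 0.7
a = np.array([1.0, 0.0])                      # fromDir (unit length)
b = np.array([np.cos(theta), np.sin(theta)])  # toDir (unit length)

dot = a @ b                       # cosine of the angle between them
det = a[0] * b[1] - a[1] * b[0]   # sine of the angle (2D cross product)
R = np.array([[dot, -det],
              [det,  dot]])
print(np.allclose(R @ a, b))  # -> True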
python
|
def get_latex_figure_str(fpath_list, caption_str=None, label_str=None,
width_str=r'\textwidth', height_str=None, nCols=None,
dpath=None, colpos_sep=' ', nlsep='',
use_sublbls=None, use_frame=False):
r"""
Args:
fpath_list (list):
dpath (str): directory relative to main tex file
Returns:
str: figure_str
CommandLine:
python -m utool.util_latex --test-get_latex_figure_str
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_latex import * # NOQA
>>> fpath_list = ['figures/foo.png']
>>> figure_str = get_latex_figure_str(fpath_list)
>>> result = str(figure_str)
>>> print(result)
"""
import utool as ut
if nCols is None:
nCols = len(fpath_list)
USE_SUBFIGURE = True
if width_str is not None:
colwidth = (1.0 / nCols)
if USE_SUBFIGURE:
colwidth *= .95
graphics_sizestr = ('%.2f' % (colwidth,)) + width_str
else:
graphics_sizestr = '[width=%.1f%s]' % (colwidth, width_str)
elif height_str is not None:
graphics_sizestr = '[height=%s]' % (height_str)
else:
graphics_sizestr = ''
if dpath is not None:
fpath_list = [ut.relpath_unix(fpath_, dpath) for fpath_ in fpath_list]
if USE_SUBFIGURE:
# References: https://en.wikibooks.org/wiki/LaTeX/Floats,_Figures_and_Captions#Subfloats
# TODO ? http://tex.stackexchange.com/questions/159290/how-can-i-place-a-vertical-rule-between-subfigures
# Use subfigures
graphics_list = []
sublbl_prefix = label_str if label_str is not None else ''
for count, fpath in enumerate(fpath_list):
"""
print(', '.join([str(x) + ':' + chr(x) for x in range(65, 123)]))
print(', '.join([str(x) + ':' + chr(x) for x in range(97, 123)]))
"""
CHRLBLS = True
if CHRLBLS:
#subchar = chr(97 + count)
subchar = chr(65 + count)
else:
subchar = str(count)
parts = []
subfigure_str = ''
if len(fpath_list) > 1:
parts.append('\\begin{subfigure}[h]{' + graphics_sizestr + '}')
parts.append('\\centering')
graphics_part = '\\includegraphics[width=%s]{%s}' % (width_str, fpath,)
if use_frame:
parts.append('\\fbox{%s}' % (graphics_part,))
else:
parts.append(graphics_part)
if use_sublbls is True or use_sublbls is None and len(fpath_list) > 1:
parts.append('\\caption{}\\label{sub:' + sublbl_prefix + subchar + '}')
if len(fpath_list) > 1:
parts.append('\\end{subfigure}')
subfigure_str = ''.join(parts)
graphics_list.append(subfigure_str)
else:
if True:
graphics_list = [
r'\includegraphics%s{%s}\captionof{figure}{%s}' % (
graphics_sizestr, fpath, 'fd',
#'(' + str(count) + ')'
#'(' + chr(97 + count) + ')'
)
for count, fpath in enumerate(fpath_list)]
else:
graphics_list = [r'\includegraphics%s{%s}' % (graphics_sizestr, fpath,) for fpath in fpath_list]
#graphics_list = [r'\includegraphics%s{%s}' % (graphics_sizestr, fpath,) ]
#nRows = len(graphics_list) // nCols
# Add separators
NL = '\n'
if USE_SUBFIGURE:
col_spacer_mid = NL + '~~' + '% --' + NL
col_spacer_end = NL + r'\\' + '% --' + NL
else:
col_spacer_mid = NL + '&' + NL
col_spacer_end = NL + r'\\' + nlsep + NL
sep_list = [
col_spacer_mid if count % nCols > 0 else col_spacer_end
for count in range(1, len(graphics_list) + 1)
]
if len(sep_list) > 0:
sep_list[-1] = ''
graphics_list_ = [graphstr + sep for graphstr, sep in zip(graphics_list, sep_list)]
#graphics_body = '\n&\n'.join(graphics_list)
graphics_body = ''.join(graphics_list_)
header_str = colpos_sep.join(['c'] * nCols)
if USE_SUBFIGURE:
figure_body = graphics_body
else:
figure_body = ut.codeblock(
r'''
\begin{tabular}{%s}
%s
\end{tabular}
'''
) % (header_str, graphics_body)
if caption_str is not None:
#tabular_body += '\n\caption{\\footnotesize{%s}}' % (caption_str,)
if label_str is not None:
figure_body += '\n' + r'\caption[%s]{%s}' % (label_str, caption_str,)
else:
figure_body += '\n' + r'\caption{%s}' % (caption_str,)
if label_str is not None:
figure_body += '\n' + r'\label{fig:%s}' % (label_str,)
#figure_fmtstr = ut.codeblock(
# r'''
# \begin{figure*}
# \begin{center}
# %s
# \end{center}
# \end{figure*}
# '''
#)
figure_fmtstr = ut.codeblock(
r'''
\begin{figure}[ht!]
\centering
%s
\end{figure}
'''
)
figure_str = figure_fmtstr % (figure_body)
return figure_str
|
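A minimal usage sketch, assuming utool is installed: two images rendered side by side as subfigures with a shared caption and label:

print(get_latex_figure_str(
    ['figures/foo.png', 'figures/bar.png'],
    caption_str='Two example panels.',
    label_str='example',
    nCols=2,
))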
java
|
public static <E, K extends Comparable> int binarySearch(
List<E> list,
Function<? super E, K> keyFunction,
@Nullable K key,
KeyPresentBehavior presentBehavior,
KeyAbsentBehavior absentBehavior) {
return binarySearch(
list, keyFunction, key, Ordering.natural(), presentBehavior, absentBehavior);
}
|
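A rough standard-library analogue in Python: binary search over a list sorted by a derived key (`bisect` accepts key= on Python 3.10+), without Guava's present/absent behavior knobs:

import bisect

records = [("ann", 25), ("bob", 31), ("cid", 40)]  # sorted by age
i = bisect.bisect_left(records, 31, key=lambda r: r[1])
print(i, records[i])  # -> 1 ('bob', 31)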