language | func_code_string
(stringclasses: 2 values) | (stringlengths: 63 – 466k)
---|---|
java
|
public Parser<RECORD> addParseTarget(final String setterMethodName,
final SetterPolicy setterPolicy,
final String fieldValue) throws NoSuchMethodException {
Method method;
try {
method = recordClass.getMethod(setterMethodName, String.class);
} catch (NoSuchMethodException a) {
try {
method = recordClass.getMethod(setterMethodName, String.class, String.class);
} catch (NoSuchMethodException b) {
try {
method = recordClass.getMethod(setterMethodName, String.class, Long.class);
} catch (NoSuchMethodException c) {
try {
method = recordClass.getMethod(setterMethodName, String.class, Double.class);
} catch (NoSuchMethodException d) {
try {
method = recordClass.getMethod(setterMethodName, Long.class);
} catch (NoSuchMethodException e) {
try {
method = recordClass.getMethod(setterMethodName, Double.class);
} catch (NoSuchMethodException f) {
throw new NoSuchMethodException(
"Unable to find any valid form of the method " + setterMethodName +
" in the class " + recordClass.getCanonicalName());
}
}
}
}
}
}
addParseTarget(method, setterPolicy, Collections.singletonList(fieldValue));
return this;
}
|
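The cascade of getMethod calls above probes successively richer setter signatures and only fails once every candidate is exhausted. A minimal Python sketch of the same fallback idea, probing a hypothetical Record class with inspect (all names below are illustrative, not part of the original parser):

import inspect

class Record:
    """Hypothetical target class with one String-style setter."""
    def set_name(self, value):
        self.name = value

def find_setter(cls, method_name, candidate_signatures):
    """Return the method if its parameter count matches any candidate,
    mirroring the try/catch cascade in the Java code above."""
    method = getattr(cls, method_name, None)
    if method is None:
        raise AttributeError("No method %s in %s" % (method_name, cls.__name__))
    params = list(inspect.signature(method).parameters)[1:]  # drop 'self'
    for candidate in candidate_signatures:
        if len(params) == len(candidate):
            return method
    raise AttributeError(
        "Unable to find any valid form of %s in %s" % (method_name, cls.__name__))

setter = find_setter(Record, "set_name", [("value",), ("field", "value")])
setter(Record(), "example")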
java
|
private IQTree projectAwayUnnecessaryVariables(IQTree child, IQProperties currentIQProperties) {
if (child.getRootNode() instanceof ConstructionNode) {
ConstructionNode constructionNode = (ConstructionNode) child.getRootNode();
AscendingSubstitutionNormalization normalization = normalizeAscendingSubstitution(
constructionNode.getSubstitution(), projectedVariables);
Optional<ConstructionNode> proposedConstructionNode = normalization.generateTopConstructionNode();
if (proposedConstructionNode
.filter(c -> c.isSyntacticallyEquivalentTo(constructionNode))
.isPresent())
return child;
IQTree grandChild = normalization.normalizeChild(((UnaryIQTree) child).getChild());
return proposedConstructionNode
.map(c -> (IQTree) iqFactory.createUnaryIQTree(c, grandChild, currentIQProperties.declareLifted()))
.orElse(grandChild);
}
else
return child;
}
|
python
|
def download_file_powershell(url, target, headers=None):
"""
Download the file at url to target using Powershell (which will validate
trust). Raise an exception if the command cannot complete.
"""
    target = os.path.abspath(target)
    headers = headers or {}  # avoid the mutable-default-argument pitfall
powershell_cmd = "$request = (new-object System.Net.WebClient);"
for k, v in headers.items():
powershell_cmd += "$request.headers['%s'] = '%s';" % (k, v)
powershell_cmd += "$request.DownloadFile(%(url)r, %(target)r)" % vars()
cmd = [
'powershell',
'-Command',
powershell_cmd,
]
_clean_check(cmd, target)
|
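For reference, this is roughly the command string the function assembles for a request with one custom header; the url, target, and header values below are examples only, and nothing is actually downloaded:

import os

url = "https://example.com/setup.py"          # example values only
target = os.path.abspath("setup.py")
headers = {"User-Agent": "installer/1.0"}

powershell_cmd = "$request = (new-object System.Net.WebClient);"
for k, v in headers.items():
    powershell_cmd += "$request.headers['%s'] = '%s';" % (k, v)
powershell_cmd += "$request.DownloadFile(%(url)r, %(target)r)" % vars()
print(powershell_cmd)  # the string handed to powershell -Command ...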
python
|
def dataframe_from_excel(path, sheetname=0, header=0, skiprows=None):  # , parse_dates=False):
    """Thin wrapper for pandas.io.excel.read_excel() that accepts a file path and sheet index/name
    Arguments:
        path (str): path to the Excel workbook to read
        sheetname (int or str): index or name of the worksheet to load (default: first sheet)
        header (int): row number to use for the column labels
        skiprows (int or list): rows to skip at the beginning of the sheet
    Returns:
        pandas.DataFrame: the requested worksheet as a DataFrame
    """
sheetname = sheetname or 0
if isinstance(sheetname, (basestring, float)):
try:
sheetname = int(sheetname)
except (TypeError, ValueError, OverflowError):
sheetname = str(sheetname)
wb = xlrd.open_workbook(path)
# if isinstance(sheetname, int):
# sheet = wb.sheet_by_index(sheetname)
# else:
# sheet = wb.sheet_by_name(sheetname)
# assert(not parse_dates, "`parse_dates` argument and function not yet implemented!")
# table = [sheet.row_values(i) for i in range(sheet.nrows)]
return pd.io.excel.read_excel(wb, sheetname=sheetname, header=header, skiprows=skiprows, engine='xlrd')
|
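The sheetname coercion above is easy to test in isolation; a small sketch (using Python 3's str in place of basestring, no pandas or xlrd required):

def coerce_sheetname(sheetname=0):
    """Numeric strings and floats become a sheet index; other strings
    stay sheet names; None falls back to the first sheet."""
    sheetname = sheetname or 0
    if isinstance(sheetname, (str, float)):
        try:
            sheetname = int(sheetname)
        except (TypeError, ValueError, OverflowError):
            sheetname = str(sheetname)
    return sheetname

assert coerce_sheetname("2") == 2
assert coerce_sheetname("Sheet1") == "Sheet1"
assert coerce_sheetname(None) == 0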
java
|
public static boolean isFragment(Bundle bundle) {
Dictionary<String, String> headers = bundle.getHeaders();
return headers.get(Constants.FRAGMENT_HOST) != null;
}
|
java
|
@Override
public <B> MaybeT<M, A> discardR(Applicative<B, MonadT<M, Maybe<?>, ?>> appB) {
return MonadT.super.discardR(appB).coerce();
}
|
python
|
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
        # DOM is specified and DOW is * so, to prevent a contradiction like
        # "on day 1 of the month, every day", we will not specify a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
            exp, _ = s.split("#", 1)  # keep only the day part before '#'
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
)
|
java
|
protected ActionExecute createActionExecute(ActionMapping actionMapping, Method executeMethod) {
final Execute anno = getExecuteAnnotation(executeMethod); // exists, already checked
final ExecuteOption executeOption = createExecuteOption(anno);
return newActionExecute(actionMapping, executeMethod, executeOption);
}
|
python
|
def fixcode(**kwargs):
"""
    auto pep8 format all python files in the ``source code`` and ``tests`` dirs.
    """
    # repository directory
repo_dir = Path(__file__).parent.absolute()
# source code directory
source_dir = Path(repo_dir, package.__name__)
if source_dir.exists():
print("Source code locate at: '%s'." % source_dir)
print("Auto pep8 all python file ...")
source_dir.autopep8(**kwargs)
else:
print("Source code directory not found!")
# unittest code directory
unittest_dir = Path(repo_dir, "tests")
if unittest_dir.exists():
print("Unittest code locate at: '%s'." % unittest_dir)
print("Auto pep8 all python file ...")
unittest_dir.autopep8(**kwargs)
else:
print("Unittest code directory not found!")
print("Complete!")
|
java
|
public PagedList<KeyItem> getKeysNext(final String nextPageLink) {
ServiceResponse<Page<KeyItem>> response = getKeysNextSinglePageAsync(nextPageLink).toBlocking().single();
return new PagedList<KeyItem>(response.body()) {
@Override
public Page<KeyItem> nextPage(String nextPageLink) {
return getKeysNextSinglePageAsync(nextPageLink).toBlocking().single().body();
}
};
}
|
java
|
private void putDataPoint(int i, Entry<Long, Double> datapoint) {
timestamps[i] = datapoint.getKey();
values[i] = datapoint.getValue();
}
|
python
|
def natural_sort(lst, key=lambda s: s):
"""
Sort the list into natural alphanumeric order.
"""
def get_alphanum_key_func(key):
convert = lambda text: int(text) if text.isdigit() else text
return lambda s: [convert(c) for c in re.split('([0-9]+)', key(s))]
sort_key = get_alphanum_key_func(key)
    lst.sort(key=sort_key)
|
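A quick demonstration of why the split-and-convert key matters; the helper below is a standalone copy of the alphanumeric key used by natural_sort:

import re

def alphanum_key(s):
    # digits compare numerically, everything else lexicographically
    return [int(c) if c.isdigit() else c for c in re.split('([0-9]+)', s)]

items = ['file10', 'file2', 'file1']
print(sorted(items))                    # ['file1', 'file10', 'file2']
print(sorted(items, key=alphanum_key))  # ['file1', 'file2', 'file10']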
python
|
def _create_epoch_data(self, streams: Optional[Iterable[str]]=None) -> EpochData:
"""Create empty epoch data double dict."""
if streams is None:
streams = [self._train_stream_name] + self._extra_streams
return OrderedDict([(stream_name, OrderedDict()) for stream_name in streams])
|
python
|
def delete(self, space_id):
"""
Deletes a space by ID.
"""
try:
self.space_id = space_id
return super(SpacesProxy, self).delete(space_id)
finally:
self.space_id = None
|
java
|
public static TerminalOp<Integer, Boolean> makeInt(IntPredicate predicate,
MatchKind matchKind) {
Objects.requireNonNull(predicate);
Objects.requireNonNull(matchKind);
class MatchSink extends BooleanTerminalSink<Integer> implements Sink.OfInt {
MatchSink() {
super(matchKind);
}
@Override
public void accept(int t) {
if (!stop && predicate.test(t) == matchKind.stopOnPredicateMatches) {
stop = true;
value = matchKind.shortCircuitResult;
}
}
}
return new MatchOp<>(StreamShape.INT_VALUE, matchKind, MatchSink::new);
}
|
java
|
public List<Integer> findIntValues(JvmAnnotationTarget op, Class<? extends Annotation> annotationType) {
final JvmAnnotationReference reference = this.lookup.findAnnotation(op, annotationType);
if (reference != null) {
return findIntValues(reference);
}
return null;
}
|
java
|
public Jdbi build(Environment environment,
PooledDataSourceFactory configuration,
String name) {
final ManagedDataSource dataSource = configuration.build(environment.metrics(), name);
return build(environment, configuration, dataSource, name);
}
|
python
|
def default_output_name(self, input_file):
""" Derive a default output name from the ELF name. """
irom_segment = self.get_irom_segment()
if irom_segment is not None:
irom_offs = irom_segment.addr - ESP8266ROM.IROM_MAP_START
else:
irom_offs = 0
return "%s-0x%05x.bin" % (os.path.splitext(input_file)[0],
irom_offs & ~(ESPLoader.FLASH_SECTOR_SIZE - 1))
|
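The bitmask in the format string rounds the offset down to a sector boundary; a minimal sketch of that arithmetic, assuming a 4 KiB sector size (the real ESPLoader.FLASH_SECTOR_SIZE may differ):

FLASH_SECTOR_SIZE = 0x1000  # assumed 4 KiB; must be a power of two

def align_down(offset, sector=FLASH_SECTOR_SIZE):
    # clearing the low bits rounds down to the nearest sector boundary
    return offset & ~(sector - 1)

irom_offs = 0x40201010 - 0x40200000      # example segment offset: 0x1010
print("%#07x" % align_down(irom_offs))   # 0x01000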
java
|
private boolean willAttributeModifyModel(String attributeName, Object newValue) {
Object currentValue = get(attributeName);
return currentValue != null ? !currentValue.equals(newValue) : newValue != null;
}
|
java
|
public ClientNetworkConfig addOutboundPortDefinition(String portDef) {
if (outboundPortDefinitions == null) {
outboundPortDefinitions = new HashSet<String>();
}
outboundPortDefinitions.add(portDef);
return this;
}
|
java
|
public static ILigand[] order(ILigand[] ligands) {
ILigand[] newLigands = new ILigand[ligands.length];
System.arraycopy(ligands, 0, newLigands, 0, ligands.length);
Arrays.sort(newLigands, cipRule);
return newLigands;
}
|
python
|
def create(self, ignore=None):
"""Yield tuple with created index name and responses from a client."""
ignore = ignore or []
def _create(tree_or_filename, alias=None):
"""Create indices and aliases by walking DFS."""
# Iterate over aliases:
for name, value in tree_or_filename.items():
if isinstance(value, dict):
for result in _create(value, alias=name):
yield result
else:
with open(value, 'r') as body:
yield name, self.client.indices.create(
index=name,
body=json.load(body),
ignore=ignore,
)
if alias:
yield alias, self.client.indices.put_alias(
index=list(_get_indices(tree_or_filename)),
name=alias,
ignore=ignore,
)
for result in _create(self.active_aliases):
yield result
|
python
|
def resolve(self):
"""Determine the final input value."""
if self.source:
result = self.source[1][self.source[0]]
if result:
return result
return self.default
|
java
|
public void jumpahead(int count) {
if (count < 0) {
throw new IllegalArgumentException();
}
if (buf != null) {
bufPos += count;
if (bufPos > buf.length) {
throw new IllegalArgumentException();
}
if (bufPos == buf.length) {
buf = null;
}
} else {
int i = pos.getIndex() + count;
pos.setIndex(i);
if (i > text.length()) {
throw new IllegalArgumentException();
}
}
}
|
python
|
def as_int(width, height):
"Return an integer ratio tuple like (16, 9)."
gcd = get_gcd(width, height)
return int(width / gcd), int(height / gcd)
|
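A runnable sketch of the same reduction using math.gcd (assuming the original get_gcd behaves like the standard greatest common divisor):

from math import gcd

def as_int(width, height):
    g = gcd(width, height)
    return width // g, height // g

assert as_int(1920, 1080) == (16, 9)
assert as_int(1280, 1024) == (5, 4)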
python
|
def forget_fact(term):
"""
Forgets a fact by removing it from the database
"""
logger.info('Removing fact %s', term)
db.facts.remove({'term': term_regex(term)})
return random.choice(ACKS)
|
java
|
@Override
public CallableStatement prepareCall(String sql) throws SQLException
{
return ProxyFactory.getProxyCallableStatement(this, trackStatement(delegate.prepareCall(sql)));
}
|
java
|
public Observable<ServiceResponse<FoundFaces>> findFacesWithServiceResponseAsync(FindFacesOptionalParameter findFacesOptionalParameter) {
if (this.client.baseUrl() == null) {
throw new IllegalArgumentException("Parameter this.client.baseUrl() is required and cannot be null.");
}
final Boolean cacheImage = findFacesOptionalParameter != null ? findFacesOptionalParameter.cacheImage() : null;
return findFacesWithServiceResponseAsync(cacheImage);
}
|
java
|
private void getControllingProcessDirectory(long timestamp, String pid) {
int count = 0;
while (ivSubDirectory == null) {
ivSubDirectory = makeLogDirectory(timestamp, pid);
if (ivSubDirectory == null) {
if (++count > FAILED_MAX_COUNT) {
                ivSubDirectory = makeLogDirectory(timestamp, pid, true);
                if (ivSubDirectory == null) {
                    if (debugLogger.isLoggable(Level.FINE) && isDebugEnabled())
                    {
                        debugLogger.logp(Level.FINE, thisClass, "UnableToMakeDirectory", "Unable to forcefully create instance directory, throwing RuntimeException");
}
throw new RuntimeException("Failed to create instance log repository. See SystemOut.log for details.");
}
}
try {
Thread.sleep(FAILED_WAIT_TIME);
} catch (InterruptedException ex) {
// Ignore it, assume that we had enough sleep.
}
}
}
}
|
java
|
@Override
public double validate(IMolecularFormula formula) throws CDKException {
logger.info("Start validation of ", formula);
double totalExactMass = MolecularFormulaManipulator.getTotalExactMass(formula);
if (Math.abs(totalExactMass - mass) > tolerance)
return 0.0;
else
return 1.0;
}
|
python
|
def get_instance(self, payload):
"""
Build an instance of EventInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.event.EventInstance
:rtype: twilio.rest.taskrouter.v1.workspace.event.EventInstance
"""
return EventInstance(self._version, payload, workspace_sid=self._solution['workspace_sid'], )
|
python
|
def _retry(self, state):
'''
Starts a single try of the whole restart path.
'''
state.attempt += 1
self.debug('Starting restart attempt: %d.', state.attempt)
if self._cmp_strategy(RestartStrategy.buryme):
        self.debug('Agent %r is going to be buried according to his '
                   'last will.', state.factory.descriptor_type)
return self._send_buried_notifications()
else:
f = self._set_restart_flag()
f.add_callback(fiber.drop_param, self._send_died_notifications)
f.add_both(self._ensure_someone_took_responsability)
return f
|
python
|
def find_parent_id_for_component(self, component_id):
"""
Given the URL to a component, returns the parent component's URL.
:param string component_id: The URL of the component.
:return: A tuple containing:
* The type of the parent record; valid values are ArchivesSpaceClient.RESOURCE and ArchivesSpaceClient.RESOURCE_COMPONENT.
* The URL of the parent record.
If the provided URL fragment references a resource, this method will simply return the same URL.
    :rtype: tuple
"""
response = self.get_record(component_id)
if "parent" in response:
return (ArchivesSpaceClient.RESOURCE_COMPONENT, response["parent"]["ref"])
# if this is the top archival object, return the resource instead
elif "resource" in response:
return (ArchivesSpaceClient.RESOURCE, response["resource"]["ref"])
# resource was passed in, which has no higher-up record;
# return the same ID
else:
return (ArchivesSpaceClient.RESOURCE, component_id)
|
java
|
public static Type getOuter(final Type type) {
if (type instanceof ParameterizedType) {
// could contain outer generics
return ((ParameterizedType) type).getOwnerType();
}
return isInner(type)
? GenericsUtils.resolveClassIgnoringVariables(type).getEnclosingClass()
: null;
}
|
java
|
private void handleSerialApiGetInitDataResponse(
SerialMessage incomingMessage) {
        logger.debug("Got MessageSerialApiGetInitData response.");
this.isConnected = true;
int nodeBytes = incomingMessage.getMessagePayloadByte(2);
if (nodeBytes != NODE_BYTES) {
logger.error("Invalid number of node bytes = {}", nodeBytes);
return;
}
int nodeId = 1;
// loop bytes
for (int i = 3;i < 3 + nodeBytes;i++) {
int incomingByte = incomingMessage.getMessagePayloadByte(i);
// loop bits in byte
for (int j=0;j<8;j++) {
int b1 = incomingByte & (int)Math.pow(2.0D, j);
int b2 = (int)Math.pow(2.0D, j);
if (b1 == b2) {
logger.info(String.format("Found node id = %d", nodeId));
// Place nodes in the local ZWave Controller
this.zwaveNodes.put(nodeId, new ZWaveNode(this.homeId, nodeId, this));
this.getNode(nodeId).advanceNodeStage();
}
nodeId++;
}
}
logger.info("------------Number of Nodes Found Registered to ZWave Controller------------");
logger.info(String.format("# Nodes = %d", this.zwaveNodes.size()));
logger.info("----------------------------------------------------------------------------");
// Advance node stage for the first node.
}
|
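The nested loops above recover node ids from a bitmask, one bit per node; a Python sketch of the same decoding (with bit shifts in place of Math.pow, and an invented two-byte payload):

def present_node_ids(node_bitmask_bytes):
    """Yield 1-based node ids for every set bit, least-significant
    bit first within each byte, as in the Java handler above."""
    node_id = 1
    for byte in node_bitmask_bytes:
        for bit in range(8):
            if byte & (1 << bit):
                yield node_id
            node_id += 1

# nodes 1, 2 and 10 present in this example payload
assert list(present_node_ids([0b00000011, 0b00000010])) == [1, 2, 10]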
java
|
static void checkNotNull(Object o, String arg) {
if (o == null) {
throw new IllegalArgumentException(String.format("'%s' must not be null.", arg));
}
}
|
python
|
def _parse_accounts(config):
"""
read accounts information from config
:param config: valit alot config
:type config: `configobj.ConfigObj`
:returns: list of accounts
"""
accounts = []
if 'accounts' in config:
for acc in config['accounts'].sections:
accsec = config['accounts'][acc]
args = dict(config['accounts'][acc].items())
# create abook for this account
abook = accsec['abook']
logging.debug('abook defined: %s', abook)
if abook['type'] == 'shellcommand':
cmd = abook['command']
regexp = abook['regexp']
if cmd is not None and regexp is not None:
ef = abook['shellcommand_external_filtering']
args['abook'] = ExternalAddressbook(
cmd, regexp, external_filtering=ef)
else:
msg = 'underspecified abook of type \'shellcommand\':'
msg += '\ncommand: %s\nregexp:%s' % (cmd, regexp)
raise ConfigError(msg)
elif abook['type'] == 'abook':
contacts_path = abook['abook_contacts_file']
args['abook'] = AbookAddressBook(
contacts_path, ignorecase=abook['ignorecase'])
else:
del args['abook']
cmd = args['sendmail_command']
del args['sendmail_command']
newacc = SendmailAccount(cmd, **args)
accounts.append(newacc)
return accounts
|
python
|
def uuid4(self):
"""Make an id in the format of UUID4, but keep in mind this could very well be pseudorandom, and if it is you'll not be truely random, and can regenerate same id if same seed"""
return ''.join([hexchars[self.randint(0,15)] for x in range(0,8)]) + '-' +\
''.join([hexchars[self.randint(0,15)] for x in range(0,4)]) + '-' +\
'4'+''.join([hexchars[self.randint(0,15)] for x in range(0,3)]) + '-' +\
uuid4special[self.randint(0,3)]+''.join([hexchars[self.randint(0,15)] for x in range(0,3)]) + '-' +\
''.join([hexchars[self.randint(0,15)] for x in range(0,12)])
|
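A compact, seeded variant showing the structure of what the method emits: the third group is pinned to version 4, and the first nibble of the fourth group to the RFC 4122 variant. hexchars and uuid4special are assumed to be the usual hex digits and the '8'/'9'/'a'/'b' variant nibbles:

import random

HEXCHARS = "0123456789abcdef"
UUID4_SPECIAL = "89ab"  # valid variant nibbles for UUID4

def pseudo_uuid4(rng):
    h = lambda n: ''.join(rng.choice(HEXCHARS) for _ in range(n))
    return '-'.join([h(8), h(4), '4' + h(3),
                     rng.choice(UUID4_SPECIAL) + h(3), h(12)])

rng = random.Random(42)   # same seed -> same id, as the docstring warns
print(pseudo_uuid4(rng))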
java
|
private void ensureLookahead(int lookahead) {
for (int i = savedTokens.size() ; i < lookahead ; i ++) {
savedTokens.add(tokenizer.readToken());
}
}
|
java
|
public ChangesHolder getChanges(final Set<String> removedNodes, final Set<String> addedNodes)
{
if (handler != null)
{
Iterator<NodeData> addedStates = new Iterator<NodeData>()
{
private final Iterator<String> iter = addedNodes.iterator();
public boolean hasNext()
{
return iter.hasNext();
}
public NodeData next()
{
            // cycle till we find the next node or reach the end of the set
do
{
String id = iter.next();
try
{
ItemData item = itemMgr.getItemData(id);
if (item != null)
{
if (item.isNode())
{
if (!indexingTree.isExcluded(item))
{
return (NodeData)item;
}
}
else
{
LOG.warn("Node not found, but property " + id + ", " + item.getQPath().getAsString()
+ " found. ");
}
}
else
{
LOG.warn("Unable to index node with id " + id + ", node does not exist.");
}
}
catch (RepositoryException e)
{
LOG.error("Can't read next node data " + id, e);
}
}
while (iter.hasNext()); // get next if error or node not found
return null; // we met the end of iterator set
}
public void remove()
{
throw new UnsupportedOperationException();
}
};
Iterator<String> removedIds = new Iterator<String>()
{
private final Iterator<String> iter = removedNodes.iterator();
public boolean hasNext()
{
return iter.hasNext();
}
public String next()
{
return nextNodeId();
}
public String nextNodeId() throws NoSuchElementException
{
return iter.next();
}
public void remove()
{
throw new UnsupportedOperationException();
}
};
if (removedNodes.size() > 0 || addedNodes.size() > 0)
{
return handler.getChanges(removedIds, addedStates);
}
}
return null;
}
|
java
|
public static void validateNotEmptyAndNotEqual(Object t1, Object t2, String errorMsg) throws ValidateException {
validateNotEmpty(t1, errorMsg);
validateNotEqual(t1, t2, errorMsg);
}
|
python
|
def update(self):
'''
Update the income process, the assets grid, the permanent income grid,
the medical shock distribution, and the terminal solution.
Parameters
----------
none
Returns
-------
none
'''
self.updateIncomeProcess()
self.updateAssetsGrid()
self.updatepLvlNextFunc()
self.updatepLvlGrid()
self.updateMedShockProcess()
self.updateSolutionTerminal()
|
python
|
def as_dict(self):
"""json friendly dict representation of Kpoints"""
d = {"comment": self.comment, "nkpoints": self.num_kpts,
"generation_style": self.style.name, "kpoints": self.kpts,
"usershift": self.kpts_shift,
"kpts_weights": self.kpts_weights, "coord_type": self.coord_type,
"labels": self.labels, "tet_number": self.tet_number,
"tet_weight": self.tet_weight,
"tet_connections": self.tet_connections}
optional_paras = ["genvec1", "genvec2", "genvec3", "shift"]
for para in optional_paras:
if para in self.__dict__:
d[para] = self.__dict__[para]
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
|
java
|
protected boolean isRestoreGoalPresent() {
Plugin ekstaziPlugin = lookupPlugin(EKSTAZI_PLUGIN_KEY);
if (ekstaziPlugin == null) {
return false;
}
for (Object execution : ekstaziPlugin.getExecutions()) {
for (Object goal : ((PluginExecution) execution).getGoals()) {
if (((String) goal).equals("restore")) {
return true;
}
}
}
return false;
}
|
python
|
def toTFExample(dtypes):
"""mapPartition function to convert a Spark RDD of Row into an RDD of serialized tf.train.Example bytestring.
Note that tf.train.Example is a fairly flat structure with limited datatypes, e.g. tf.train.FloatList,
tf.train.Int64List, and tf.train.BytesList, so most DataFrame types will be coerced into one of these types.
Args:
:dtypes: the DataFrame.dtypes of the source DataFrame.
Returns:
A mapPartition function which converts the source DataFrame into tf.train.Example bytestrings.
"""
def _toTFExample(iter):
# supported type mappings between DataFrame.dtypes and tf.train.Feature types
float_dtypes = ['float', 'double']
int64_dtypes = ['boolean', 'tinyint', 'smallint', 'int', 'bigint', 'long']
bytes_dtypes = ['binary', 'string']
float_list_dtypes = ['array<float>', 'array<double>']
int64_list_dtypes = ['array<boolean>', 'array<tinyint>', 'array<smallint>', 'array<int>', 'array<bigint>', 'array<long>']
def _toTFFeature(name, dtype, row):
feature = None
if dtype in float_dtypes:
feature = (name, tf.train.Feature(float_list=tf.train.FloatList(value=[row[name]])))
elif dtype in int64_dtypes:
feature = (name, tf.train.Feature(int64_list=tf.train.Int64List(value=[row[name]])))
elif dtype in bytes_dtypes:
if dtype == 'binary':
feature = (name, tf.train.Feature(bytes_list=tf.train.BytesList(value=[bytes(row[name])])))
else:
feature = (name, tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(row[name]).encode('utf-8')])))
elif dtype in float_list_dtypes:
feature = (name, tf.train.Feature(float_list=tf.train.FloatList(value=row[name])))
elif dtype in int64_list_dtypes:
feature = (name, tf.train.Feature(int64_list=tf.train.Int64List(value=row[name])))
else:
raise Exception("Unsupported dtype: {0}".format(dtype))
return feature
results = []
for row in iter:
features = dict([_toTFFeature(name, dtype, row) for name, dtype in dtypes])
example = tf.train.Example(features=tf.train.Features(feature=features))
results.append((bytearray(example.SerializeToString()), None))
return results
return _toTFExample
|
python
|
def equal(actual, expected):
'''
Compare actual and expected using ==
>>> expect = Expector([])
>>> expect(1).to_not(equal, 2)
(True, 'equal: expect 1 == 2')
>>> expect(1).to(equal, 1)
(True, 'equal: expect 1 == 1')
'''
is_passing = (actual == expected)
types_to_diff = (str, dict, list, tuple)
if not is_passing and isinstance(expected, types_to_diff) and isinstance(actual, types_to_diff):
readable_diff = difflib.unified_diff(pformat(expected).split('\n'),
pformat(actual).split('\n'), n=99)
description = '\n'.join(['equal:'] + list(readable_diff))
else:
description = "equal: expect {} == {}".format(actual, expected)
outcome = (is_passing, description)
return outcome
|
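The interesting branch is the diff: for container types, a unified diff of the pretty-printed values replaces the one-line message. A standalone sketch of just that formatting step:

import difflib
from pprint import pformat

expected = {'a': 1, 'b': 2}
actual = {'a': 1, 'b': 3}

readable_diff = difflib.unified_diff(pformat(expected).split('\n'),
                                     pformat(actual).split('\n'), n=99)
print('\n'.join(['equal:'] + list(readable_diff)))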
java
|
public static void ensureMouseOver(Element element) {
NativeEvent nativeEvent = Document.get().createMouseOverEvent(
0,
0,
0,
0,
0,
false,
false,
false,
false,
0,
null);
element.dispatchEvent(nativeEvent);
}
|
java
|
public void saveModification(MaterialInstance materialInstance, Modification modification) {
modification.setMaterialInstance(materialInstance);
try {
getHibernateTemplate().saveOrUpdate(modification);
removeLatestCachedModification(materialInstance, modification);
removeCachedModificationCountFor(materialInstance);
removeCachedModificationsFor(materialInstance);
} catch (Exception e) {
String message = "Cannot save modification " + modification;
LOGGER.error(message, e);
throw new RuntimeException(message, e);
}
}
|
java
|
@Override
public final void fill(final Map<String, Object> pAddParam,
final Object pEntity, final String pFieldName,
final String pFieldStrValue) throws Exception {
if (SeSeller.class != pEntity.getClass()) {
throw new ExceptionWithCode(ExceptionWithCode
.CONFIGURATION_MISTAKE, "It's wrong service to fill that field: "
+ pEntity + "/" + pFieldName + "/" + pFieldStrValue);
}
SeSeller seSeller = (SeSeller) pEntity;
if ("NULL".equals(pFieldStrValue)) {
seSeller.setUserAuth(null);
return;
}
try {
UserTomcat ownedEntity = new UserTomcat();
ownedEntity.setItsUser(pFieldStrValue);
seSeller.setUserAuth(ownedEntity);
} catch (Exception ex) {
throw new ExceptionWithCode(ExceptionWithCode
.WRONG_PARAMETER, "Can not fill field: " + pEntity + "/" + pFieldName
+ "/" + pFieldStrValue + ", " + ex.getMessage(), ex);
}
}
|
java
|
private void centerOnCurrentItem() {
if(!mPieData.isEmpty()) {
PieModel current = mPieData.get(getCurrentItem());
int targetAngle;
if(mOpenClockwise) {
targetAngle = (mIndicatorAngle - current.getStartAngle()) - ((current.getEndAngle() - current.getStartAngle()) / 2);
if (targetAngle < 0 && mPieRotation > 0) targetAngle += 360;
}
else {
targetAngle = current.getStartAngle() + (current.getEndAngle() - current.getStartAngle()) / 2;
targetAngle += mIndicatorAngle;
if (targetAngle > 270 && mPieRotation < 90) targetAngle -= 360;
}
mAutoCenterAnimator.setIntValues(targetAngle);
mAutoCenterAnimator.setDuration(AUTOCENTER_ANIM_DURATION).start();
}
}
|
java
|
protected void moskitoDoPost(HttpServletRequest req, HttpServletResponse res) throws ServletException, IOException {
super.doPost(req, res);
}
|
python
|
def get_keystone_session(self, keystone_ip, username, password,
api_version=False, admin_port=False,
user_domain_name=None, domain_name=None,
project_domain_name=None, project_name=None):
"""Return a keystone session object"""
ep = self.get_keystone_endpoint(keystone_ip,
api_version=api_version,
admin_port=admin_port)
if api_version == 2:
auth = v2.Password(
username=username,
password=password,
tenant_name=project_name,
auth_url=ep
)
sess = keystone_session.Session(auth=auth)
else:
auth = v3.Password(
user_domain_name=user_domain_name,
username=username,
password=password,
domain_name=domain_name,
project_domain_name=project_domain_name,
project_name=project_name,
auth_url=ep
)
sess = keystone_session.Session(auth=auth)
return (sess, auth)
|
java
|
public <I, V> void add(Option<I, V> option, V value) {
getOrCreateValueContainer( option ).add( option.getOptionIdentifier(), value );
}
|
python
|
def is_installed(prog):
"""Return whether or not a given executable is installed on the machine."""
with open(os.devnull, 'w') as devnull:
try:
if os.name == 'nt':
retcode = subprocess.call(['where', prog], stdout=devnull)
else:
retcode = subprocess.call(['which', prog], stdout=devnull)
except OSError as e:
# If where or which doesn't exist, a "ENOENT" error will occur (The
# FileNotFoundError subclass on Python 3).
if e.errno != errno.ENOENT:
raise
retcode = 1
return retcode == 0
|
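Usage is a one-liner; a self-contained sketch of the same where/which probe (returning False when the locator command itself is missing):

import errno
import os
import subprocess

def is_installed(prog):
    locator = 'where' if os.name == 'nt' else 'which'
    with open(os.devnull, 'w') as devnull:
        try:
            return subprocess.call([locator, prog], stdout=devnull) == 0
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            return False  # the locator command itself is missing

print(is_installed('git'))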
java
|
protected <E extends Event> void linkWave(final Node node, final javafx.event.EventType<E> eventType, final WaveType waveType,
final WaveData<?>... waveData) {
linkWave(node, eventType, waveType, null, waveData);
}
|
java
|
public Bits readAhead(final int bytes) throws IOException {
assert (0 < bytes);
if (0 < bytes) {
final byte[] buffer = new byte[bytes];
int bytesRead = this.inner.read(buffer);
if (bytesRead > 0) {
this.remainder = this.remainder.concatenate(new Bits(Arrays.copyOf(buffer, bytesRead)));
}
}
return this.remainder;
}
|
python
|
def write_values(self, data, filepath=None, filename=None, indent=None, keys_to_write=None):
"""
Tries to write extra content to a JSON file.
Creates filename.temp with updated content, removes the old file and
finally renames the .temp to match the old file.
    This is in an effort to preserve the data in case some weird error causes problems.
:param filepath: Path to file
:param filename: Name of file
:param data: Data to write as a dictionary
:param indent: indent level for pretty printing the resulting file
:param keys_to_write: array of keys that are to be picked from data and written to file.
Default is None, when all data is written to file.
:return: Path to file used
    :raises: EnvironmentError, ValueError
"""
name = filename if filename else self.filename
path = filepath if filepath else self.filepath
name = self._ends_with(name, ".json")
path = self._ends_with(path, os.path.sep)
if not os.path.isfile(path + name):
try:
return self.write_file(data, path, name, indent, keys_to_write)
except EnvironmentError as error:
self.logger.error("Error while opening or writing to file: {}".format(error))
raise
except ValueError:
raise
if keys_to_write:
data_to_write = {}
for key in keys_to_write:
data_to_write[key] = data[key]
else:
data_to_write = data
try:
with open(path + name, 'r') as fil:
output = json.load(fil)
self.logger.info("Read contents of {}".format(filename))
for key in data_to_write:
try:
output[key] = data_to_write[key]
except TypeError as error:
self.logger.error(
"File contents could not be serialized into a dict. {}".format(error))
raise
self._write_json(path, name + ".temp", "w", output, indent)
FileUtils.remove_file(name, path)
FileUtils.rename_file(name + '.temp', name, path)
return os.path.join(path, name)
except EnvironmentError as error:
self.logger.error(
"Error while writing to, opening or reading the file: {}".format(error))
raise
except ValueError as error:
self.logger.error(
"File could not be decoded to JSON. It might be empty? {}".format(error))
try:
self._write_json(path, name, "w", data_to_write, indent)
return os.path.join(path, name)
except EnvironmentError:
raise
|
python
|
def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory]=None, initial_std_dev=0.4,
factorized_noise=True):
""" Vel factory function """
if input_block is None:
input_block = IdentityFactory()
return NoisyQModelFactory(
input_block=input_block, backbone=backbone, initial_std_dev=initial_std_dev, factorized_noise=factorized_noise
)
|
python
|
def _did_save(self, connection):
""" Launched when save has been successfully executed """
self._new_password = None
controller = NURESTSession.get_current_session().login_controller
controller.password = None
controller.api_key = self.api_key
    if getattr(connection, 'async'):  # 'async' is a reserved keyword in Python 3.7+
callback = connection.callbacks['remote']
if connection.user_info:
callback(connection.user_info, connection)
else:
callback(self, connection)
else:
return (self, connection)
|
python
|
def dump(args):
"""
%prog dump fastafile
Convert FASTA sequences to list of K-mers.
"""
p = OptionParser(dump.__doc__)
p.add_option("-K", default=23, type="int",
help="K-mer size [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
K = opts.K
fw = must_open(opts.outfile, "w")
f = Fasta(fastafile, lazy=True)
for name, rec in f.iteritems_ordered():
kmers = list(make_kmers(rec.seq, K))
print("\n".join(kmers), file=fw)
fw.close()
|
java
|
public void setTopContributors(java.util.Collection<Contributor> topContributors) {
if (topContributors == null) {
this.topContributors = null;
return;
}
this.topContributors = new java.util.ArrayList<Contributor>(topContributors);
}
|
java
|
public List<UUID> spawn(int nbAgents, Class<? extends Agent> agent, Object... params) {
return this.spawnService.spawn(nbAgents, null, this.janusContext, null, agent, params);
}
|
python
|
def bottom(self, features):
"""Transforms features to feed into body.
Args:
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
transformed_features: dict of same key-value pairs as features. The value
Tensors are newly transformed.
"""
if not self._problem_hparams:
log_warn("Without a Problem, T2TModel.bottom is a passthrough.")
return features
transformed_features = collections.OrderedDict()
all_previous_modalities = []
target_modality = _create_target_modality(self._problem_hparams.modality)
# Transform features via its corresponding modality.
for feature_name, modality in sorted(
six.iteritems(self._problem_hparams.modality)):
if feature_name not in features:
tf.logging.warning("Missing feature %s - ignoring." % feature_name)
continue
vocab_size = self._problem_hparams.vocab_size[feature_name]
if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"):
vocab_size += (-vocab_size) % self._hparams.vocab_divisor
modality_name = self._hparams.name.get(
feature_name,
modalities.get_name(modality))(self._hparams, vocab_size)
# Use if-else clauses to preserve behavior of previous changes: namely,
# the variable scope name for the targets feature if there is only one
# target modality; and to reuse variable scopes for only input modalities.
if feature_name in target_modality:
if len(target_modality) > 1:
variable_scope_name = "%s/%s" % (modality_name, feature_name)
else:
variable_scope_name = modality_name
bottom = self._hparams.bottom.get(
feature_name,
modalities.get_targets_bottom(modality))
# TODO(aidangomez): share variables?
with tf.variable_scope(variable_scope_name) as vs:
self._add_variable_scope(variable_scope_name, vs)
log_info("Transforming feature '%s' with %s.targets_bottom",
feature_name,
modality_name)
transformed_features[feature_name] = bottom(features[feature_name],
self._hparams,
vocab_size)
else:
bottom = self._hparams.bottom.get(feature_name,
modalities.get_bottom(modality))
do_reuse = modality_name in all_previous_modalities
with tf.variable_scope(modality_name, reuse=do_reuse) as vs:
self._add_variable_scope(modality_name, vs)
log_info("Transforming feature '%s' with %s.bottom",
feature_name,
modality_name)
transformed_features[feature_name] = bottom(features[feature_name],
self._hparams,
vocab_size)
all_previous_modalities.append(modality_name)
for key in features:
if key not in transformed_features:
# For features without a modality, we pass them along as is
transformed_features[key] = features[key]
else:
# Other features get passed along with the "raw" suffix
transformed_features[key + "_raw"] = features[key]
return transformed_features
|
java
|
@AroundInvoke
public Object notSupported(final InvocationContext context) throws Exception {
return runUnderUOWNoEnablement(UOWSynchronizationRegistry.UOW_TYPE_LOCAL_TRANSACTION, true, context, "NOT_SUPPORTED");
}
|
python
|
def metalayerize(user_func):
"""Wrap a function over a sequence of layers and an input into a layer."""
def returned(layers, *args, **kwargs):
def begin_update(X, *args, **kwargs):
return user_func(layers, X, *args, **kwargs)
return FunctionLayer(begin_update, *args, **kwargs)
return returned
|
java
|
public void savePlayerInfo(GamePlayerInfo playerInfo, final SaveInfoHandler handler){
HMSAgentLog.i("savePlayerInfo:playerInfo=" + StrUtils.objDesc(playerInfo) + " handler=" + StrUtils.objDesc(handler));
this.playerInfo = playerInfo;
this.handler = handler;
this.retryTimes = MAX_RETRY_TIMES;
connect();
}
|
java
|
@Override
public Future<?> buildAsync(final Callable<?> builder, final HttpServletRequest req) {
return getExecutors().getBuildExecutor().submit(new Callable<Object>() {
public Object call() throws Exception {
AbstractAggregatorImpl.this.currentRequest.set(req);
Object result;
try {
result = builder.call();
} finally {
AbstractAggregatorImpl.this.currentRequest.set(null);
}
return result;
}
});
}
|
java
|
public static void start(String name, Version version, Class<?> classResource)
{
Engine.start(new EngineHeadless(name, version, classResource));
}
|
python
|
def deletable(self):
"""True if the MessageHandler can be deleted."""
return bool(lib.EnvIsDefmessageHandlerDeletable(
self._env, self._cls, self._idx))
|
python
|
def geometryType(self):
""" returns the feature's geometry type """
if self._geomType is None:
if self.geometry is not None:
self._geomType = self.geometry.type
else:
self._geomType = "Table"
return self._geomType
|
python
|
def GetOutputPluginIndex(
plugin_descriptors,
plugin_id):
"""Gets an output plugin index for a plugin with a given id.
Historically output plugins descriptors were stored in dicts-like
structures with unique identifiers as keys. In REL_DB-based implementation,
however, both plugin descriptors and their states are stored in flat
lists (see Flow definition in flows.proto).
The ids were formed as "<plugin name>_<plugin index>" where plugin index
was incremented for every plugin with a same name. For example, if we had
EmailOutputPlugin and 2 BigQueryOutputPlugins, their ids would be:
EmailOutputPlugin_0, BigQueryOutputPlugin_0, BigQueryOutputPlugin_1.
To preserve backwards API compatibility, we emulate the old behavior by
  identifying plugins with the same plugin ids as before.
Args:
plugin_descriptors: An iterable of OutputPluginDescriptor objects.
plugin_id: Plugin id to search for.
Returns:
An index of a plugin in plugin_descriptors iterable corresponding to a
given plugin_id.
Raises:
OutputPluginNotFoundError: if no plugin corresponding to a given plugin_id
was found.
"""
used_names = collections.Counter()
for (index, desc) in enumerate(plugin_descriptors):
cur_plugin_id = "%s_%d" % (desc.plugin_name, used_names[desc.plugin_name])
used_names[desc.plugin_name] += 1
if cur_plugin_id == plugin_id:
return index
raise OutputPluginNotFoundError("Can't find output plugin %s" % plugin_id)
|
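The id scheme from the docstring is easy to reproduce; this sketch regenerates the EmailOutputPlugin / BigQueryOutputPlugin example with plain strings standing in for descriptor objects:

import collections

plugin_names = ["EmailOutputPlugin", "BigQueryOutputPlugin", "BigQueryOutputPlugin"]

used_names = collections.Counter()
plugin_ids = []
for name in plugin_names:
    plugin_ids.append("%s_%d" % (name, used_names[name]))
    used_names[name] += 1

assert plugin_ids == ["EmailOutputPlugin_0",
                      "BigQueryOutputPlugin_0",
                      "BigQueryOutputPlugin_1"]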
python
|
def p_expression_div(self, p):
'expression : expression DIVIDE expression'
p[0] = Divide(p[1], p[3], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1))
|
java
|
public DescribeClientVpnEndpointsRequest withClientVpnEndpointIds(String... clientVpnEndpointIds) {
if (this.clientVpnEndpointIds == null) {
setClientVpnEndpointIds(new com.amazonaws.internal.SdkInternalList<String>(clientVpnEndpointIds.length));
}
for (String ele : clientVpnEndpointIds) {
this.clientVpnEndpointIds.add(ele);
}
return this;
}
|
python
|
def _create_archive(self):
'''This will create a tar.gz compressed archive of the scrubbed directory'''
try:
self.archive_path = os.path.join(self.report_dir, "%s.tar.gz" % self.session)
self.logger.con_out('Creating SOSCleaner Archive - %s', self.archive_path)
t = tarfile.open(self.archive_path, 'w:gz')
for dirpath, dirnames, filenames in os.walk(self.dir_path):
for f in filenames:
f_full = os.path.join(dirpath, f)
f_archive = f_full.replace(self.report_dir,'')
self.logger.debug('adding %s to %s archive', f_archive, self.archive_path)
t.add(f_full, arcname=f_archive)
except Exception as e: #pragma: no cover
self.logger.exception(e)
raise Exception('CreateArchiveError: Unable to create Archive')
self._clean_up()
self.logger.info('Archiving Complete')
self.logger.con_out('SOSCleaner Complete')
if not self.quiet: # pragma: no cover
t.add(self.logfile, arcname=self.logfile.replace(self.report_dir,''))
t.close()
|
java
|
public static JMenuBar getAlignmentPanelMenu(JFrame frame,
ActionListener actionListener, AFPChain afpChain,
MultipleAlignment msa){
JMenuBar menu = new JMenuBar();
JMenu file= new JMenu("File");
file.getAccessibleContext().setAccessibleDescription("File Menu");
menu.add(file);
ImageIcon saveicon = createImageIcon("/icons/filesave.png");
JMenuItem saveF = null;
if (saveicon != null)
saveF = new JMenuItem("Save text display", saveicon);
else
saveF = new JMenuItem("Save text display");
saveF.setMnemonic(KeyEvent.VK_S);
MySaveFileListener listener = new MySaveFileListener(afpChain, msa);
listener.setTextOutput(true);
saveF.addActionListener(listener);
file.add(saveF);
file.addSeparator();
JMenuItem print = getPrintMenuItem();
print.addActionListener(actionListener);
file.add(print);
file.addSeparator();
JMenuItem closeI = MenuCreator.getCloseMenuItem(frame);
file.add(closeI);
JMenuItem exitI = MenuCreator.getExitMenuItem();
file.add(exitI);
JMenu edit = new JMenu("Edit");
edit.setMnemonic(KeyEvent.VK_E);
menu.add(edit);
JMenuItem eqrI = MenuCreator.getIcon(actionListener,SELECT_EQR);
edit.add(eqrI);
JMenuItem eqrcI = MenuCreator.getIcon(actionListener,EQR_COLOR);
edit.add(eqrcI);
JMenuItem simI = MenuCreator.getIcon(actionListener, SIMILARITY_COLOR);
edit.add(simI);
JMenuItem fatcatI = MenuCreator.getIcon(actionListener, FATCAT_BLOCK );
edit.add(fatcatI);
JMenu view= new JMenu("View");
view.getAccessibleContext().setAccessibleDescription("View Menu");
view.setMnemonic(KeyEvent.VK_V);
menu.add(view);
JMenuItem textI = MenuCreator.getIcon(actionListener,TEXT_ONLY);
view.add(textI);
JMenuItem fastaI = MenuCreator.getIcon(actionListener,FASTA_FORMAT);
view.add(fastaI);
JMenuItem pairsI = MenuCreator.getIcon(actionListener,PAIRS_ONLY);
view.add(pairsI);
JMenuItem textF = MenuCreator.getIcon(actionListener,FATCAT_TEXT);
view.add(textF);
JMenu about = new JMenu("Help");
about.setMnemonic(KeyEvent.VK_A);
JMenuItem helpM = MenuCreator.getHelpMenuItem();
about.add(helpM);
JMenuItem aboutM = MenuCreator.getAboutMenuItem();
about.add(aboutM);
menu.add(Box.createGlue());
menu.add(about);
return menu;
}
|
python
|
def look(table, limit=0, vrepr=None, index_header=None, style=None,
truncate=None, width=None):
"""
Format a portion of the table as text for inspection in an interactive
session. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar'],
... ['a', 1],
... ['b', 2]]
>>> etl.look(table1)
+-----+-----+
| foo | bar |
+=====+=====+
| 'a' | 1 |
+-----+-----+
| 'b' | 2 |
+-----+-----+
>>> # alternative formatting styles
... etl.look(table1, style='simple')
=== ===
foo bar
=== ===
'a' 1
'b' 2
=== ===
>>> etl.look(table1, style='minimal')
foo bar
'a' 1
'b' 2
>>> # any irregularities in the length of header and/or data
... # rows will appear as blank cells
... table2 = [['foo', 'bar'],
... ['a'],
... ['b', 2, True]]
>>> etl.look(table2)
+-----+-----+------+
| foo | bar | |
+=====+=====+======+
| 'a' | | |
+-----+-----+------+
| 'b' | 2 | True |
+-----+-----+------+
Three alternative presentation styles are available: 'grid', 'simple' and
'minimal', where 'grid' is the default. A different style can be specified
using the `style` keyword argument. The default style can also be changed
by setting ``petl.config.look_style``.
"""
# determine defaults
if limit == 0:
limit = config.look_limit
if vrepr is None:
vrepr = config.look_vrepr
if index_header is None:
index_header = config.look_index_header
if style is None:
style = config.look_style
if width is None:
width = config.look_width
return Look(table, limit=limit, vrepr=vrepr, index_header=index_header,
style=style, truncate=truncate, width=width)
|
java
|
static FormattingTuple format(final String messagePattern,
Object argA, Object argB) {
return arrayFormat(messagePattern, new Object[]{argA, argB});
}
|
java
|
public static WebDriver getFirefoxDriverWithJSSettings(
final String userAgent, final boolean javascriptEnabled) {
FirefoxProfile profile = new FirefoxProfile();
profile.setPreference("general.useragent.override", userAgent);
profile.setPreference("javascript.enabled", javascriptEnabled);
WebDriver driver = new FirefoxDriver(profile);
return driver;
}
|
java
|
private String getLastWorkflowForUser() {
CmsUser user = getCmsObject().getRequestContext().getCurrentUser();
return (String)user.getAdditionalInfo(PARAM_WORKFLOW_ID);
}
|
java
|
@Override
public Object eGet(int featureID, boolean resolve, boolean coreType) {
switch (featureID) {
case BpsimPackage.DISTRIBUTION_PARAMETER__CURRENCY_UNIT:
return getCurrencyUnit();
case BpsimPackage.DISTRIBUTION_PARAMETER__TIME_UNIT:
return getTimeUnit();
}
return super.eGet(featureID, resolve, coreType);
}
|
java
|
private void addQueryParams(final Request request) {
if (status != null) {
request.addQueryParam("Status", status.toString());
}
if (phoneNumber != null) {
request.addQueryParam("PhoneNumber", phoneNumber.toString());
}
if (incomingPhoneNumberSid != null) {
request.addQueryParam("IncomingPhoneNumberSid", incomingPhoneNumberSid);
}
if (friendlyName != null) {
request.addQueryParam("FriendlyName", friendlyName);
}
if (uniqueName != null) {
request.addQueryParam("UniqueName", uniqueName);
}
if (getPageSize() != null) {
request.addQueryParam("PageSize", Integer.toString(getPageSize()));
}
}
|
python
|
def dynamics_from_bundle_bs(b, times, compute=None, return_roche_euler=False, **kwargs):
"""
Parse parameters in the bundle and call :func:`dynamics`.
See :func:`dynamics` for more detailed information.
NOTE: you must either provide compute (the label) OR all relevant options
as kwargs (ltte)
    Args:
        b: (Bundle) the bundle with a set hierarchy
        times: (list or array) times at which to run the dynamics
        ltte: (bool, default False) whether to account for light travel time effects.
    Note:
        stepsize (0.01) and orbiterror (1e-16) for the integration are currently
        hardcoded in the function body rather than accepted as arguments.
Returns:
t, xs, ys, zs, vxs, vys, vzs. t is a numpy array of all times,
the remaining are a list of numpy arrays (a numpy array per
star - in order given by b.hierarchy.get_stars()) for the cartesian
positions and velocities of each star at those same times.
"""
stepsize = 0.01
orbiterror = 1e-16
computeps = b.get_compute(compute, check_visible=False, force_ps=True)
ltte = computeps.get_value('ltte', check_visible=False, **kwargs)
hier = b.hierarchy
starrefs = hier.get_stars()
orbitrefs = hier.get_orbits()
def mean_anom(t0, t0_perpass, period):
# TODO: somehow make this into a constraint where t0 and mean anom
# are both in the compute options if dynamic_method==nbody
# (one is constrained from the other and the orbit.... nvm, this gets ugly)
return 2 * np.pi * (t0 - t0_perpass) / period
masses = [b.get_value('mass', u.solMass, component=component, context='component') * c.G.to('AU3 / (Msun d2)').value for component in starrefs] # GM
smas = [b.get_value('sma', u.AU, component=component, context='component') for component in orbitrefs]
eccs = [b.get_value('ecc', component=component, context='component') for component in orbitrefs]
incls = [b.get_value('incl', u.rad, component=component, context='component') for component in orbitrefs]
per0s = [b.get_value('per0', u.rad, component=component, context='component') for component in orbitrefs]
long_ans = [b.get_value('long_an', u.rad, component=component, context='component') for component in orbitrefs]
t0_perpasses = [b.get_value('t0_perpass', u.d, component=component, context='component') for component in orbitrefs]
periods = [b.get_value('period', u.d, component=component, context='component') for component in orbitrefs]
vgamma = b.get_value('vgamma', context='system', unit=u.solRad/u.d)
t0 = b.get_value('t0', context='system', unit=u.d)
# mean_anoms = [mean_anom(t0, t0_perpass, period) for t0_perpass, period in zip(t0_perpasses, periods)]
mean_anoms = [b.get_value('mean_anom', u.rad, component=component, context='component') for component in orbitrefs]
return dynamics_bs(times, masses, smas, eccs, incls, per0s, long_ans, \
mean_anoms, t0, vgamma, stepsize, orbiterror, ltte,
return_roche_euler=return_roche_euler)
|
java
|
public synchronized I_CmsResourceCollector addContentCollector(String className, String order)
throws CmsConfigurationException {
Class<?> classClazz;
// init class for content collector
try {
classClazz = Class.forName(className);
} catch (ClassNotFoundException e) {
LOG.error(Messages.get().getBundle().key(Messages.LOG_CONTENT_COLLECTOR_CLASS_NOT_FOUND_1, className), e);
return null;
}
I_CmsResourceCollector collector;
try {
collector = (I_CmsResourceCollector)classClazz.newInstance();
} catch (InstantiationException e) {
throw new CmsConfigurationException(
Messages.get().container(Messages.ERR_INVALID_COLLECTOR_NAME_1, className));
} catch (IllegalAccessException e) {
throw new CmsConfigurationException(
Messages.get().container(Messages.ERR_INVALID_COLLECTOR_NAME_1, className));
} catch (ClassCastException e) {
throw new CmsConfigurationException(
Messages.get().container(Messages.ERR_INVALID_COLLECTOR_NAME_1, className));
}
// set the configured order for the collector
int ord = 0;
try {
ord = Integer.valueOf(order).intValue();
} catch (NumberFormatException e) {
LOG.error(Messages.get().getBundle().key(Messages.LOG_COLLECTOR_BAD_ORDER_NUMBER_1, className), e);
}
collector.setOrder(ord);
if (CmsLog.INIT.isInfoEnabled()) {
CmsLog.INIT.info(Messages.get().getBundle().key(Messages.INIT_ADD_COLLECTOR_CLASS_2, className, order));
}
// extend or init the current list of configured collectors
if (m_collectors != null) {
m_collectors = new ArrayList<I_CmsResourceCollector>(m_collectors);
m_collectorNameMappings = new HashMap<String, I_CmsResourceCollector>(m_collectorNameMappings);
} else {
m_collectors = new ArrayList<I_CmsResourceCollector>();
m_collectorNameMappings = new HashMap<String, I_CmsResourceCollector>();
}
if (!m_collectors.contains(collector)) {
// this is a collector not currently configured
m_collectors.add(collector);
Iterator<String> i = collector.getCollectorNames().iterator();
while (i.hasNext()) {
String name = i.next();
if (m_collectorNameMappings.containsKey(name)) {
// this name is already configured, check the order of the collector
I_CmsResourceCollector otherCollector = m_collectorNameMappings.get(name);
if (collector.getOrder() > otherCollector.getOrder()) {
// new collector has a greater order than the old collector in the Map
m_collectorNameMappings.put(name, collector);
if (CmsLog.INIT.isInfoEnabled()) {
CmsLog.INIT.info(Messages.get().getBundle().key(Messages.INIT_COLLECTOR_REPLACED_1, name));
}
} else {
if (CmsLog.INIT.isInfoEnabled()) {
CmsLog.INIT.info(
Messages.get().getBundle().key(Messages.INIT_DUPLICATE_COLLECTOR_SKIPPED_1, name));
}
}
} else {
m_collectorNameMappings.put(name, collector);
if (CmsLog.INIT.isInfoEnabled()) {
CmsLog.INIT.info(Messages.get().getBundle().key(Messages.INIT_ADD_COLLECTOR_1, name));
}
}
}
}
// ensure list is unmodifiable to avoid potential misuse or accidental changes
Collections.sort(m_collectors);
m_collectors = Collections.unmodifiableList(m_collectors);
m_collectorNameMappings = Collections.unmodifiableMap(m_collectorNameMappings);
// return the created collector instance
return collector;
}
|
java
|
public static ConfigParams readConfig(String correlationId, String path, ConfigParams parameters)
throws ApplicationException {
return new JsonConfigReader(path).readConfig(correlationId, parameters);
}
|
java
|
private static URL getConfigUrl() throws ConfigurationException {
String spec = System.getProperty(CONFIG_URL_PROPERTY_NAME);
if (spec == null) {
spec = DEFAULT_CONFIG_URL;
}
URL configUrl = null;
try {
configUrl = new URL(spec);
configUrl.openStream().close(); // catches well-formed but bogus URLs
} catch (Exception e) {
try {
File f = new File(spec);
if (f.exists()) {
configUrl = new URL("file:///" + f.getCanonicalPath());
}
} catch( Exception ex) {
}
}
if (configUrl == null) {
ClassLoader loader = ServerParams.class.getClassLoader();
configUrl = loader.getResource(spec);
if (configUrl == null) {
throw new ConfigurationException("Can't find file/resource: \"" + spec + "\".");
}
}
return configUrl;
}
|
java
|
public Waiter<GetDistributionRequest> distributionDeployed() {
return new WaiterBuilder<GetDistributionRequest, GetDistributionResult>().withSdkFunction(new GetDistributionFunction(client))
.withAcceptors(new DistributionDeployed.IsDeployedMatcher())
.withDefaultPollingStrategy(new PollingStrategy(new MaxAttemptsRetryStrategy(25), new FixedDelayStrategy(60)))
.withExecutorService(executorService).build();
}
|
java
|
private void addPostParams(final Request request) {
if (type != null) {
request.addPostParam("Type", type.toString());
}
if (configurationUrl != null) {
request.addPostParam("Configuration.Url", configurationUrl);
}
if (configurationMethod != null) {
request.addPostParam("Configuration.Method", configurationMethod.toString());
}
if (configurationFilters != null) {
for (String prop : configurationFilters) {
request.addPostParam("Configuration.Filters", prop);
}
}
if (configurationTriggers != null) {
for (String prop : configurationTriggers) {
request.addPostParam("Configuration.Triggers", prop);
}
}
if (configurationFlowSid != null) {
request.addPostParam("Configuration.FlowSid", configurationFlowSid);
}
if (configurationRetryCount != null) {
request.addPostParam("Configuration.RetryCount", configurationRetryCount.toString());
}
}
|
python
|
def _get_specifications(specifications):
"""
Computes the list of strings corresponding to the given specifications
:param specifications: A string, a class or a list of specifications
:return: A list of strings
:raise ValueError: Invalid specification found
"""
if not specifications or specifications is object:
raise ValueError("No specifications given")
elif inspect.isclass(specifications):
if Provides.USE_MODULE_QUALNAME:
if sys.version_info < (3, 3, 0):
raise ValueError(
"Qualified name capability requires Python 3.3+"
)
# Get the name of the class
if not specifications.__module__:
return [specifications.__qualname__]
return [
"{0}.{1}".format(
specifications.__module__, specifications.__qualname__
)
]
else:
# Legacy behavior
return [specifications.__name__]
elif is_string(specifications):
# Specification name
specifications = specifications.strip()
if not specifications:
raise ValueError("Empty specification given")
return [specifications]
elif isinstance(specifications, (list, tuple)):
# List given: normalize its content
results = []
for specification in specifications:
results.extend(_get_specifications(specification))
return results
else:
raise ValueError(
"Unhandled specifications type : {0}".format(
type(specifications).__name__
)
)
|
java
|
private void writeValue(Object value) throws JSONException {
if (value instanceof Number) {
String string = JSONObject.numberToString((Number) value);
int integer = this.valuekeep.find(string);
if (integer != none) {
write(2, 2);
write(integer, this.valuekeep);
return;
}
if (value instanceof Integer || value instanceof Long) {
long longer = ((Number) value).longValue();
if (longer >= 0 && longer < int14) {
write(0, 2);
if (longer < int4) {
zero();
write((int) longer, 4);
return;
}
one();
if (longer < int7) {
zero();
write((int)(longer - int4), 7);
return;
}
one();
write((int)(longer - int7), 14);
return;
}
}
write(1, 2);
for (int i = 0; i < string.length(); i += 1) {
write(bcd(string.charAt(i)), 4);
}
write(endOfNumber, 4);
this.valuekeep.register(string);
} else {
write(3, 2);
writeJSON(value);
}
}
|
java
|
public static XlsWorkbook createWorkbook(OutputStream os, Workbook existing)
throws IOException
{
try
{
if(existing != null)
return new XlsWorkbook(jxl.Workbook.createWorkbook(os,
(jxl.Workbook)existing.getWorkbook(), settings));
else
return new XlsWorkbook(jxl.Workbook.createWorkbook(os, settings));
}
catch(jxl.read.biff.BiffException e)
{
throw new IOException(e);
}
}
|
python
|
def _handle_table_row(self):
"""Parse as style until end of the line, then continue."""
self._head += 2
if not self._can_recurse():
self._emit_text("|-")
self._head -= 1
return
self._push(contexts.TABLE_OPEN | contexts.TABLE_ROW_OPEN)
padding = self._handle_table_style("\n")
style = self._pop()
# Don't parse the style separator:
self._head += 1
row = self._parse(contexts.TABLE_OPEN | contexts.TABLE_ROW_OPEN)
self._emit_table_tag("|-", "tr", style, padding, None, row, "")
# Offset displacement done by parse():
self._head -= 1
|
python
|
def iv(b, **kwargs):
"""Quick access to imview for interactive sessions
"""
import matplotlib.pyplot as plt
import imview.imviewer as imview
b = checkma(b)
#if hasattr(kwargs,'imshow_kwargs'):
# kwargs['imshow_kwargs']['interpolation'] = 'bicubic'
#else:
# kwargs['imshow_kwargs'] = {'interpolation': 'bicubic'}
#bma_fig(fig, bma, cmap='gist_rainbow_r', clim=None, bg=None, n_subplt=1, subplt=1, label=None, **imshow_kwargs)
fig = plt.figure()
imview.bma_fig(fig, b, **kwargs)
plt.show()
return fig
|
java
|
private void expandDirNodes(SpatialPrimitiveDistanceFunction<V> distFunction, DeLiCluNode node1, DeLiCluNode node2) {
if(LOG.isDebuggingFinest()) {
LOG.debugFinest("ExpandDirNodes: " + node1.getPageID() + " + " + node2.getPageID());
}
int numEntries_1 = node1.getNumEntries();
int numEntries_2 = node2.getNumEntries();
// insert all combinations of unhandled - handled children of
// node1-node2 into pq
for(int i = 0; i < numEntries_1; i++) {
DeLiCluEntry entry1 = node1.getEntry(i);
if(!entry1.hasUnhandled()) {
continue;
}
for(int j = 0; j < numEntries_2; j++) {
DeLiCluEntry entry2 = node2.getEntry(j);
if(!entry2.hasHandled()) {
continue;
}
double distance = distFunction.minDist(entry1, entry2);
heap.add(new SpatialObjectPair(distance, entry1, entry2, true));
}
}
}
|
python
|
def multi_normal(X, t):
"""
Multivariate normal sampler:
Generates normal samples with mean m, precision matrix LL'
Inputs:
x :
propose from
Outputs:
normal with mean m and precision LL'
"""
m, L = update_params(X, t)
z = np.random.standard_normal(np.shape(m)) # generate i.i.d N(0,1)
return la.solve(L.T,z)+m
|
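Solving L'x = z maps z ~ N(0, I) to a sample with covariance (LL')^(-1), i.e. precision LL'. A small numpy check of that identity (m and L below are arbitrary example values, not from the original code):

import numpy as np
import numpy.linalg as la

rng = np.random.default_rng(0)
m = np.array([1.0, -2.0])
L = np.array([[2.0, 0.0],
              [0.6, 1.5]])              # precision matrix is L @ L.T

samples = np.array([la.solve(L.T, rng.standard_normal(2)) + m
                    for _ in range(20000)])
print(np.cov(samples.T))               # ~ inverse of the precision matrix
print(la.inv(L @ L.T))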
python
|
def hover(self):
"""
Hovers the element
"""
def do_hover():
"""
Perform hover
"""
ActionChains(self.driver_wrapper.driver).move_to_element(self.element).perform()
return self.execute_and_handle_webelement_exceptions(do_hover, 'hover')
|
python
|
def _process_yes_work(self, yes_work, no_catalogue, maybe_works,
output_dir):
"""Returns statistics of how `yes_work` compares with the other works
in `no_catalogue` and the "maybe" works.
:param yes_work: name of work being processed
:type yes_work: `str`
    :param no_catalogue: catalogue containing `yes_work` and the "no"
works
:type no_catalogue: `Catalogue`
:param maybe_works: names of "maybe" works
:type maybe_works: `list` of `str`
:param output_dir: directory where generated files are saved
:type output_dir: `str`
:rtype: `dict`
"""
self._logger.info('Processing "maybe" work {} as "yes".'.format(
yes_work))
stats = {COMMON: {}, SHARED: {}, UNIQUE: {}}
yes_work_dir = os.path.join(output_dir, yes_work)
os.makedirs(yes_work_dir, exist_ok=True)
results_path = os.path.join(yes_work_dir, 'intersect_with_no.csv')
self._run_query(results_path, self._store.intersection, [no_catalogue])
for maybe_work in maybe_works:
stats = self._process_maybe_work(
yes_work, maybe_work, yes_work_dir, results_path, stats)
return stats
|
java
|
public boolean containsNone(UnicodeSet b) {
        // The two sets are disjoint only if no range (pair) of b overlaps a range of this set.
// This implementation accesses the lists directly for speed.
int[] listB = b.list;
boolean needA = true;
boolean needB = true;
int aPtr = 0;
int bPtr = 0;
int aLen = len - 1;
int bLen = b.len - 1;
int startA = 0, startB = 0, limitA = 0, limitB = 0;
while (true) {
// double iterations are such a pain...
if (needA) {
if (aPtr >= aLen) {
// ran out of A: break so we test strings
break;
}
startA = list[aPtr++];
limitA = list[aPtr++];
}
if (needB) {
if (bPtr >= bLen) {
// ran out of B: break so we test strings
break;
}
startB = listB[bPtr++];
limitB = listB[bPtr++];
}
// if B is higher than any part of A, get new A
if (startB >= limitA) {
needA = true;
needB = false;
continue;
}
// if A is higher than any part of B, get new B
if (startA >= limitB) {
needA = false;
needB = true;
continue;
}
// all other combinations mean we fail
return false;
}
if (!SortedSetRelation.hasRelation(strings, SortedSetRelation.DISJOINT, b.strings)) return false;
return true;
}
|
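The double-pointer walk above amounts to a disjointness test over two sorted lists of half-open ranges; a Python sketch of the same control flow:

def ranges_disjoint(a, b):
    """a and b are sorted lists of half-open (start, limit) ranges,
    mirroring the list/listB walk in containsNone above."""
    i = j = 0
    while i < len(a) and j < len(b):
        start_a, limit_a = a[i]
        start_b, limit_b = b[j]
        if start_b >= limit_a:      # b's range lies entirely above a's
            i += 1
        elif start_a >= limit_b:    # a's range lies entirely above b's
            j += 1
        else:                       # any other combination overlaps
            return False
    return True

assert ranges_disjoint([(0, 10)], [(10, 20)])
assert not ranges_disjoint([(0, 10)], [(5, 7)])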
python
|
def get_random_connection(self):
"""
Open new connection to random redis server.
"""
if self._available_connections:
node_name = random.choice(list(self._available_connections.keys()))
conn_list = self._available_connections[node_name]
# check it in case of empty connection list
if conn_list:
return conn_list.pop()
for node in self.nodes.random_startup_node_iter():
connection = self.get_connection_by_node(node)
if connection:
return connection
raise Exception("Cant reach a single startup node.")
|
python
|
def _AlignUncompressedDataOffset(self, uncompressed_data_offset):
"""Aligns the compressed file with the uncompressed data offset.
Args:
uncompressed_data_offset (int): uncompressed data offset.
Raises:
IOError: if the ZIP file could not be opened.
OSError: if the ZIP file could not be opened.
"""
if self._zip_ext_file:
self._zip_ext_file.close()
self._zip_ext_file = None
try:
# The open can fail if the file path in the local file header
# does not use the same path segment separator as the corresponding
# entry in the central directory.
self._zip_ext_file = self._zip_file.open(self._zip_info, 'r')
except zipfile.BadZipfile as exception:
raise IOError(
'Unable to open ZIP file with error: {0!s}'.format(exception))
self._uncompressed_data = b''
self._uncompressed_data_size = 0
self._uncompressed_data_offset = 0
while uncompressed_data_offset > 0:
self._ReadCompressedData(self._UNCOMPRESSED_DATA_BUFFER_SIZE)
if uncompressed_data_offset < self._uncompressed_data_size:
self._uncompressed_data_offset = uncompressed_data_offset
break
uncompressed_data_offset -= self._uncompressed_data_size
|
python
|
def selectImports(pth, xtrapath=None):
"""
Return the dependencies of a binary that should be included.
Return a list of pairs (name, fullpath)
"""
rv = []
if xtrapath is None:
xtrapath = [os.path.dirname(pth)]
else:
assert isinstance(xtrapath, list)
xtrapath = [os.path.dirname(pth)] + xtrapath # make a copy
dlls = getImports(pth)
for lib in dlls:
if seen.get(lib.upper(), 0):
continue
if not is_win and not is_cygwin:
# all other platforms
npth = lib
dir, lib = os.path.split(lib)
else:
# plain win case
npth = getfullnameof(lib, xtrapath)
# now npth is a candidate lib if found
# check again for excludes but with regex FIXME: split the list
if npth:
candidatelib = npth
else:
candidatelib = lib
if not dylib.include_library(candidatelib):
if (candidatelib.find('libpython') < 0 and
candidatelib.find('Python.framework') < 0):
# skip libs not containing (libpython or Python.framework)
if not seen.get(npth.upper(), 0):
logger.debug("Skipping %s dependency of %s",
lib, os.path.basename(pth))
continue
else:
pass
if npth:
if not seen.get(npth.upper(), 0):
logger.debug("Adding %s dependency of %s",
lib, os.path.basename(pth))
rv.append((lib, npth))
else:
logger.error("lib not found: %s dependency of %s", lib, pth)
return rv
|