language (2 classes) | func_code_string (63 to 466k chars)
---|---
python
|
def connection_open(self) -> None:
"""
Callback when the WebSocket opening handshake completes.
Enter the OPEN state and start the data transfer phase.
"""
# 4.1. The WebSocket Connection is Established.
assert self.state is State.CONNECTING
self.state = State.OPEN
logger.debug("%s - state = OPEN", self.side)
# Start the task that receives incoming WebSocket messages.
self.transfer_data_task = self.loop.create_task(self.transfer_data())
# Start the task that sends pings at regular intervals.
self.keepalive_ping_task = self.loop.create_task(self.keepalive_ping())
# Start the task that eventually closes the TCP connection.
self.close_connection_task = self.loop.create_task(self.close_connection())
|
python
|
def open(self, directory=None, filename=None, mode="rb"):
    """Opens a file for reading or writing."""
    if directory is None or filename is None:
        raise ValueError("both 'directory' and 'filename' must be provided")
    path = os.path.join(directory, filename)
    # the built-in open(), not this method, handles the actual file I/O
    return open(path, mode)
|
java
|
public ServiceFuture<List<ProjectInner>> listAsync(final String groupName, final String serviceName, final ListOperationCallback<ProjectInner> serviceCallback) {
return AzureServiceFuture.fromPageResponse(
listSinglePageAsync(groupName, serviceName),
new Func1<String, Observable<ServiceResponse<Page<ProjectInner>>>>() {
@Override
public Observable<ServiceResponse<Page<ProjectInner>>> call(String nextPageLink) {
return listNextSinglePageAsync(nextPageLink);
}
},
serviceCallback);
}
|
python
|
def create_mixin(self):
"""
This will create the custom Model Mixin to attach to your custom field
enabled model.
:return:
"""
_builder = self
class CustomModelMixin(object):
@cached_property
def _content_type(self):
return ContentType.objects.get_for_model(self)
@classmethod
def get_model_custom_fields(cls):
""" Return a list of custom fields for this model, callable at model level """
return _builder.fields_model_class.objects.filter(content_type=ContentType.objects.get_for_model(cls))
def get_custom_fields(self):
""" Return a list of custom fields for this model """
return _builder.fields_model_class.objects.filter(content_type=self._content_type)
def get_custom_value(self, field):
""" Get a value for a specified custom field """
return _builder.values_model_class.objects.get(custom_field=field,
content_type=self._content_type,
object_id=self.pk)
def set_custom_value(self, field, value):
""" Set a value for a specified custom field """
custom_value, created = \
_builder.values_model_class.objects.get_or_create(custom_field=field,
content_type=self._content_type,
object_id=self.pk)
custom_value.value = value
custom_value.full_clean()
custom_value.save()
return custom_value
#def __getattr__(self, name):
# """ Get a value for a specified custom field """
# try:
# obj = _builder.values_model_class.objects.get(custom_field__name=name,
# content_type=self._content_type,
# object_id=self.pk)
# return obj.value
# except ObjectDoesNotExist:
# pass
# return super(CustomModelMixin, self).__getattr__(name)
return CustomModelMixin
|
java
|
@Override
public CreateDatasetResult createDataset(CreateDatasetRequest request) {
request = beforeClientExecution(request);
return executeCreateDataset(request);
}
|
python
|
def transfer_funds(self, to, amount, denom, msg):
''' Transfer SBD or STEEM to the given account
'''
try:
self.steem_instance().commit.transfer(to,
float(amount), denom, msg, self.mainaccount)
except Exception as e:
self.msg.error_message(e)
return False
else:
return True
|
python
|
def group_associations(self, params=None):
"""
    Gets the group associations from an Indicator/Group/Victim
Yields: Group Association
"""
if params is None:
params = {}
if not self.can_update():
self._tcex.handle_error(910, [self.type])
for ga in self.tc_requests.group_associations(
self.api_type, self.api_sub_type, self.unique_id, owner=self.owner, params=params
):
yield ga
|
java
|
@Override
public BandMatrix transpose() {
BandMatrix at = new BandMatrix(n, m2, m1);
for (int i = 0; i < n; i++) {
for (int j = i-m2; j <= i+m1; j++) {
if (j >= 0 && j < n) {
at.set(i, j, get(j, i));
}
}
}
return at;
}
|
java
|
@Override
public EClass getIfcElectricCurrentMeasure() {
if (ifcElectricCurrentMeasureEClass == null) {
ifcElectricCurrentMeasureEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(Ifc4Package.eNS_URI)
.getEClassifiers().get(802);
}
return ifcElectricCurrentMeasureEClass;
}
|
python
|
def chat_post_message(self, channel, text, **params):
"""chat.postMessage
This method posts a message to a channel.
https://api.slack.com/methods/chat.postMessage
"""
method = 'chat.postMessage'
params.update({
'channel': channel,
'text': text,
})
return self._make_request(method, params)
|
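A usage sketch for the wrapper above; the client construction and class name are assumptions, only chat_post_message itself comes from the snippet:

# Hypothetical setup: the surrounding class and its token handling are assumed.
slack = SlackApiClient(token='xoxb-...')  # assumed constructor
slack.chat_post_message('#general', 'Deploy finished', username='release-bot')
# Extra keyword arguments (here `username`) are merged into `params` and
# forwarded to the chat.postMessage endpoint via _make_request.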
java
|
public JobFolder createJobFolderWithApplicationId(final String applicationId) throws IOException {
final Path jobFolderPath = jobSubmissionDirectoryProvider.getJobSubmissionDirectoryPath(applicationId);
final String finalJobFolderPath = jobFolderPath.toString();
LOG.log(Level.FINE, "Final job submission Directory: " + finalJobFolderPath);
return createJobFolder(finalJobFolderPath);
}
|
python
|
def mine_block(chain: MiningChain, **kwargs: Any) -> MiningChain:
"""
Mine a new block on the chain. Header parameters for the new block can be
overridden using keyword arguments.
"""
if not isinstance(chain, MiningChain):
raise ValidationError('`mine_block` may only be used on MiningChain instances')
chain.mine_block(**kwargs)
return chain
|
python
|
def iiscgi(application):
"""A specialized version of the reference WSGI-CGI server to adapt to Microsoft IIS quirks.
This is not a production quality interface and will behave badly under load.
"""
try:
from wsgiref.handlers import IISCGIHandler
    except ImportError:
        print("Python 3.2 or newer is required.")
        return  # IISCGIHandler is unavailable; bail out before using it below
if not __debug__:
warnings.warn("Interactive debugging and other persistence-based processes will not work.")
IISCGIHandler().run(application)
|
java
|
protected boolean isDeleteAllowed(String key) {
// check internal handlers first
for (ISharedObjectSecurity handler : securityHandlers) {
if (!handler.isDeleteAllowed(this, key)) {
return false;
}
}
// check global SO handlers next
final Set<ISharedObjectSecurity> handlers = getSecurityHandlers();
if (handlers == null) {
return true;
}
for (ISharedObjectSecurity handler : handlers) {
if (!handler.isDeleteAllowed(this, key)) {
return false;
}
}
return true;
}
|
python
|
def rate_entry_courses(self, topic_id, entry_id, course_id, rating=None):
"""
Rate entry.
Rate a discussion entry.
On success, the response will be 204 No Content with an empty body.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - topic_id
"""ID"""
path["topic_id"] = topic_id
# REQUIRED - PATH - entry_id
"""ID"""
path["entry_id"] = entry_id
# OPTIONAL - rating
"""A rating to set on this entry. Only 0 and 1 are accepted."""
if rating is not None:
data["rating"] = rating
self.logger.debug("POST /api/v1/courses/{course_id}/discussion_topics/{topic_id}/entries/{entry_id}/rating with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/discussion_topics/{topic_id}/entries/{entry_id}/rating".format(**path), data=data, params=params, no_data=True)
|
java
|
public static void intIndexesToBooleanArray(int[] arra, boolean[] arrb) {
for (int i = 0; i < arra.length; i++) {
if (arra[i] < arrb.length) {
arrb[arra[i]] = true;
}
}
}
|
python
|
def selectable_delete(self) -> Expectation:
''' Remove two projects
by selecting them via `<space>` and pressing `d`
'''
self.cmd_sync('ProAdd! tpe2/dep')
self._count(2)
self.cmd_sync('Projects')
self.cmd_sync('call feedkeys("\\<space>\\<space>d")')
return self._count(0)
|
python
|
def extract_spectra_from_file(
        log,
        pathToSpectrum,
        convertLumToFlux=False):
    """
    *Given a spectrum file this function converts the two columns (wavelength and luminosity) to a wavelength (wavelengthArray) and flux (fluxArray) array*
    **Key Arguments:**
    - ``log`` -- logger
    - ``pathToSpectrum`` -- absolute path to the spectrum file
    - ``convertLumToFlux`` -- convert the luminosity column to flux
    **Return:**
    - ``wavelengthArray``, ``fluxArray``
    """
################ > IMPORTS ################
## STANDARD LIB ##
import os
## THIRD PARTY ##
import numpy as np
## LOCAL APPLICATION ##
import dryxPython.astrotools as at
################ > VARIABLE SETTINGS ######
################ >ACTION(S) ################
# USE numPy TO EXTRACT THE DATA FROM FILE
pwd = os.getcwd()
log.debug('pwd %s' % (pwd,))
log.debug('pathToSpectrum %s' % (pathToSpectrum,))
data = np.genfromtxt(pathToSpectrum, skip_header=0, usecols=(0, 1))
wavelengthArray = data[:, 0]
# minWl = wavelengthArray.min()
# maxWl = wavelengthArray.max()
luminosityArray = data[:, 1]
# CONVERT TO FLUX: F = L / 4*pi*(r**2)
if convertLumToFlux:
fluxArray = at.luminosity_to_flux(luminosityArray, 1e-5)
else:
fluxArray = luminosityArray
# DEBUG BLOCK
log.debug('pathToSpectrum: %s' % (pathToSpectrum,))
# for i in range(len(fluxArray)):
# print """%s\t%s\t%s""" % (wavelengthArray[i], luminosityArray[i], fluxArray[i] )
# print "\n\n\n"
return wavelengthArray, fluxArray
|
java
|
public void sendfile(OutputStream os, long offset, long length)
throws IOException
{
if (os instanceof OutputStreamWithBuffer) {
writeToStream((OutputStreamWithBuffer) os);
}
else {
writeToStream(os);
}
}
|
python
|
def _callback(self):
"""The actual callback."""
if self.debug:
# Show the number of open file descriptors
print(">>>>> _callback: Number of open file descriptors: %s" % get_open_fds())
self._runem_all()
# Mission accomplished. Shutdown the scheduler.
all_ok = self.flow.all_ok
if all_ok:
return self.shutdown(msg="All tasks have reached S_OK. Will shutdown the scheduler and exit")
# Handle failures.
err_lines = []
# Shall we send a reminder to the user?
delta_etime = self.get_delta_etime()
if delta_etime.total_seconds() > self.num_reminders * self.remindme_s:
self.num_reminders += 1
msg = ("Just to remind you that the scheduler with pid %s, flow %s\n has been running for %s " %
(self.pid, self.flow, delta_etime))
retcode = self.send_email(msg, tag="[REMINDER]")
if retcode:
# Cannot send mail, shutdown now!
msg += ("\nThe scheduler tried to send an e-mail to remind the user\n" +
" but send_email returned %d. Error is not critical though!" % retcode)
print(msg)
#err_lines.append(msg)
#if delta_etime.total_seconds() > self.max_etime_s:
# err_lines.append("\nExceeded max_etime_s %s. Will shutdown the scheduler and exit" % self.max_etime_s)
# Too many exceptions. Shutdown the scheduler.
if self.num_excs > self.max_num_pyexcs:
msg = "Number of exceptions %s > %s. Will shutdown the scheduler and exit" % (
self.num_excs, self.max_num_pyexcs)
err_lines.append(boxed(msg))
# Paranoid check: disable the scheduler if we have submitted
# too many jobs (it might be due to some bug or other external reasons
    # such as race conditions between different callbacks!)
if self.nlaunch > self.safety_ratio * self.flow.num_tasks:
msg = "Too many jobs launched %d. Total number of tasks = %s, Will shutdown the scheduler and exit" % (
self.nlaunch, self.flow.num_tasks)
err_lines.append(boxed(msg))
# Count the number of tasks with status == S_ERROR.
if self.flow.num_errored_tasks > self.max_num_abierrs:
msg = "Number of tasks with ERROR status %s > %s. Will shutdown the scheduler and exit" % (
self.flow.num_errored_tasks, self.max_num_abierrs)
err_lines.append(boxed(msg))
# Test on the presence of deadlocks.
g = self.flow.find_deadlocks()
if g.deadlocked:
        # Check the flow again so that statuses are updated.
self.flow.check_status()
g = self.flow.find_deadlocks()
#print("deadlocked:\n", g.deadlocked, "\nrunnables:\n", g.runnables, "\nrunning\n", g.running)
print("deadlocked:", len(g.deadlocked), ", runnables:", len(g.runnables), ", running:", len(g.running))
if g.deadlocked and not g.runnables and not g.running:
err_lines.append("No runnable job with deadlocked tasks:\n%s." % str(g.deadlocked))
if not g.runnables and not g.running:
        # Check the flow again so that statuses are updated.
self.flow.check_status()
g = self.flow.find_deadlocks()
if not g.runnables and not g.running:
err_lines.append("No task is running and cannot find other tasks to submit.")
# Something wrong. Quit
if err_lines:
# Cancel all jobs.
if self.killjobs_if_errors:
cprint("killjobs_if_errors set to 'yes' in scheduler file. Will kill jobs before exiting.", "yellow")
try:
num_cancelled = 0
for task in self.flow.iflat_tasks():
num_cancelled += task.cancel()
cprint("Killed %d tasks" % num_cancelled, "yellow")
except Exception as exc:
cprint("Exception while trying to kill jobs:\n%s" % str(exc), "red")
self.shutdown("\n".join(err_lines))
return len(self.exceptions)
|
java
|
protected <T> void writeArray(T[] values, WriteOp<T> writer) {
writeList(Arrays.asList(values), writer);
}
|
python
|
def parse_options_header(value):
"""Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('text/html; charset=utf8')
('text/html', {'charset': 'utf8'})
This should not be used to parse ``Cache-Control`` like headers that use
a slightly different format. For these headers use the
:func:`parse_dict_header` function.
.. versionadded:: 0.5
:param value: the header to parse.
:return: (str, options)
"""
def _tokenize(string):
for match in _option_header_piece_re.finditer(string):
key, value = match.groups()
key = unquote_header_value(key)
if value is not None:
value = unquote_header_value(value, key == 'filename')
yield key, value
if not value:
return '', {}
parts = _tokenize(';' + value)
name = next(parts)[0]
extra = dict(parts)
return name, extra
|
java
|
@Override
public Set<IPersonAttributes> getPeopleWithMultivaluedAttributes(final Map<String, List<Object>> seed,
final IPersonAttributeDaoFilter filter) {
Validate.notNull(seed, "Argument 'seed' cannot be null.");
if (patterns == null || patterns.size() < 1) {
throw new IllegalStateException("patterns Map may not be null and must contain at least 1 mapping.");
}
if (targetPersonAttributeDao == null) {
throw new IllegalStateException("targetPersonAttributeDao may not be null");
}
//Flag for patterns that match
boolean matchedPatterns = false;
//Iterate through all attributeName/pattern pairs
for (final Map.Entry<String, Pattern> patternEntry : this.patterns.entrySet()) {
final String attributeName = patternEntry.getKey();
final List<Object> attributeValues = seed.get(attributeName);
//Check if the value exists
if (attributeValues == null) {
if (this.matchAllPatterns) {
                //Need to match ALL patterns; if the attribute isn't in the seed it can't be matched, return null
if (this.logger.isInfoEnabled()) {
this.logger.info("All patterns must match and attribute='" + attributeName + "' does not exist in the seed, returning null.");
}
return null;
}
//Don't need to match all, just go to the next attribute and see if it exists
continue;
}
//The pattern to test the attribute's value(s) with
final Pattern compiledPattern = patternEntry.getValue();
if (compiledPattern == null) {
throw new IllegalStateException("Attribute '" + attributeName + "' has a null pattern");
}
//Flag for matching the pattern on the values
boolean matchedValues = false;
//Iterate over the values for the attribute, testing each against the pattern
for (final Object valueObj : attributeValues) {
final String value;
try {
value = (String) valueObj;
} catch (final ClassCastException cce) {
                    final IllegalArgumentException iae = new IllegalArgumentException("RegexGatewayPersonAttributeDao can only accept seeds whose values are String or List of String. Attribute '" + attributeName + "' has a non-String value.");
iae.initCause(cce);
throw iae;
}
//Check if the value matches the pattern
final Matcher valueMatcher = compiledPattern.matcher(value);
matchedValues = valueMatcher.matches();
//Only one value needs to be matched, this one matched so no need to test the rest, break out of the loop
if (matchedValues && !this.matchAllValues) {
if (this.logger.isDebugEnabled()) {
this.logger.debug("value='" + value + "' matched pattern='" + compiledPattern + "' and only one value match is needed, leaving value matching loop.");
}
break;
}
//Need to match all values, this one failed so no need to test the rest, break out of the loop
else if (!matchedValues && this.matchAllValues) {
if (this.logger.isDebugEnabled()) {
this.logger.debug("value='" + value + "' did not match pattern='" + compiledPattern + "' and all values need to match, leaving value matching loop.");
}
break;
}
//Extra logging
else if (this.logger.isDebugEnabled()) {
if (matchedValues) {
this.logger.debug("value='" + value + "' matched pattern='" + compiledPattern + "' and all values need to match, continuing value matching loop.");
} else {
this.logger.debug("value='" + value + "' did not match pattern='" + compiledPattern + "' and only one value match is needed, continuing value matching loop.");
}
}
}
matchedPatterns = matchedValues;
//Only one pattern needs to be matched, this one matched so no need to test the rest, break out of the loop
if (matchedPatterns && !this.matchAllPatterns) {
if (this.logger.isDebugEnabled()) {
this.logger.debug("pattern='" + compiledPattern + "' found a match and only one pattern match is needed, leaving pattern matching loop.");
}
break;
}
//Need to match all patterns, this one failed so no need to test the rest, break out of the loop
else if (!matchedPatterns && this.matchAllPatterns) {
if (this.logger.isDebugEnabled()) {
this.logger.debug("pattern='" + compiledPattern + "' did not find a match and all patterns need to match, leaving pattern matching loop.");
}
break;
}
//Extra logging
else if (this.logger.isDebugEnabled()) {
if (matchedPatterns) {
this.logger.debug("pattern='" + compiledPattern + "' found a match and all patterns need to match, continuing pattern matching loop.");
} else {
this.logger.debug("pattern='" + compiledPattern + "' did not find a match and only one pattern match is needed, continuing pattern matching loop.");
}
}
}
//Execute the wrapped DAO if the match criteria was met
if (matchedPatterns) {
if (this.logger.isInfoEnabled()) {
this.logger.info("Matching criteria '" + this.patterns + "' was met for query '" + seed + "', delegating call to the targetPersonAttributeDao='" + this.targetPersonAttributeDao + "'");
}
return this.targetPersonAttributeDao.getPeopleWithMultivaluedAttributes(seed, filter);
}
if (this.logger.isInfoEnabled()) {
this.logger.info("Matching criteria '" + this.patterns + "' was not met for query '" + seed + "', return null");
}
return null;
}
|
python
|
def quarter_to_daterange(quarter):
"""Convert a quarter in arbitrary filename-ready format (e.g. 2015Q1)
into start and end datetimes"""
assert len(quarter) == 6
year = int(quarter[0:4])
quarter = quarter[5]
MONTH_DAY = {
'1': ((1, 1), (3, 31)),
'2': ((4, 1), (6, 30)),
'3': ((7, 1), (9, 30)),
'4': ((10, 1), (12, 31))
}
md = MONTH_DAY[quarter]
start_md, end_md = md
return (
date(year, *start_md),
date(year, *end_md)
)
|
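A quick worked example, assuming `date` here is `datetime.date` imported at module level:

from datetime import date  # assumed module-level import in the original

start, end = quarter_to_daterange('2015Q1')
assert (start, end) == (date(2015, 1, 1), date(2015, 3, 31))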
python
|
def read_xml(filename):
"""
Use et to read in a xml file, or string, into a Element object.
:param filename: File to parse.
:return: lxml._elementTree object or None
"""
parser = et.XMLParser(remove_blank_text=True)
isfile=False
try:
isfile = os.path.exists(filename)
except ValueError as e:
if 'path too long for Windows' in str(e):
pass
else:
raise
try:
if isfile:
return et.parse(filename, parser)
else:
r = et.fromstring(filename, parser)
return r.getroottree()
    except IOError:
        log.exception('unable to open file [{}]'.format(filename))
    except et.XMLSyntaxError:
        log.exception('unable to parse XML [{}]'.format(filename))
    return None
|
java
|
public String[] getServices(Database database, String servicename, String instname)
throws DevFailed {
Vector<String> v = new Vector<String>();
char separ;
// Read Service property
DbDatum datum = get_property(database, TangoConst.CONTROL_SYSTEM,
TangoConst.SERVICE_PROP_NAME, true);
if (!datum.is_empty()) {
String[] services = datum.extractStringArray();
// Build filter
String target = servicename.toLowerCase();
if (!instname.equals("*")) {
target += "/" + instname.toLowerCase();
separ = ':';
} else
separ = '/';
// Search with filter
int start;
for (String service : services) {
start = service.indexOf(separ);
if (start>0) {
String startLine =
service.substring(0, start).toLowerCase();
if (startLine.equals(target))
v.add(service.substring(
service.indexOf(':') + 1));
}
}
}
String[] result = new String[v.size()];
for (int i = 0 ; i<v.size() ; i++)
result[i] = v.get(i);
return result;
}
|
java
|
public <T extends Object> T load(Class<T> clazz, Object hashKey, DynamoDBMapperConfig config) {
return load(clazz, hashKey, null, config);
}
|
python
|
def p2sh(self, s):
"""
Parse a pay-to-script-hash address.
Return a :class:`Contract` or None.
"""
data = self.parse_b58_hashed(s)
if (None in (data, self._pay_to_script_prefix) or
not data.startswith(self._pay_to_script_prefix)):
return None
size = len(self._pay_to_script_prefix)
script = self._network.contract.for_p2sh(data[size:])
script_info = self._network.contract.info_for_script(script)
return Contract(script_info, self._network)
|
python
|
def _protobuf_value_type(value):
"""Returns the type of the google.protobuf.Value message as an api.DataType.
Returns None if the type of 'value' is not one of the types supported in
api_pb2.DataType.
Args:
value: google.protobuf.Value message.
"""
if value.HasField("number_value"):
return api_pb2.DATA_TYPE_FLOAT64
if value.HasField("string_value"):
return api_pb2.DATA_TYPE_STRING
if value.HasField("bool_value"):
return api_pb2.DATA_TYPE_BOOL
return None
|
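A minimal sketch of the helper in action; `api_pb2` is the surrounding package's protobuf module (taken from the snippet itself), so only the `struct_pb2` construction is added here:

from google.protobuf import struct_pb2

assert _protobuf_value_type(struct_pb2.Value(number_value=3.14)) == api_pb2.DATA_TYPE_FLOAT64
assert _protobuf_value_type(struct_pb2.Value(bool_value=True)) == api_pb2.DATA_TYPE_BOOL
# Unsupported kinds (e.g. list_value) fall through and return None.
assert _protobuf_value_type(struct_pb2.Value(list_value=struct_pb2.ListValue())) is None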
python
|
def pOparapar(self,Opar,apar,tdisrupt=None):
"""
NAME:
pOparapar
PURPOSE:
return the probability of a given parallel (frequency,angle) offset pair
INPUT:
Opar - parallel frequency offset (array) (can be Quantity)
apar - parallel angle offset along the stream (scalar) (can be Quantity)
OUTPUT:
p(Opar,apar)
HISTORY:
2015-12-07 - Written - Bovy (UofT)
"""
if _APY_LOADED and isinstance(Opar,units.Quantity):
Opar= Opar.to(1/units.Gyr).value\
/bovy_conversion.freq_in_Gyr(self._vo,self._ro)
if _APY_LOADED and isinstance(apar,units.Quantity):
apar= apar.to(units.rad).value
if tdisrupt is None: tdisrupt= self._tdisrupt
if isinstance(Opar,(int,float,numpy.float32,numpy.float64)):
Opar= numpy.array([Opar])
out= numpy.zeros(len(Opar))
# Compute ts
ts= apar/Opar
# Evaluate
out[(ts < tdisrupt)*(ts >= 0.)]=\
numpy.exp(-0.5*(Opar[(ts < tdisrupt)*(ts >= 0.)]-self._meandO)**2.\
/self._sortedSigOEig[2])/\
numpy.sqrt(self._sortedSigOEig[2])
return out
|
python
|
def getroot(self):
"""Return the root element of the figure.
The root element is a group of elements after stripping the toplevel
``<svg>`` tag.
Returns
-------
GroupElement
All elements of the figure without the ``<svg>`` tag.
"""
if 'class' in self.root.attrib:
attrib = {'class': self.root.attrib['class']}
else:
attrib = None
return GroupElement(self.root.getchildren(), attrib=attrib)
|
java
|
private void addToolbarButtons() {
Button add = CmsToolBar.createButton(
FontOpenCms.WAND,
CmsVaadinUtils.getMessageText(Messages.GUI_RESOURCETYPE_CREATE_NEW_TYPE_0));
add.addClickListener(new ClickListener() {
private static final long serialVersionUID = 1L;
public void buttonClick(ClickEvent event) {
Window window = CmsBasicDialog.prepareWindow(DialogWidth.max);
CmsNewResourceTypeDialog dialog = new CmsNewResourceTypeDialog(window, CmsResourceTypeApp.this);
CmsMoveResourceTypeDialog moduleDialog = new CmsMoveResourceTypeDialog(dialog);
window.setContent(moduleDialog);
window.setCaption(CmsVaadinUtils.getMessageText(Messages.GUI_RESOURCETYPE_CREATE_NEW_TYPE_0));
A_CmsUI.get().addWindow(window);
}
});
m_uiContext.addToolbarButton(add);
}
|
python
|
async def parse_response(self, block=True, timeout=0):
"Parse the response from a publish/subscribe command"
connection = self.connection
if connection is None:
raise RuntimeError(
'pubsub connection not set: '
'did you forget to call subscribe() or psubscribe()?')
coro = self._execute(connection, connection.read_response)
if not block and timeout > 0:
try:
return await asyncio.wait_for(coro, timeout)
except Exception:
return None
return await coro
|
java
|
public static String getSelectColumns(Class<?> entityClass) {
EntityTable entityTable = getEntityTable(entityClass);
if (entityTable.getBaseSelect() != null) {
return entityTable.getBaseSelect();
}
Set<EntityColumn> columnList = getColumns(entityClass);
StringBuilder selectBuilder = new StringBuilder();
boolean skipAlias = Map.class.isAssignableFrom(entityClass);
for (EntityColumn entityColumn : columnList) {
selectBuilder.append(entityColumn.getColumn());
if (!skipAlias && !entityColumn.getColumn().equalsIgnoreCase(entityColumn.getProperty())) {
                //When the column and the property differ there are several cases, e.g. a delimited keyword like `DESC`
if (entityColumn.getColumn().substring(1, entityColumn.getColumn().length() - 1).equalsIgnoreCase(entityColumn.getProperty())) {
selectBuilder.append(",");
} else {
selectBuilder.append(" AS ").append(entityColumn.getProperty()).append(",");
}
} else {
selectBuilder.append(",");
}
}
entityTable.setBaseSelect(selectBuilder.substring(0, selectBuilder.length() - 1));
return entityTable.getBaseSelect();
}
|
python
|
def get_object(self, ObjectClass, id):
""" Retrieve object of type ``ObjectClass`` by ``id``.
| Returns object on success.
| Returns None otherwise.
"""
try:
object = ObjectClass.objects.get(id=id)
except (ObjectClass.DoesNotExist, ObjectClass.MultipleObjectsReturned):
object = None
return object
|
java
|
public void marshall(Namespace namespace, ProtocolMarshaller protocolMarshaller) {
if (namespace == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(namespace.getId(), ID_BINDING);
protocolMarshaller.marshall(namespace.getArn(), ARN_BINDING);
protocolMarshaller.marshall(namespace.getName(), NAME_BINDING);
protocolMarshaller.marshall(namespace.getType(), TYPE_BINDING);
protocolMarshaller.marshall(namespace.getDescription(), DESCRIPTION_BINDING);
protocolMarshaller.marshall(namespace.getServiceCount(), SERVICECOUNT_BINDING);
protocolMarshaller.marshall(namespace.getProperties(), PROPERTIES_BINDING);
protocolMarshaller.marshall(namespace.getCreateDate(), CREATEDATE_BINDING);
protocolMarshaller.marshall(namespace.getCreatorRequestId(), CREATORREQUESTID_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
}
|
python
|
def join(self, table, one=None,
operator=None, two=None, type='inner', where=False):
"""
Add a join clause to the query
:param table: The table to join with, can also be a JoinClause instance
:type table: str or JoinClause
:param one: The first column of the join condition
:type one: str
:param operator: The operator of the join condition
:type operator: str
:param two: The second column of the join condition
:type two: str
:param type: The join type
:type type: str
:param where: Whether to use a "where" rather than a "on"
:type where: bool
:return: The current QueryBuilder instance
:rtype: QueryBuilder
"""
if isinstance(table, JoinClause):
self.joins.append(table)
else:
if one is None:
raise ArgumentError('Missing "one" argument')
join = JoinClause(table, type)
self.joins.append(join.on(
one, operator, two, 'and', where
))
return self
|
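A usage sketch of the two call styles the method accepts; the builder instance is assumed, and `JoinClause.on` is assumed to default its boolean/where arguments:

# Plain column join; type defaults to 'inner' per the docstring.
builder.join('contacts', 'users.id', '=', 'contacts.user_id')

# Pre-built JoinClause; `one`/`operator`/`two` are ignored in this branch.
clause = JoinClause('contacts', 'left').on('users.id', '=', 'contacts.user_id')
builder.join(clause)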
python
|
def finish(self, job_id, queue_id, queue_type='default'):
"""Marks any dequeued job as *completed successfully*.
Any job which gets a finish will be treated as complete
and will be removed from the SharQ.
"""
if not is_valid_identifier(job_id):
raise BadArgumentException('`job_id` has an invalid value.')
if not is_valid_identifier(queue_id):
raise BadArgumentException('`queue_id` has an invalid value.')
if not is_valid_identifier(queue_type):
raise BadArgumentException('`queue_type` has an invalid value.')
keys = [
self._key_prefix,
queue_type
]
args = [
queue_id,
job_id
]
response = {
'status': 'success'
}
finish_response = self._lua_finish(keys=keys, args=args)
if finish_response == 0:
# the finish failed.
response.update({
'status': 'failure'
})
return response
|
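A usage sketch; the SharQ instance and identifiers are illustrative, only `finish` itself comes from the snippet:

response = sharq.finish(
    job_id='a1b2c3d4e5',      # must pass is_valid_identifier()
    queue_id='user1001',
    queue_type='sms',
)
if response['status'] == 'failure':
    # the job was not found in the dequeued state, so nothing was removed
    handle_requeue()          # hypothetical follow-up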
java
|
PathMatcher.PathMatch<HostEntry> match(String path){
int length = path.length();
final int[] lengths = this.lengths;
for (int i = 0; i < lengths.length; ++i) {
int pathLength = lengths[i];
if (pathLength == length) {
HostEntry next = contexts.get(path);
if (next != null) {
return new PathMatcher.PathMatch<>(path, "", next);
}
} else if (pathLength < length) {
char c = path.charAt(pathLength);
if (c == '/') {
String part = path.substring(0, pathLength);
HostEntry next = contexts.get(part);
if (next != null) {
return new PathMatcher.PathMatch<>(part, path.substring(pathLength), next);
}
}
}
}
if(defaultHandler.contexts.isEmpty()) {
return new PathMatcher.PathMatch<>("", path, null);
}
return new PathMatcher.PathMatch<>("", path, defaultHandler);
}
|
java
|
public java.lang.String getXmlns() {
return (java.lang.String) getStateHelper().eval(PropertyKeys.xmlns);
}
|
python
|
def update_pipe_module():
"""Populate the pipe module dynamically."""
module_dir = __all__
operators = stream.__dict__
for key, value in operators.items():
if getattr(value, 'pipe', None):
globals()[key] = value.pipe
if key not in module_dir:
module_dir.append(key)
|
python
|
def pretty_print(self, carrot=False):
    """Return a string of this line including its line number.
    If carrot is True then a second line is added under the string with a
    caret ('^') under the current character position.
    """
    lineno = self.lineno
    padding = 0
    if lineno < 1000:
        padding = 1
    if lineno < 100:
        padding = 2
    if lineno < 10:
        padding = 3
    string = str(lineno) + (' ' * padding) + '|' + self.string
    if carrot:
        # The prefix is 5 characters wide (line number padded to 4 columns,
        # plus the '|'), so the caret sits at column col + 5.
        string += '\n' + (' ' * (self.col + 5)) + '^'
    return string
|
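A usage sketch showing the intended output shape; the line object's attributes (`lineno`, `col`, `string`) are taken from what the method reads, while its construction is assumed:

# line.lineno == 42, line.col == 4, line.string == 'x = foo('
print(line.pretty_print(carrot=True))
# 42  |x = foo(
#          ^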
java
|
public ReceiveMessageResult receiveMessage(String queueUrl) {
ReceiveMessageRequest receiveMessageRequest = new ReceiveMessageRequest(queueUrl);
return receiveMessage(receiveMessageRequest);
}
|
java
|
public String format(String s)
{
for(int i = 0; i < s.length(); i++) {
if(s.substring(i, i + 1).equals("*") && i > 0)
if(isOperand(s.substring(i - 1, i), var) && i < s.length() - 1 && s.substring(i + 1, i + 2).equals(var))
s = s.substring(0, i) + s.substring(i + 1);
}
return s;
}
|
java
|
public void close() {
super.clear();
if (indexMap != null){
indexMap.clear();
indexMap = null;
}
if (this.indexStore != null){
getIndexStore().close();
this.indexStore = null ;
}
if (this.cacheStore != null){
getCacheStore().close();
this.cacheStore = null ;
}
}
|
python
|
def _updateEndpoints(self, *args, **kwargs):
    """
    Updates all endpoints except the one from which this slot was called.
    Note: this method is probably not completely thread-safe; a lock may be
    needed around setting self.ignoreEvents.
    """
sender = self.sender()
if not self.ignoreEvents:
self.ignoreEvents = True
for binding in self.bindings.values():
if binding.instanceId == id(sender):
continue
if args:
binding.setter(*args,**kwargs)
else:
binding.setter(self.bindings[id(sender)].getter())
self.ignoreEvents = False
|
python
|
def render(self, name, value, attrs=None):
'''Render the widget as HTML inputs for display on a form.
:param name: form field base name
:param value: date value
:param attrs: - unused
:returns: HTML text with three inputs for year/month/day
'''
# expects a value in format YYYY-MM-DD or YYYY-MM or YYYY (or empty/None)
year, month, day = 'YYYY', 'MM', 'DD'
if value:
# use the regular expression to pull out year, month, and day values
# if regular expression does not match, inputs will be empty
match = W3C_DATE_RE.match(value)
if match:
date_parts = match.groupdict()
year = date_parts['year']
month = date_parts['month']
day = date_parts['day']
year_html = self.create_textinput(name, self.year_field, year, size=4, title='4-digit year', onClick='javascript:if(this.value == "YYYY") { this.value = "" };')
month_html = self.create_textinput(name, self.month_field, month, size=2, title='2-digit month', onClick='javascript:if(this.value == "MM") { this.value = "" };')
day_html = self.create_textinput(name, self.day_field, day, size=2, title='2-digit day', onClick='javascript:if(this.value == "DD") { this.value = "" };')
# display widget fields in YYYY-MM-DD order to match W3C date format,
# and putting required field(s) on the left
output = [year_html, month_html, day_html]
return mark_safe(u' / \n'.join(output))
|
python
|
def split_levels(fields):
"""
Convert dot-notation such as ['a', 'a.b', 'a.d', 'c'] into
current-level fields ['a', 'c'] and next-level fields
{'a': ['b', 'd']}.
"""
first_level_fields = []
next_level_fields = {}
if not fields:
return first_level_fields, next_level_fields
if not isinstance(fields, list):
fields = [a.strip() for a in fields.split(",") if a.strip()]
for e in fields:
if "." in e:
first_level, next_level = e.split(".", 1)
first_level_fields.append(first_level)
next_level_fields.setdefault(first_level, []).append(next_level)
else:
first_level_fields.append(e)
first_level_fields = list(set(first_level_fields))
return first_level_fields, next_level_fields
|
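A worked example of the dot-notation split:

first, nested = split_levels(['a', 'a.b', 'a.d', 'c'])
# first  -> ['a', 'c'] (order not guaranteed: duplicates are removed via set())
# nested -> {'a': ['b', 'd']}

# Comma-separated strings are accepted too:
split_levels('a, a.b, a.d, c')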
python
|
def csch(x, context=None):
"""
Return the hyperbolic cosecant of x.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_csch,
(BigFloat._implicit_convert(x),),
context,
)
|
java
|
public void addString(String str)
{
if (sourceNode != null)
{
addStringInternal(str);
replaceOrRegister(sourceNode, str);
}
else
{
unSimplify();
addString(str);
}
}
|
python
|
def clear(self):
"""Empty the instance """
# self.graph = nx.Graph()
self.graph.clear()
self.data.clear()
self.descriptors.clear()
self.size2d = None
self.rings = None
self.scaffolds = None
self.isolated = None
|
java
|
public Observable<ServiceResponse<Page<DeletedStorageAccountItem>>> getDeletedStorageAccountsWithServiceResponseAsync(final String vaultBaseUrl) {
return getDeletedStorageAccountsSinglePageAsync(vaultBaseUrl)
.concatMap(new Func1<ServiceResponse<Page<DeletedStorageAccountItem>>, Observable<ServiceResponse<Page<DeletedStorageAccountItem>>>>() {
@Override
public Observable<ServiceResponse<Page<DeletedStorageAccountItem>>> call(ServiceResponse<Page<DeletedStorageAccountItem>> page) {
String nextPageLink = page.body().nextPageLink();
if (nextPageLink == null) {
return Observable.just(page);
}
return Observable.just(page).concatWith(getDeletedStorageAccountsNextWithServiceResponseAsync(nextPageLink));
}
});
}
|
python
|
def _set_prompt(self):
"""Set prompt so it displays the current working directory."""
self.cwd = os.getcwd()
self.prompt = Fore.CYAN + '{!r} $ '.format(self.cwd) + Fore.RESET
|
python
|
def transfer_all(cls, field_mappings, sources, destination_factory=None):
"""Calls cls.transfer on all records in sources."""
for index, source in enumerate(sources):
try:
yield cls.transfer(field_mappings, source, destination_factory or (lambda x: x))
except Exception as ex:
raise Exception("Error with source #{0}: {1}".format(index, ex)) from ex
|
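A usage sketch; the class name, mapping format, and sources are illustrative, since only the generator wrapper is shown:

# `Migrator` stands in for whatever class defines transfer/transfer_all.
for dest in Migrator.transfer_all({'name': 'full_name'}, sources=[row1, row2]):
    save(dest)   # hypothetical sink
# A failure on the second source surfaces as:
#   Exception: Error with source #1: <original error>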
python
|
def by_re_lookup(self, style_key, style_value, re_flags=0):
"""Return a processor for a "re_lookup" style value.
Parameters
----------
style_key : str
A style key.
style_value : dict
        A dictionary with a "re_lookup" style value that consists of a
        sequence of items where each item has the form `(regexp,
        x)`, where regexp is a regular expression to match against the
        field value and x is either a style attribute (str) or a boolean
        flag indicating to use the style attribute named by `style_key`.
re_flags : int
Passed through as flags argument to re.compile.
Returns
-------
A function.
"""
style_attr = style_key if self.style_types[style_key] is bool else None
regexps = [(re.compile(r, flags=re_flags), v)
for r, v in style_value["re_lookup"]]
def proc(value, result):
if not isinstance(value, six.string_types):
return result
for r, lookup_value in regexps:
if r.search(value):
if not lookup_value:
return result
return self.render(style_attr or lookup_value, result)
return result
return proc
|
java
|
public java.util.List<InstanceInformation> getInstanceInformationList() {
if (instanceInformationList == null) {
instanceInformationList = new com.amazonaws.internal.SdkInternalList<InstanceInformation>();
}
return instanceInformationList;
}
|
python
|
def getSiblings(self, textId, subreference):
""" Retrieve the siblings of a textual node
:param textId: PrototypeText Identifier
:type textId: str
:param subreference: Passage Reference
:type subreference: str
:return: Tuple of references
:rtype: (str, str)
"""
key = _cache_key("Nautilus", self.name, "Siblings", textId, subreference)
o = self.cache.get(key)
if o is not None:
return o
passage = self.getTextualNode(textId, subreference, prevnext=True)
siblings = passage.siblingsId
self.cache.set(key, siblings)
return siblings
|
python
|
async def search_raw(self, term: str, limit: int = 3) -> List[dict]:
"""Performs a search for a term and returns the raw response.
Args:
term: The term to be defined.
        limit: The maximum number of results you'd like.
            Defaults to 3.
Returns:
A list of :class:`dict`\s which contain word information.
"""
return (await self._get(term=term))['list'][:limit]
|
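A usage sketch for the coroutine; the client object and the response keys are assumptions based on the Urban-Dictionary-style payload the docstring implies:

import asyncio

async def main():
    results = await client.search_raw('python', limit=1)   # `client` assumed
    entry = results[0]
    print(entry.get('word'), entry.get('definition'))      # keys assumed

asyncio.run(main())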
python
|
def handle_error(self, error, req, schema, error_status_code, error_headers):
"""Handles errors during parsing. Aborts the current HTTP request and
responds with a 422 error.
"""
status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS
abort(
status_code,
exc=error,
messages=error.messages,
schema=schema,
headers=error_headers,
)
|
python
|
def cli(*args, **kwargs):
"""
CSVtoTable commandline utility.
"""
# Convert CSV file
content = convert.convert(kwargs["input_file"], **kwargs)
# Serve the temporary file in browser.
if kwargs["serve"]:
convert.serve(content)
# Write to output file
elif kwargs["output_file"]:
        # Check whether the file may be overwritten
if (not kwargs["overwrite"] and
not prompt_overwrite(kwargs["output_file"])):
raise click.Abort()
convert.save(kwargs["output_file"], content)
click.secho("File converted successfully: {}".format(
kwargs["output_file"]), fg="green")
else:
        # If not serving and the output file is missing, raise an error
raise click.BadOptionUsage("Missing argument \"output_file\".")
|
python
|
def missing_count(self):
"""numeric representing count of missing rows in cube response."""
if self.means:
return self.means.missing_count
return self._cube_dict["result"].get("missing", 0)
|
python
|
def config_list(backend):
"""
Print the current configuration
"""
click.secho('Print Configuration', fg='green')
    print(str(backend.dki.get_config()))
|
python
|
def _filter_deleted_records(self, batches):
"""
Filter out deleted records
"""
for batch in batches:
for record in batch:
if not self.must_delete_record(record):
yield record
|
python
|
def add_pii_permissions(self, group, view_only=None):
"""Adds PII model permissions.
"""
pii_model_names = [m.split(".")[1] for m in self.pii_models]
if view_only:
permissions = Permission.objects.filter(
(Q(codename__startswith="view") | Q(codename__startswith="display")),
content_type__model__in=pii_model_names,
)
else:
permissions = Permission.objects.filter(
content_type__model__in=pii_model_names
)
for permission in permissions:
group.permissions.add(permission)
for model in self.pii_models:
permissions = Permission.objects.filter(
codename__startswith="view",
content_type__app_label=model.split(".")[0],
content_type__model=f"historical{model.split('.')[1]}",
)
for permission in permissions:
group.permissions.add(permission)
for permission in Permission.objects.filter(
content_type__app_label="edc_registration",
codename__in=[
"add_registeredsubject",
"delete_registeredsubject",
"change_registeredsubject",
],
):
group.permissions.remove(permission)
permission = Permission.objects.get(
content_type__app_label="edc_registration",
codename="view_historicalregisteredsubject",
)
group.permissions.add(permission)
|
java
|
public CMAUiExtension update(CMAUiExtension extension) {
assertNotNull(extension, "extension");
final Integer version = getVersionOrThrow(extension, "update");
final String id = getResourceIdOrThrow(extension, "extension");
final String spaceId = getSpaceIdOrThrow(extension, "extension");
assertNotNull(extension.getEnvironmentId(), "environmentId");
final String environmentId = extension.getEnvironmentId();
final CMASystem system = extension.getSystem();
extension.setSystem(null);
try {
return service.update(spaceId, environmentId, id, extension, version).blockingFirst();
} finally {
extension.setSystem(system);
}
}
|
java
|
@Override
public com.liferay.commerce.product.model.CProduct addCProduct(
com.liferay.commerce.product.model.CProduct cProduct) {
return _cProductLocalService.addCProduct(cProduct);
}
|
python
|
def padtype_to_pads(in_shape, window_shape, window_strides, padding):
"""Convert padding string to list of pairs of pad values."""
padding = padding.upper()
if padding == 'SAME':
out_shape = onp.ceil(
onp.true_divide(in_shape, window_strides)).astype(int)
pad_sizes = [max((out_size - 1) * stride + window_shape - in_size, 0)
for out_size, stride, window_shape, in_size
in zip(out_shape, window_strides, window_shape, in_shape)]
return [(pad_size // 2, pad_size - pad_size // 2)
for pad_size in pad_sizes]
elif padding == 'VALID':
return [(0, 0)] * len(in_shape)
else:
msg = 'Unknown padding type: {}.'
raise TypeError(msg.format(padding))
|
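A worked example of the 'SAME' arithmetic, assuming `onp` is the NumPy import used by the snippet:

# 1-D input of length 5, window 3, stride 1:
#   out_size = ceil(5 / 1) = 5
#   pad      = max((5 - 1) * 1 + 3 - 5, 0) = 2  ->  split as (1, 1)
assert padtype_to_pads((5,), (3,), (1,), 'same') == [(1, 1)]
assert padtype_to_pads((5,), (3,), (1,), 'valid') == [(0, 0)]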
java
|
@Override
public StopContinuousExportResult stopContinuousExport(StopContinuousExportRequest request) {
request = beforeClientExecution(request);
return executeStopContinuousExport(request);
}
|
java
|
@Override
public void play(SpeechAnnouncement announcement) {
boolean isInvalidAnnouncement = announcement == null;
if (isInvalidAnnouncement) {
return;
}
this.announcement = announcement;
playAnnouncementTextAndTypeFrom(announcement);
}
|
python
|
def connect_params_async(self):
"""Connection parameters for `aiomysql.Connection`
"""
kwargs = self.connect_params.copy()
kwargs.update({
'minsize': self.min_connections,
'maxsize': self.max_connections,
'autocommit': True,
})
return kwargs
|
python
|
def id(self, opts_id):
"""Handles tracking and cleanup of custom ids."""
old_id = self._id
self._id = opts_id
if old_id is not None:
cleanup_custom_options(old_id)
if opts_id is not None and opts_id != old_id:
if opts_id not in Store._weakrefs:
Store._weakrefs[opts_id] = []
ref = weakref.ref(self, partial(cleanup_custom_options, opts_id))
Store._weakrefs[opts_id].append(ref)
|
python
|
def geocode(
self,
query,
bbox=None,
mapview=None,
exactly_one=True,
maxresults=None,
pageinformation=None,
language=None,
additional_data=False,
timeout=DEFAULT_SENTINEL
):
"""
Return a location point by address.
This implementation supports only a subset of all available parameters.
A list of all parameters of the pure REST API is available here:
https://developer.here.com/documentation/geocoder/topics/resource-geocode.html
:param str query: The address or query you wish to geocode.
For a structured query, provide a dictionary whose keys
are one of: `city`, `county`, `district`, `country`, `state`,
`street`, `housenumber`, or `postalcode`.
:param bbox: A type of spatial filter, limits the search for any other attributes
in the request. Specified by two coordinate (lat/lon)
pairs -- corners of the box. `The bbox search is currently similar
to mapview but it is not extended` (cited from the REST API docs).
Relevant global results are also returned.
Example: ``[Point(22, 180), Point(-22, -180)]``.
:type bbox: list or tuple of 2 items of :class:`geopy.point.Point` or
``(latitude, longitude)`` or ``"%(latitude)s, %(longitude)s"``.
:param mapview: The app's viewport, given as two coordinate pairs, specified
by two lat/lon pairs -- corners of the bounding box,
respectively. Matches from within the set map view plus an extended area
are ranked highest. Relevant global results are also returned.
Example: ``[Point(22, 180), Point(-22, -180)]``.
:type mapview: list or tuple of 2 items of :class:`geopy.point.Point` or
``(latitude, longitude)`` or ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int maxresults: Defines the maximum number of items in the
response structure. If not provided and there are multiple results
the HERE API will return 10 results by default. This will be reset
to one if ``exactly_one`` is True.
:param int pageinformation: A key which identifies the page to be returned
when the response is separated into multiple pages. Only useful when
``maxresults`` is also provided.
:param str language: Affects the language of the response,
must be a RFC 4647 language code, e.g. 'en-US'.
:param str additional_data: A string with key-value pairs as described on
https://developer.here.com/documentation/geocoder/topics/resource-params-additional.html.
These will be added as one query parameter to the URL.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
if isinstance(query, dict):
params = {
key: val
for key, val
in query.items()
if key in self.structured_query_params
}
params['app_id'] = self.app_id
params['app_code'] = self.app_code
else:
params = {
'searchtext': self.format_string % query,
'app_id': self.app_id,
'app_code': self.app_code
}
if bbox:
params['bbox'] = self._format_bounding_box(
bbox, "%(lat2)s,%(lon1)s;%(lat1)s,%(lon2)s")
if mapview:
params['mapview'] = self._format_bounding_box(
mapview, "%(lat2)s,%(lon1)s;%(lat1)s,%(lon2)s")
if pageinformation:
params['pageinformation'] = pageinformation
if maxresults:
params['maxresults'] = maxresults
if exactly_one:
params['maxresults'] = 1
if language:
params['language'] = language
if additional_data:
params['additionaldata'] = additional_data
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout),
exactly_one
)
|
java
|
public void run() {
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName os;
try {
os = new ObjectName("java.lang:type=OperatingSystem");
} catch( MalformedObjectNameException e ) {
throw Log.throwErr(e);
}
Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
int counter = 0;
//noinspection InfiniteLoopStatement
while( true ) {
// Update the interesting health self-info for publication also
H2O cloud = H2O.CLOUD;
HeartBeat hb = H2O.SELF._heartbeat;
hb._hb_version = HB_VERSION++;
hb._jvm_boot_msec= TimeLine.JVM_BOOT_MSEC;
// Memory utilization as of last FullGC
long kv_gc = Cleaner.KV_USED_AT_LAST_GC;
long heap_gc = Cleaner.HEAP_USED_AT_LAST_GC;
long pojo_gc = Math.max(heap_gc - kv_gc,0);
long kv_mem = Cleaner.Histo.cached(); // More current than last FullGC numbers; can skyrocket
// Since last FullGC, assuming POJO remains constant and KV changed: new free memory
long free_mem = Math.max(MemoryManager.MEM_MAX-kv_mem-pojo_gc,0);
long pojo_mem = MemoryManager.MEM_MAX-kv_mem-free_mem;
hb.set_kv_mem(kv_mem);
hb.set_pojo_mem(pojo_mem);
hb.set_free_mem(free_mem);
hb.set_swap_mem(Cleaner.Histo.swapped());
hb._keys = H2O.STORE.size();
try {
hb._system_load_average = ((Double)mbs.getAttribute(os, "SystemLoadAverage")).floatValue();
if( hb._system_load_average == -1 ) // SystemLoadAverage not available on windows
hb._system_load_average = ((Double)mbs.getAttribute(os, "SystemCpuLoad")).floatValue();
} catch( Exception e ) {/*Ignore, data probably not available on this VM*/ }
int rpcs = 0;
for( H2ONode h2o : cloud._memary )
rpcs += h2o.taskSize();
hb._rpcs = (char)rpcs;
// Scrape F/J pool counts
hb._fjthrds = new short[H2O.MAX_PRIORITY+1];
hb._fjqueue = new short[H2O.MAX_PRIORITY+1];
for( int i=0; i<hb._fjthrds.length; i++ ) {
hb._fjthrds[i] = (short)H2O.getWrkThrPoolSize(i);
hb._fjqueue[i] = (short)H2O.getWrkQueueSize(i);
}
hb._tcps_active= (char)H2ONode.TCPS.get();
// get the usable and total disk storage for the partition where the
// persistent KV pairs are stored
hb.set_free_disk(H2O.getPM().getIce().getUsableSpace());
hb.set_max_disk (H2O.getPM().getIce().getTotalSpace() );
// get cpu utilization for the system and for this process. (linux only.)
LinuxProcFileReader lpfr = new LinuxProcFileReader();
lpfr.read();
if (lpfr.valid()) {
hb._system_idle_ticks = lpfr.getSystemIdleTicks();
hb._system_total_ticks = lpfr.getSystemTotalTicks();
hb._process_total_ticks = lpfr.getProcessTotalTicks();
hb._process_num_open_fds = lpfr.getProcessNumOpenFds();
}
else {
hb._system_idle_ticks = -1;
hb._system_total_ticks = -1;
hb._process_total_ticks = -1;
hb._process_num_open_fds = -1;
}
hb._num_cpus = (short)Runtime.getRuntime().availableProcessors();
hb._cpus_allowed = (short) lpfr.getProcessCpusAllowed();
if (H2O.ARGS.nthreads < hb._cpus_allowed) {
hb._cpus_allowed = H2O.ARGS.nthreads;
}
hb._nthreads = H2O.ARGS.nthreads;
try {
hb._pid = Integer.parseInt(lpfr.getProcessID());
}
catch (Exception ignore) {}
// Announce what Cloud we think we are in.
// Publish our health as well.
UDPHeartbeat.build_and_multicast(cloud, hb);
// If we have no internet connection, then the multicast goes
// nowhere and we never receive a heartbeat from ourselves!
// Fake it now.
long now = System.currentTimeMillis();
H2O.SELF._last_heard_from = now;
// Look for napping Nodes & propose removing from Cloud
for( H2ONode h2o : cloud._memary ) {
long delta = now - h2o._last_heard_from;
if( delta > SUSPECT ) {// We suspect this Node has taken a dirt nap
if( !h2o._announcedLostContact ) {
Paxos.print("hart: announce suspect node",cloud._memary,h2o.toString());
h2o._announcedLostContact = true;
}
} else if( h2o._announcedLostContact ) {
Paxos.print("hart: regained contact with node",cloud._memary,h2o.toString());
h2o._announcedLostContact = false;
}
}
// Run mini-benchmark every 5 mins. However, on startup - do not have
      // all JVMs immediately launch an all-core benchmark - they will fight
// with each other. Stagger them using the hashcode.
// Run this benchmark *before* testing the heap or GC, so the GC numbers
// are current as of the send time.
if( (counter+Math.abs(H2O.SELF.hashCode()*0xDECAF /*spread wider than 1 apart*/)) % (300/(Float.isNaN(hb._gflops)?10:1)) == 0) {
hb._gflops = (float)Linpack.run(hb._cpus_allowed);
hb._membw = (float)MemoryBandwidth.run(hb._cpus_allowed);
}
counter++;
// Once per second, for the entire cloud a Node will multi-cast publish
// itself, so other unrelated Clouds discover each other and form up.
try { Thread.sleep(SLEEP); } // Only once-sec per entire Cloud
catch( IllegalMonitorStateException ignore ) { }
catch( InterruptedException ignore ) { }
}
}
|
java
|
public static boolean isNonStandard(GeometryType geometryType) {
return GeometryCodes.getCode(geometryType) > GeometryCodes
.getCode(GeometryType.SURFACE);
}
|
java
|
protected void inject(final Object target, final Field field, Object value) throws IllegalAccessException {
field.set(target, value);
}
|
java
|
@Override
public Object eGet(int featureID, boolean resolve, boolean coreType) {
switch (featureID) {
case AfplibPackage.WINDOW_SPECIFICATION__FLAGS:
return getFLAGS();
case AfplibPackage.WINDOW_SPECIFICATION__RES3:
return getRES3();
case AfplibPackage.WINDOW_SPECIFICATION__CFORMAT:
return getCFORMAT();
case AfplibPackage.WINDOW_SPECIFICATION__UBASE:
return getUBASE();
case AfplibPackage.WINDOW_SPECIFICATION__XRESOL:
return getXRESOL();
case AfplibPackage.WINDOW_SPECIFICATION__YRESOL:
return getYRESOL();
case AfplibPackage.WINDOW_SPECIFICATION__IMGXYRES:
return getIMGXYRES();
case AfplibPackage.WINDOW_SPECIFICATION__XLWIND:
return getXLWIND();
case AfplibPackage.WINDOW_SPECIFICATION__XRWIND:
return getXRWIND();
case AfplibPackage.WINDOW_SPECIFICATION__YBWIND:
return getYBWIND();
case AfplibPackage.WINDOW_SPECIFICATION__YTWIND:
return getYTWIND();
}
return super.eGet(featureID, resolve, coreType);
}
|
python
|
def api_request(self, url, data=None, method='GET', raw=False, file=None):
""" Perform an API request to the given URL, optionally
including the specified data
:type url: String
:param url: the URL to which to make the request
:type data: String
:param data: the data to send with the request, if any
:type method: String
:param method: the HTTP request method
    :type raw: Boolean
    :param raw: if True, return the raw response, otherwise treat as JSON and return the parsed response
:type file: String
:param file: (Optional) full path to file to be uploaded in a POST request
:returns: the response from the server either as a raw response or a Python dictionary
generated by parsing the JSON response
:raises: APIError if the API request is not successful
"""
    # Compare strings with == rather than `is`; identity comparison relies on
    # interning and is not guaranteed to work.
    if method == 'GET':
        response = self.oauth.get(url)
    elif method == 'POST':
        if file is not None:
            response = self.oauth.post(url, data=data, file=file)
        else:
            response = self.oauth.post(url, data=data)
    elif method == 'PUT':
        response = self.oauth.put(url, data=data)
    elif method == 'DELETE':
        response = self.oauth.delete(url)
    else:
        raise APIError("Unknown request method: %s" % (method,))
# check for error responses
if response.status_code >= 400:
raise APIError(response.status_code,
'',
"Error accessing API (url: %s, method: %s)\nData: %s\nMessage: %s" % (url, method, data, response.text))
if raw:
return response.content
else:
return response.json()
|
python
|
def predict(self, X):
"""Predict for X.
For dask inputs, a dask array or dataframe is returned. For other
inputs (NumPy array, pandas dataframe, scipy sparse matrix), the
regular return value is returned.
Parameters
----------
X : array-like
Returns
-------
y : array-like
"""
self._check_method("predict")
X = self._check_array(X)
if isinstance(X, da.Array):
result = X.map_blocks(
_predict, dtype="int", estimator=self._postfit_estimator, drop_axis=1
)
return result
elif isinstance(X, dd._Frame):
return X.map_partitions(
_predict, estimator=self._postfit_estimator, meta=np.array([1])
)
else:
return _predict(X, estimator=self._postfit_estimator)
|
python
|
def create_engine(engine, options=None, defaults=None):
'''
Creates an instance of an engine.
There is a two-stage instantiation process with engines.
1. ``options``:
The keyword options to instantiate the engine class
2. ``defaults``:
The default configuration for the engine (options often depends on instantiated TTS engine)
'''
if engine not in _ENGINE_MAP.keys():
raise TTSError('Unknown engine %s' % engine)
options = options or {}
defaults = defaults or {}
einst = _ENGINE_MAP[engine](**options)
einst.configure_default(**defaults)
return einst
|
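A usage sketch; the engine name and option keys are hypothetical, since `_ENGINE_MAP` is populated elsewhere:

engine = create_engine(
    'espeak',                      # must be a key of _ENGINE_MAP, else TTSError
    options={'debug': True},       # keyword args for the engine class
    defaults={'language': 'en'},   # forwarded to configure_default()
)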
python
|
def _onerror(cls, kmsg, result):
""" To execute on execution failure
:param kser.schemas.Message kmsg: Kafka message
:param kser.result.Result result: Execution result
:return: Execution result
:rtype: kser.result.Result
"""
logger.error(
"{}.Failed: {}[{}]: {}".format(
cls.__name__, kmsg.entrypoint, kmsg.uuid, result
),
extra=dict(
kmsg=kmsg.dump(),
kresult=ResultSchema().dump(result) if result else dict()
)
)
return cls.onerror(kmsg, result)
|
python
|
def compile_theme(theme_id=None):
"""Compiles a theme."""
from engineer.processors import convert_less
from engineer.themes import ThemeManager
if theme_id is None:
themes = ThemeManager.themes().values()
else:
themes = [ThemeManager.theme(theme_id)]
with(indent(2)):
puts(colored.yellow("Compiling %s themes." % len(themes)))
for theme in themes:
theme_output_path = (theme.static_root / ('stylesheets/%s_precompiled.css' % theme.id)).normpath()
puts(colored.cyan("Compiling theme %s to %s" % (theme.id, theme_output_path)))
with indent(4):
puts("Compiling...")
convert_less(theme.static_root / ('stylesheets/%s.less' % theme.id),
theme_output_path,
minify=True)
puts(colored.green("Done.", bold=True))
|
python
|
def get_topic_count(self):
""" get_topic_count: get number of topics in tree
Args: None
Returns: int
"""
total = 0
if self.kind == content_kinds.TOPIC or self.kind == "Channel":
total = 1
for child in self.children:
total += child.get_topic_count()
return total
|
python
|
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(HAProxyCollector, self).get_default_config()
config.update({
'method': 'http',
'path': 'haproxy',
'url': 'http://localhost/haproxy?stats;csv',
'user': 'admin',
'pass': 'password',
'sock': '/var/run/haproxy.sock',
'ignore_servers': False,
})
return config
|
java
|
public Iterable<String> components() {
ImmutableList.Builder<String> components = ImmutableList.builder();
buildComponents(components);
return components.build();
}
|
java
|
public Record getRecordAt(int i)
{
i -= DBConstants.MAIN_FIELD; // Zero based index
try {
return (Record)this.elementAt(i);
} catch (ArrayIndexOutOfBoundsException e) {
}
return null; // Not found
}
|
java
|
public void unscheduleJob(String jobName)
throws JobException {
if (this.scheduledJobs.containsKey(jobName)) {
try {
this.scheduler.getScheduler().deleteJob(this.scheduledJobs.remove(jobName));
} catch (SchedulerException se) {
LOG.error("Failed to unschedule and delete job " + jobName, se);
throw new JobException("Failed to unschedule and delete job " + jobName, se);
}
}
}
|
java
|
private boolean discoverGlobalFaultData_rcv(Set<Long> hsIds) {
long blockedOnReceiveStart = System.currentTimeMillis();
long lastReportTime = 0;
boolean haveEnough = false;
int [] forwardStallCount = new int[] {FORWARD_STALL_COUNT};
do {
VoltMessage m = m_mailbox.recvBlocking(receiveSubjects, 5);
/*
             * If fault resolution takes longer than 10 seconds start logging
*/
final long now = System.currentTimeMillis();
if (now - blockedOnReceiveStart > 10000) {
if (now - lastReportTime > 60000) {
lastReportTime = System.currentTimeMillis();
haveNecessaryFaultInfo(m_seeker.getSurvivors(), true);
}
}
if (m == null) {
// Send a heartbeat to keep the dead host timeout active. Needed because IV2 doesn't
// generate its own heartbeats to keep this running.
m_meshAide.sendHeartbeats(m_seeker.getSurvivors());
} else if (m.getSubject() == Subject.SITE_FAILURE_UPDATE.getId()) {
SiteFailureMessage sfm = (SiteFailureMessage) m;
if ( !m_seeker.getSurvivors().contains(m.m_sourceHSId)
|| m_failedSites.contains(m.m_sourceHSId)
|| m_failedSites.containsAll(sfm.getFailedSites())) continue;
if (!sfm.m_decision.isEmpty()) {
m_decidedSurvivors.put(sfm.m_sourceHSId, sfm);
}
updateFailedSitesLedger(hsIds, sfm);
m_seeker.add(sfm);
addForwardCandidate(new SiteFailureForwardMessage(sfm));
m_recoveryLog.info("Agreement, Received " + sfm);
if (m_recoveryLog.isDebugEnabled()) {
                    m_recoveryLog.debug(String.format("\n  %s\n  %s\n  %s\n  %s\n  %s",
m_seeker.dumpAlive(), m_seeker.dumpDead(),
m_seeker.dumpReported(), m_seeker.dumpSurvivors(),
dumpInTrouble()));
}
} else if (m.getSubject() == Subject.SITE_FAILURE_FORWARD.getId()) {
SiteFailureForwardMessage fsfm = (SiteFailureForwardMessage) m;
addForwardCandidate(fsfm);
if ( !hsIds.contains(fsfm.m_sourceHSId)
|| m_seeker.getSurvivors().contains(fsfm.m_reportingHSId)
|| m_failedSites.contains(fsfm.m_reportingHSId)
|| m_failedSites.containsAll(fsfm.getFailedSites())) continue;
m_seeker.add(fsfm);
m_recoveryLog.info("Agreement, Received forward " + fsfm);
if (m_recoveryLog.isDebugEnabled()) {
m_recoveryLog.debug(String.format("\n %s\n %s\n %s\n %s\n %s",
m_seeker.dumpAlive(), m_seeker.dumpDead(),
m_seeker.dumpReported(), m_seeker.dumpSurvivors(),
dumpInTrouble()));
}
forwardStallCount[0] = FORWARD_STALL_COUNT;
} else if (m.getSubject() == Subject.FAILURE.getId()) {
/*
                 * If the fault distributor reports a new fault, ignore it if it is known, otherwise
* re-deliver the message to ourself and then abort so that the process can restart.
*/
FaultMessage fm = (FaultMessage) m;
Discard ignoreIt = mayIgnore(hsIds, fm);
if (Discard.DoNot == ignoreIt) {
m_mailbox.deliverFront(m);
m_recoveryLog.info("Agreement, Detected a concurrent failure from FaultDistributor, new failed site "
+ CoreUtils.hsIdToString(fm.failedSite));
return false;
} else {
if (m_recoveryLog.isDebugEnabled()) {
ignoreIt.log(fm);
}
}
}
haveEnough = haveEnough || haveNecessaryFaultInfo(m_seeker.getSurvivors(), false);
if (haveEnough) {
Iterator<Map.Entry<Long, SiteFailureForwardMessage>> itr =
m_forwardCandidates.entrySet().iterator();
while (itr.hasNext()) {
Map.Entry<Long, SiteFailureForwardMessage> e = itr.next();
Set<Long> unseenBy = m_seeker.forWhomSiteIsDead(e.getKey());
if (unseenBy.size() > 0) {
m_mailbox.send(Longs.toArray(unseenBy), e.getValue());
m_recoveryLog.info("Agreement, fowarding to "
+ CoreUtils.hsIdCollectionToString(unseenBy)
+ " " + e.getValue());
}
itr.remove();
}
}
} while (!haveEnough || m_seeker.needForward(forwardStallCount));
return true;
}
|
java
|
public void setConfigurationAggregatorNames(java.util.Collection<String> configurationAggregatorNames) {
if (configurationAggregatorNames == null) {
this.configurationAggregatorNames = null;
return;
}
this.configurationAggregatorNames = new com.amazonaws.internal.SdkInternalList<String>(configurationAggregatorNames);
}
|
python
|
def _estimate_centers_widths(
self,
unique_R,
inds,
X,
W,
init_centers,
init_widths,
template_centers,
template_widths,
template_centers_mean_cov,
template_widths_mean_var_reci):
"""Estimate centers and widths
Parameters
----------
unique_R : a list of array,
Each element contains unique value in one dimension of
coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
dimension of original cooridnate matrix from the unique
array.
X : 2D array, with shape [n_voxel, n_tr]
fMRI data from one subject.
W : 2D array, with shape [K, n_tr]
The weight matrix.
init_centers : 2D array, with shape [K, n_dim]
The initial values of centers.
init_widths : 1D array
The initial values of widths.
template_centers: 1D array
The template prior on centers
template_widths: 1D array
The template prior on widths
template_centers_mean_cov: 2D array, with shape [K, cov_size]
The template prior on centers' mean
template_widths_mean_var_reci: 1D array
The reciprocal of template prior on variance of widths' mean
Returns
-------
final_estimate.x: 1D array
The newly estimated centers and widths.
final_estimate.cost: float
The cost value.
"""
        # least_squares only accepts x in 1D format
        init_estimate = np.hstack(
            (init_centers.ravel(), init_widths.ravel()))
data_sigma = 1.0 / math.sqrt(2.0) * np.std(X)
final_estimate = least_squares(
self._residual_multivariate,
init_estimate,
args=(
unique_R,
inds,
X,
W,
template_centers,
template_widths,
template_centers_mean_cov,
template_widths_mean_var_reci,
data_sigma),
method=self.nlss_method,
loss=self.nlss_loss,
bounds=self.bounds,
verbose=0,
x_scale=self.x_scale,
tr_solver=self.tr_solver)
return final_estimate.x, final_estimate.cost
|
python
|
def debug_string(self, max_debug=MAX_DEBUG_TRIALS):
"""Returns a human readable message for printing to the console."""
messages = self._debug_messages()
states = collections.defaultdict(set)
limit_per_state = collections.Counter()
for t in self._trials:
states[t.status].add(t)
# Show at most max_debug total, but divide the limit fairly
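        # (Round-robin: each pass grants one more slot per state; the loop
        # exits once the budget is spent or no state accepted a new slot.)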
while max_debug > 0:
start_num = max_debug
for s in states:
if limit_per_state[s] >= len(states[s]):
continue
max_debug -= 1
limit_per_state[s] += 1
if max_debug == start_num:
break
for local_dir in sorted({t.local_dir for t in self._trials}):
messages.append("Result logdir: {}".format(local_dir))
num_trials_per_state = {
state: len(trials)
for state, trials in states.items()
}
total_number_of_trials = sum(num_trials_per_state.values())
if total_number_of_trials > 0:
messages.append("Number of trials: {} ({})"
"".format(total_number_of_trials,
num_trials_per_state))
for state, trials in sorted(states.items()):
limit = limit_per_state[state]
messages.append("{} trials:".format(state))
sorted_trials = sorted(
trials, key=lambda t: _naturalize(t.experiment_tag))
if len(trials) > limit:
tail_length = limit // 2
first = sorted_trials[:tail_length]
for t in first:
messages.append(" - {}:\t{}".format(
t, t.progress_string()))
messages.append(
" ... {} not shown".format(len(trials) - tail_length * 2))
last = sorted_trials[-tail_length:]
for t in last:
messages.append(" - {}:\t{}".format(
t, t.progress_string()))
else:
for t in sorted_trials:
messages.append(" - {}:\t{}".format(
t, t.progress_string()))
return "\n".join(messages) + "\n"
|
java
|
public ServiceFuture<Void> beginTerminateAsync(String resourceGroupName, String workspaceName, String experimentName, String jobName, final ServiceCallback<Void> serviceCallback) {
return ServiceFuture.fromResponse(beginTerminateWithServiceResponseAsync(resourceGroupName, workspaceName, experimentName, jobName), serviceCallback);
}
|
python
|
def search(request, spec, operator='and'):
"""
Search the package database using the indicated search spec.
The spec may include any of the keywords described in the above list
(except 'stable_version' and 'classifiers'),
for example: {'description': 'spam'} will search description fields.
Within the spec, a field's value can be a string or a list of strings
(the values within the list are combined with an OR),
for example: {'name': ['foo', 'bar']}.
Valid keys for the spec dict are listed here. Invalid keys are ignored:
name
version
author
author_email
maintainer
maintainer_email
home_page
license
summary
description
keywords
platform
download_url
Arguments for different fields are combined using either "and"
(the default) or "or".
Example: search({'name': 'foo', 'description': 'bar'}, 'or').
The results are returned as a list of dicts
{'name': package name,
'version': package release version,
'summary': package release summary}
"""
api = pypi.proxy
rv = []
# search in proxy
for k, v in spec.items():
rv += api.search({k: v}, True)
# search in local
session = DBSession()
release = Release.search(session, spec, operator)
rv += [{'name': r.package.name,
'version': r.version,
'summary': r.summary,
# hack https://mail.python.org/pipermail/catalog-sig/2012-October/004633.html
            '_pypi_ordering': '',
} for r in release]
return rv
|
python
|
def decode_file_args(self, argv: List[str]) -> List[str]:
"""
Preprocess a configuration file. The location of the configuration file is stored in the parser so that the
FileOrURI action can add relative locations.
:param argv: raw options list
:return: options list with '--conf' references replaced with file contents
"""
for i in range(0, len(argv) - 1):
# TODO: take prefix into account
if argv[i] == '--conf':
del argv[i]
conf_file = argv[i]
                del argv[i]
with open(conf_file) as config_file:
conf_args = shlex.split(config_file.read())
                # We take advantage of a potential bug in the parser where you can say "foo -u 1 -u 2" and get
# 2 as a result
argv = self.fix_rel_paths(conf_args, conf_file) + argv
return self.decode_file_args(argv)
return argv
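# A minimal usage sketch (hedged: the file name and option names are
# illustrative):
#
#     # args.conf contains:  --format turtle -u http://example.org/
#     argv = parser.decode_file_args(['--conf', 'args.conf', 'input.ttl'])
#     # -> ['--format', 'turtle', '-u', 'http://example.org/', 'input.ttl']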
|
java
|
HttpRedirectionValidator getRedirectionValidator() {
if (redirectionValidator == null) {
redirectionValidator = redirection -> {
if (!nodeInScope(redirection.getEscapedURI())) {
if (log.isDebugEnabled()) {
log.debug("Skipping redirection out of scan's scope: " + redirection);
}
return false;
}
return true;
};
}
return redirectionValidator;
}
|
java
|
private ZealotKhala doLike(String prefix, String field, Object value, boolean match, boolean positive) {
if (match) {
String suffix = positive ? ZealotConst.LIKE_KEY : ZealotConst.NOT_LIKE_KEY;
SqlInfoBuilder.newInstace(this.source.setPrefix(prefix).setSuffix(suffix)).buildLikeSql(field, value);
this.source.resetPrefix();
}
return this;
}
|
python
|
def get_all_source(self, **kwargs): # noqa: E501
"""Get all sources for a customer # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_source(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str cursor:
:param int limit:
:return: ResponseContainerPagedSource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_all_source_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_all_source_with_http_info(**kwargs) # noqa: E501
return data
|
java
|
private Pair<Long, Integer> updateCooked(byte[] compressedData) {
// Uncompress (inflate) the bytes.
byte[] cookedBytes;
try {
cookedBytes = gunzipBytes(compressedData);
} catch (IOException e) {
throw new RuntimeException("Unable to decompress elastic hashinator data.");
}
int numEntries = (cookedBytes.length >= 4
? ByteBuffer.wrap(cookedBytes).getInt()
: 0);
int tokensSize = 4 * numEntries;
int partitionsSize = 4 * numEntries;
if (numEntries <= 0 || cookedBytes.length != 4 + tokensSize + partitionsSize) {
throw new RuntimeException("Bad elastic hashinator cooked config size.");
}
long tokens = Bits.unsafe.allocateMemory(8 * numEntries);
ByteBuffer tokenBuf = ByteBuffer.wrap(cookedBytes, 4, tokensSize);
ByteBuffer partitionBuf = ByteBuffer.wrap(cookedBytes, 4 + tokensSize, partitionsSize);
int tokensArray[] = new int[numEntries];
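        // The cooked bytes store the tokens as four byte planes, most
        // significant plane first; the nested loops below reassemble each
        // 32-bit token before it is copied into native memory.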
for (int zz = 3; zz >= 0; zz--) {
for (int ii = 0; ii < numEntries; ii++) {
int value = tokenBuf.get();
value = (value << (zz * 8)) & (0xFF << (zz * 8));
tokensArray[ii] = (tokensArray[ii] | value);
}
}
int lastToken = Integer.MIN_VALUE;
for (int ii = 0; ii < numEntries; ii++) {
int token = tokensArray[ii];
Preconditions.checkArgument(token >= lastToken);
lastToken = token;
long ptr = tokens + (ii * 8);
Bits.unsafe.putInt(ptr, token);
final int partitionId = partitionBuf.getInt();
Bits.unsafe.putInt(ptr + 4, partitionId);
}
return Pair.of(tokens, numEntries);
}
|
java
|
public Matrix4f invert(Matrix4f dest) {
if ((properties & PROPERTY_IDENTITY) != 0) {
return dest.identity();
} else if ((properties & PROPERTY_TRANSLATION) != 0)
return invertTranslation(dest);
else if ((properties & PROPERTY_ORTHONORMAL) != 0)
return invertOrthonormal(dest);
else if ((properties & PROPERTY_AFFINE) != 0)
return invertAffine(dest);
else if ((properties & PROPERTY_PERSPECTIVE) != 0)
return invertPerspective(dest);
return invertGeneric(dest);
}
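// A minimal usage sketch (hedged: the vectors are illustrative; which
// specialized path is taken depends on the matrix's tracked properties):
//
//     Matrix4f view = new Matrix4f().lookAt(eye, center, up);
//     Matrix4f inv  = view.invert(new Matrix4f());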
|
java
|
public static byte[] createType1Message(String workStation,
String domain, Integer customFlags, byte[] osVersion) {
byte[] msg;
if (osVersion != null && osVersion.length != 8) {
throw new IllegalArgumentException(
"osVersion parameter should be a 8 byte wide array");
}
if (workStation == null || domain == null) {
throw new NullPointerException(
"workStation and domain must be non null");
}
int flags = customFlags != null ? customFlags
| FLAG_NEGOTIATE_WORKSTATION_SUPPLIED
| FLAG_NEGOTIATE_DOMAIN_SUPPLIED : DEFAULT_FLAGS;
ByteArrayOutputStream baos = new ByteArrayOutputStream();
try {
baos.write(NTLM_SIGNATURE);
baos.write(ByteUtilities.writeInt(MESSAGE_TYPE_1));
baos.write(ByteUtilities.writeInt(flags));
byte[] domainData = ByteUtilities.getOEMStringAsByteArray(domain);
byte[] workStationData = ByteUtilities
.getOEMStringAsByteArray(workStation);
int pos = (osVersion != null) ? 40 : 32;
baos.write(writeSecurityBuffer((short) domainData.length, pos
+ workStationData.length));
            baos.write(writeSecurityBuffer((short) workStationData.length, pos));
if (osVersion != null) {
baos.write(osVersion);
}
// Order is not mandatory since a pointer is given in the security buffers
baos.write(workStationData);
baos.write(domainData);
msg = baos.toByteArray();
baos.close();
} catch (IOException e) {
return null;
}
return msg;
}
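// A minimal usage sketch (hedged: names are illustrative; the type 1
// message is typically sent base64-encoded in an HTTP Authorization header):
//
//     byte[] type1 = createType1Message("MYWORKSTATION", "EXAMPLE",
//             null,  // null -> DEFAULT_FLAGS
//             null); // no OS version block
//     String header = "NTLM " + Base64.getEncoder().encodeToString(type1);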
|
python
|
def _encode_filename(filename): # pragma: no cover
"""Return a byte string suitable for a filename.
Unicode is encoded using an encoding adapted to what both cairo and the
filesystem want.
"""
# Don't replace unknown characters as '?' is forbidden in Windows filenames
errors = 'ignore' if os.name == 'nt' else 'replace'
if not isinstance(filename, bytes):
if os.name == 'nt' and cairo.cairo_version() >= 11510:
# Since 1.15.10, cairo uses utf-8 filenames on Windows
filename = filename.encode('utf-8', errors=errors)
else:
try:
filename = filename.encode(sys.getfilesystemencoding())
except UnicodeEncodeError:
# Use plain ASCII filenames as fallback
filename = filename.encode('ascii', errors=errors)
# TODO: avoid characters forbidden in filenames?
return ffi.new('char[]', filename)
|