language | func_code_string |
---|---|
java | static List<ProjectDefinition> getTopDownParentProjects(ProjectDefinition project) {
List<ProjectDefinition> result = new ArrayList<>();
ProjectDefinition p = project;
while (p != null) {
result.add(0, p);
p = p.getParent();
}
return result;
} |
python | def rt_time_log(logfile, startdate):
"""
Open and read a RefTek raw log-file.
Function to open and read a log-file as written by a RefTek RT130
datalogger. The information within is then scanned for timing errors
above the threshold.
:type logfile: str
:param logfile: The logfile to look in
:type startdate: datetime.date
:param startdate: The start of the file as a date - files contain timing \
and the julian day, but not the year.
:returns: List of tuple of (:class:`datetime.datetime`, float) as time \
stamps and phase error.
"""
# both branches opened the file identically, so a single binary open suffices
f = io.open(logfile, 'rb')
phase_err = []
lock = []
# Extract all the phase errors
for line_binary in f:
try:
line = line_binary.decode("utf8", "ignore")
except UnicodeDecodeError:
warnings.warn('Cannot decode line, skipping')
continue
if re.search("INTERNAL CLOCK PHASE ERROR", line):
match = re.search("INTERNAL CLOCK PHASE ERROR", line)
d_start = match.start() - 13
phase_err.append((dt.datetime.strptime(str(startdate.year) +
':' +
line[d_start:d_start + 12],
'%Y:%j:%H:%M:%S'),
float(line.rstrip().split()[-2]) *
0.000001))
elif re.search("EXTERNAL CLOCK POWER IS TURNED OFF", line):
match = re.search("EXTERNAL CLOCK POWER IS TURNED OFF", line)
d_start = match.start() - 13
lock.append((dt.datetime.strptime(str(startdate.year) +
':' + line[d_start:d_start + 12],
'%Y:%j:%H:%M:%S'),
999))
if len(phase_err) == 0 and len(lock) > 0:
phase_err = lock
f.close()
return phase_err |
python | def verificar_permissao(self, id_vlan, nome_equipamento, nome_interface):
"""Check if there is communication permission for VLAN to trunk.
Run script 'configurador'.
The "stdout" key value of response dictionary is 1(one) if VLAN has permission,
or 0(zero), otherwise.
:param id_vlan: VLAN identifier.
:param nome_equipamento: Equipment name.
:param nome_interface: Interface name.
:return: Following dictionary:
::
{'sucesso': {'codigo': < codigo >,
'descricao': {'stdout': < stdout >, 'stderr': < stderr >}}}
:raise VlanNaoExisteError: VLAN does not exist.
:raise InvalidParameterError: VLAN id is none or invalid.
:raise InvalidParameterError: Equipment name and/or interface name is invalid or none.
:raise EquipamentoNaoExisteError: Equipment does not exist.
:raise LigacaoFrontInterfaceNaoExisteError: There is no interface on front link of informed interface.
:raise InterfaceNaoExisteError: Interface does not exist or is not associated to equipment.
:raise LigacaoFrontNaoTerminaSwitchError: Interface does not have switch connected.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
:raise ScriptError: Failed to run the script.
"""
if not is_valid_int_param(id_vlan):
raise InvalidParameterError(
u'Vlan id is invalid or was not informed.')
url = 'vlan/' + str(id_vlan) + '/check/'
vlan_map = dict()
vlan_map['nome'] = nome_equipamento
vlan_map['nome_interface'] = nome_interface
code, xml = self.submit({'equipamento': vlan_map}, 'PUT', url)
return self.response(code, xml) |
python | def _set_iroot_via_xroot(self, xroot):
"""Determine the index of the root cell.
Given an expression vector, find the observation index that is closest
to this vector.
Parameters
----------
xroot : np.ndarray
Vector that marks the root cell, the vector storing the initial
condition, only relevant for computing pseudotime.
"""
if self._adata.shape[1] != xroot.size:
raise ValueError(
'The root vector you provided does not have the '
'correct dimension.')
# this is the squared distance
dsqroot = 1e10
iroot = 0
for i in range(self._adata.shape[0]):
diff = self._adata.X[i, :] - xroot
dsq = diff.dot(diff)
if dsq < dsqroot:
dsqroot = dsq
iroot = i
if np.sqrt(dsqroot) < 1e-10: break
logg.msg('setting root index to', iroot, v=4)
if self.iroot is not None and iroot != self.iroot:
logg.warn('Changing index of iroot from {} to {}.'.format(self.iroot, iroot))
self.iroot = iroot |
java | public HttpResponse decompress() throws IOException {
String enc = header("Content-Encoding");
return (enc != null && enc.contains("gzip")) ? unzip() : this;
} |
python | async def readobj(self):
"""
Return a parsed Redis object or an exception
when something wrong happened.
"""
assert self._parser is not None, "set_parser must be called"
while True:
obj = self._parser.gets()
if obj is not False:
# TODO: implement resume the read
# Return any valid object, including the Nil->None
# case. When it's False there is nothing there
# to be parsed and we have to wait for more data.
return obj
if self._exception:
raise self._exception
if self._eof:
break
await self._wait_for_data('readobj') |
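The loop above depends on a streaming-parser contract: `gets()` returns `False` until a complete reply has been buffered. A minimal sketch of that contract, assuming the hiredis `Reader` (one of the parsers this kind of Redis client typically wraps):

```python
import hiredis

parser = hiredis.Reader()
parser.feed(b"$5\r\nhel")         # half of a RESP bulk string
assert parser.gets() is False     # incomplete: wait for more data
parser.feed(b"lo\r\n")
assert parser.gets() == b"hello"  # a complete reply parses at once
```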
java | public MessagePacker addPayload(byte[] src, int off, int len)
throws IOException
{
if (buffer == null || buffer.size() - position < len || len > bufferFlushThreshold) {
flush(); // call flush before add
// Directly add the payload without using the buffer
out.add(src, off, len);
totalFlushBytes += len;
}
else {
buffer.putBytes(position, src, off, len);
position += len;
}
return this;
} |
python | def replace_lun(self, *lun_list):
"""Replaces the exiting LUNs to lun_list."""
lun_add = self._prepare_luns_add(lun_list)
lun_remove = self._prepare_luns_remove(lun_list, False)
return self.modify(lun_add=lun_add, lun_remove=lun_remove) |
python | def save(self, filename=None):
""" Saves the runtime configuration to disk.
Parameters
----------
filename: str or None, default=None
writeable path to configuration filename.
If None, use default location and filename.
"""
if not filename:
filename = self.DEFAULT_CONFIG_FILE_NAME
else:
filename = str(filename)
# try to extract the path from filename and use it as cfg_dir
head, tail = os.path.split(filename)
if head:
self._cfg_dir = head
# we search for .cfg files in cfg_dir, so make sure it contains the proper extension.
base, ext = os.path.splitext(tail)
if ext != ".cfg":
filename += ".cfg"
# if we have no cfg dir, try to create it first. Return if it failed.
if not self.cfg_dir or not os.path.isdir(self.cfg_dir) or not os.access(self.cfg_dir, os.W_OK):
try:
self.cfg_dir = self.DEFAULT_CONFIG_DIR
except ConfigDirectoryException as cde:
print(Config._format_msg('Could not create configuration directory "{dir}"! config.save() failed.'
' Please set a writeable location with config.cfg_dir = val. Error was {exc}'
.format(dir=self.cfg_dir, exc=cde)))
return
filename = os.path.join(self.cfg_dir, filename)
try:
with open(filename, 'w') as fh:
self._conf_values.write(fh)
except IOError as ioe:
print(Config._format_msg("Save failed with error %s" % ioe)) |
java | private void handleJoinEvent(Node node) {
GossipMember member = new GossipMember(MemberId.from(node.id().id()), node.address());
if (!members.containsKey(member.id())) {
sendHeartbeat(member);
}
} |
python | def cache_key(self):
"""
Key for the cache handling current site.
"""
return '%s:%s' % (super(EntryPublishedVectorBuilder, self).cache_key,
Site.objects.get_current().pk) |
python | def hash(value, arg):
"""
Returns a hex-digest of the passed in value for the hash algorithm given.
"""
arg = str(arg).lower()
if sys.version_info >= (3,0):
value = value.encode("utf-8")
if arg not in get_available_hashes():
raise TemplateSyntaxError("The %s hash algorithm does not exist. Supported algorithms are: %s" % (arg, get_available_hashes()))
try:
f = getattr(hashlib, arg)
hashed = f(value).hexdigest()
except Exception:
raise ValueError("The %s hash algorithm cannot produce a hex digest. Ensure that OpenSSL is properly installed." % arg)
return hashed |
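As a Django template filter this is normally invoked from a template. A hypothetical usage sketch (the library name `hash_filters` is an assumption, not from the source):

```python
# Hypothetical template usage, assuming the filter is registered as "hash":
#   {% load hash_filters %}
#   {{ "hello world"|hash:"sha256" }}
# which computes the same digest as:
import hashlib

print(hashlib.sha256("hello world".encode("utf-8")).hexdigest())
```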
python | def get_url(cls, ticket):
"""
Exchange a ticket for the QR code image URL.
For details, see
https://mp.weixin.qq.com/wiki?t=resource/res_main&id=mp1443433542
:param ticket: QR code ticket, which can be obtained via :func:`create`
:return: The QR code image URL
Usage example::
from wechatpy import WeChatClient
client = WeChatClient('appid', 'secret')
url = client.qrcode.get_url('ticket data')
"""
url = 'https://mp.weixin.qq.com/cgi-bin/showqrcode?ticket={ticket}'
if isinstance(ticket, dict):
ticket = ticket['ticket']
ticket = six.moves.urllib.parse.quote(ticket)
return url.format(ticket=ticket) |
java | @Override
public Calendar reply(Calendar request) {
Calendar reply = transform(Method.REPLY, request);
reply.validate();
return reply;
} |
python | def getDeviceIterator(self, skip_on_error=False):
"""
Return an iterator over all USB devices currently plugged in, as USBDevice
instances.
skip_on_error (bool)
If True, ignore devices which raise USBError.
"""
device_p_p = libusb1.libusb_device_p_p()
libusb_device_p = libusb1.libusb_device_p
device_list_len = libusb1.libusb_get_device_list(self.__context_p,
byref(device_p_p))
mayRaiseUSBError(device_list_len)
try:
for device_p in device_p_p[:device_list_len]:
try:
# Instantiate our own libusb_device_p object so we can free
# libusb-provided device list. Is this a bug in ctypes that
# it doesn't copy pointer value (=pointed memory address) ?
# At least, it's not so convenient and forces using such
# weird code.
device = USBDevice(self, libusb_device_p(device_p.contents))
except USBError:
if not skip_on_error:
raise
else:
self.__close_set.add(device)
yield device
finally:
libusb1.libusb_free_device_list(device_p_p, 1) |
java | @Override
public void addPackageDescription(Content packageContentTree) {
if (!utils.getBody(packageElement).isEmpty()) {
Content tree = configuration.allowTag(HtmlTag.SECTION) ? sectionTree : packageContentTree;
addDeprecationInfo(tree);
addInlineComment(packageElement, tree);
}
} |
python | def add_statements(self, pmid, stmts):
"""Add INDRA Statements to the incremental model indexed by PMID.
Parameters
----------
pmid : str
The PMID of the paper from which statements were extracted.
stmts : list[indra.statements.Statement]
A list of INDRA Statements to be added to the model.
"""
if pmid not in self.stmts:
self.stmts[pmid] = stmts
else:
self.stmts[pmid] += stmts |
java | public void marshall(SegmentGroup segmentGroup, ProtocolMarshaller protocolMarshaller) {
if (segmentGroup == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(segmentGroup.getDimensions(), DIMENSIONS_BINDING);
protocolMarshaller.marshall(segmentGroup.getSourceSegments(), SOURCESEGMENTS_BINDING);
protocolMarshaller.marshall(segmentGroup.getSourceType(), SOURCETYPE_BINDING);
protocolMarshaller.marshall(segmentGroup.getType(), TYPE_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
python | def load_dxf(file_obj, **kwargs):
"""
Load a DXF file to a dictionary containing vertices and
entities.
Parameters
----------
file_obj: file or file-like object (has object.read method)
Returns
----------
result: dict, keys are entities, vertices and metadata
"""
def info(e):
"""
Pull metadata based on group code, and return as a dict.
"""
# which keys should we extract from the entity data
# DXF group code : our metadata key
get = {'8': 'layer'}
# replace group codes with names and only
# take info from the entity dict if the group code is in get
renamed = {get[k]: util.make_sequence(v)[0] for k,
v in e.items() if k in get}
return renamed
def convert_line(e):
"""
Convert DXF LINE entities into trimesh Line entities.
"""
# create a single Line entity
entities.append(Line(points=len(vertices) + np.arange(2),
**info(e)))
# add the vertices to our collection
vertices.extend(np.array([[e['10'], e['20']],
[e['11'], e['21']]],
dtype=np.float64))
def convert_circle(e):
"""
Convert DXF CIRCLE entities into trimesh Circle entities
"""
R = float(e['40'])
C = np.array([e['10'],
e['20']]).astype(np.float64)
points = to_threepoint(center=C[0:2],
radius=R)
entities.append(Arc(points=(len(vertices) + np.arange(3)),
closed=True,
**info(e)))
vertices.extend(points)
def convert_arc(e):
"""
Convert DXF ARC entities into into trimesh Arc entities.
"""
# the radius of the circle
R = float(e['40'])
# the center point of the circle
C = np.array([e['10'],
e['20']], dtype=np.float64)
# the start and end angle of the arc, in degrees
# this may depend on an AUNITS header data
A = np.radians(np.array([e['50'],
e['51']], dtype=np.float64))
# convert center/radius/angle representation
# to three points on the arc representation
points = to_threepoint(center=C[0:2],
radius=R,
angles=A)
# add a single Arc entity
entities.append(Arc(points=len(vertices) + np.arange(3),
closed=False,
**info(e)))
# add the three vertices
vertices.extend(points)
def convert_polyline(e):
"""
Convert DXF LWPOLYLINE entities into trimesh Line entities.
"""
# load the points in the line
lines = np.column_stack((
e['10'], e['20'])).astype(np.float64)
# save entity info so we don't have to recompute
polyinfo = info(e)
# 70 is the closed flag for polylines
# if the closed flag is set make sure to close
is_closed = '70' in e and int(e['70'][0]) & 1
if is_closed:
lines = np.vstack((lines, lines[:1]))
# 42 is the vertex bulge flag for LWPOLYLINE entities
# "bulge" is autocad for "add a stupid arc using flags
# in my otherwise normal polygon", it's like SVG arc
# flags but somehow even more annoying
if '42' in e:
# get the actual bulge float values
bulge = np.array(e['42'], dtype=np.float64)
# what position were vertices stored at
vid = np.nonzero(chunk[:, 0] == '10')[0]
# what position were bulges stored at in the chunk
bid = np.nonzero(chunk[:, 0] == '42')[0]
# filter out endpoint bulge if we're not closed
if not is_closed:
bid_ok = bid < vid.max()
bid = bid[bid_ok]
bulge = bulge[bid_ok]
# which vertex index is bulge value associated with
bulge_idx = np.searchsorted(vid, bid)
# convert stupid bulge to Line/Arc entities
v, e = bulge_to_arcs(lines=lines,
bulge=bulge,
bulge_idx=bulge_idx,
is_closed=is_closed)
for i in e:
# offset added entities by current vertices length
i.points += len(vertices)
vertices.extend(v)
entities.extend(e)
# done with this polyline
return
# we have a normal polyline so just add it
# as single line entity and vertices
entities.append(Line(
points=np.arange(len(lines)) + len(vertices),
**polyinfo))
vertices.extend(lines)
def convert_bspline(e):
"""
Convert DXF Spline entities into trimesh BSpline entities.
"""
# in the DXF there are n points and n ordered fields
# with the same group code
points = np.column_stack((e['10'],
e['20'])).astype(np.float64)
knots = np.array(e['40']).astype(np.float64)
# if there are only two points, save it as a line
if len(points) == 2:
# create a single Line entity
entities.append(Line(points=len(vertices) +
np.arange(2),
**info(e)))
# add the vertices to our collection
vertices.extend(points)
return
# check bit coded flag for closed
# closed = bool(int(e['70'][0]) & 1)
# check euclidean distance to see if closed
closed = np.linalg.norm(points[0] -
points[-1]) < tol.merge
# create a BSpline entity
entities.append(BSpline(
points=np.arange(len(points)) + len(vertices),
knots=knots,
closed=closed,
**info(e)))
# add the vertices
vertices.extend(points)
def convert_text(e):
"""
Convert a DXF TEXT entity into a native text entity.
"""
if '50' in e:
# rotation angle converted to radians
angle = np.radians(float(e['50']))
else:
# otherwise no rotation
angle = 0.0
# text with leading and trailing whitespace removed
text = e['1'].strip()
# height of text
if '40' in e:
height = float(e['40'])
else:
height = None
# origin point
origin = np.array([e['10'],
e['20']]).astype(np.float64)
# an origin- relative point (so transforms work)
vector = origin + [np.cos(angle), np.sin(angle)]
# try to extract a (horizontal, vertical) text alignment
align = ['center', 'center']
try:
align[0] = ['left', 'center', 'right'][int(e['72'])]
except BaseException:
pass
# append the entity
entities.append(Text(origin=len(vertices),
vector=len(vertices) + 1,
height=height,
text=text,
align=align))
# append the text origin and direction
vertices.append(origin)
vertices.append(vector)
# in a DXF file, lines come in pairs,
# a group code then the next line is the value
# we are removing all whitespace then splitting with the
# splitlines function which uses the universal newline method
raw = file_obj.read()
# if we've been passed bytes
if hasattr(raw, 'decode'):
# search for the sentinel string indicating binary DXF
# do it by encoding sentinel to bytes and subset searching
if raw[:22].find(b'AutoCAD Binary DXF') != -1:
if _teigha is None:
# no converter to ASCII DXF available
raise ValueError('binary DXF not supported!')
else:
# convert binary DXF to R14 ASCII DXF
raw = _teigha_convert(raw, extension='dxf')
else:
# we've been passed bytes that don't have the
# header for binary DXF so try decoding as UTF-8
raw = raw.decode('utf-8', errors='ignore')
# remove trailing whitespace
raw = str(raw).strip()
# without any spaces and in upper case
cleaned = raw.replace(' ', '').strip().upper()
# blob with spaces and original case
blob_raw = np.array(str.splitlines(raw)).reshape((-1, 2))
# if this reshape fails, it means the DXF is malformed
blob = np.array(str.splitlines(cleaned)).reshape((-1, 2))
# get the section which contains the header in the DXF file
endsec = np.nonzero(blob[:, 1] == 'ENDSEC')[0]
# get the section which contains entities in the DXF file
entity_start = np.nonzero(blob[:, 1] == 'ENTITIES')[0][0]
entity_end = endsec[np.searchsorted(endsec, entity_start)]
entity_blob = blob[entity_start:entity_end]
# store the entity blob with original case
entity_raw = blob_raw[entity_start:entity_end]
# store metadata
metadata = {}
# try reading the header, which may be malformed
header_start = np.nonzero(blob[:, 1] == 'HEADER')[0]
if len(header_start) > 0:
header_end = endsec[np.searchsorted(endsec, header_start[0])]
header_blob = blob[header_start[0]:header_end]
# store some properties from the DXF header
metadata['DXF_HEADER'] = {}
for key, group in [('$ACADVER', '1'),
('$DIMSCALE', '40'),
('$DIMALT', '70'),
('$DIMALTF', '40'),
('$DIMUNIT', '70'),
('$INSUNITS', '70'),
('$LUNITS', '70')]:
value = get_key(header_blob,
key,
group)
if value is not None:
metadata['DXF_HEADER'][key] = value
# store unit data pulled from the header of the DXF
# prefer LUNITS over INSUNITS
# I couldn't find a table for LUNITS values but they
# look like they are 0- indexed versions of
# the INSUNITS keys, so for now offset the key value
for offset, key in [(-1, '$LUNITS'),
(0, '$INSUNITS')]:
# get the key from the header blob
units = get_key(header_blob, key, '70')
# if it exists add the offset
if units is None:
continue
metadata[key] = units
units += offset
# if the key is in our list of units store it
if units in _DXF_UNITS:
metadata['units'] = _DXF_UNITS[units]
# warn on drawings with no units
if 'units' not in metadata:
log.warning('DXF doesn\'t have units specified!')
# find the start points of entities
group_check = entity_blob[:, 0] == '0'
inflection = np.nonzero(group_check)[0]
# DXF object to trimesh object converters
loaders = {'LINE': (dict, convert_line),
'LWPOLYLINE': (util.multi_dict, convert_polyline),
'ARC': (dict, convert_arc),
'CIRCLE': (dict, convert_circle),
'SPLINE': (util.multi_dict, convert_bspline)}
# store loaded vertices
vertices = []
# store loaded entities
entities = []
# an old-style polyline entity strings its data across
# multiple vertex entities like a real asshole
polyline = None
# loop through chunks of entity information
for index in np.array_split(np.arange(len(entity_blob)),
inflection):
# if there is only a header continue
if len(index) < 1:
continue
# chunk will be an (n, 2) array of (group code, data) pairs
chunk = entity_blob[index]
# the string representing entity type
entity_type = chunk[0][1]
############
# special case old- style polyline entities
if entity_type == 'POLYLINE':
polyline = [dict(chunk)]
# if we are collecting vertex entities
elif polyline is not None and entity_type == 'VERTEX':
polyline.append(dict(chunk))
# the end of a polyline
elif polyline is not None and entity_type == 'SEQEND':
# pull the geometry information for the entity
lines = np.array([[i['10'], i['20']]
for i in polyline[1:]],
dtype=np.float64)
# check for a closed flag on the polyline
if '70' in polyline[0]:
# flag is bit- coded integer
flag = int(polyline[0]['70'])
# first bit represents closed
is_closed = bool(flag & 1)
if is_closed:
lines = np.vstack((lines, lines[:1]))
# get the index of each bulged vertices
bulge_idx = np.array([i for i, e in enumerate(polyline)
if '42' in e],
dtype=np.int64)
# get the actual bulge value
bulge = np.array([float(e['42'])
for i, e in enumerate(polyline)
if '42' in e],
dtype=np.float64)
# convert bulge to new entities
v, e = bulge_to_arcs(lines=lines,
bulge=bulge,
bulge_idx=bulge_idx,
is_closed=is_closed)
for i in e:
# offset entities by existing vertices
i.points += len(vertices)
vertices.extend(v)
entities.extend(e)
# we no longer have an active polyline
polyline = None
elif entity_type == 'TEXT':
# text entities need spaces preserved so take
# group codes from clean representation (0- column)
# and data from the raw representation (1- column)
chunk_raw = entity_raw[index]
# if we didn't use clean group codes we wouldn't
# be able to access them by key as whitespace
# is random and crazy, like: ' 1 '
chunk_raw[:, 0] = entity_blob[index][:, 0]
try:
convert_text(dict(chunk_raw))
except BaseException:
log.warning('failed to load text entity!',
exc_info=True)
# if the entity contains all relevant data we can
# cleanly load it from inside a single function
elif entity_type in loaders:
# the chunker converts an (n,2) list into a dict
chunker, loader = loaders[entity_type]
# convert data to dict
entity_data = chunker(chunk)
# append data to the lists we're collecting
loader(entity_data)
else:
log.debug('Entity type %s not supported',
entity_type)
# stack vertices into single array
vertices = util.vstack_empty(vertices).astype(np.float64)
# return result as kwargs for trimesh.path.Path2D constructor
result = {'vertices': vertices,
'entities': np.array(entities),
'metadata': metadata}
return result |
python | def nullify(v):
"""Convert empty strings and strings with only spaces to None values. """
if isinstance(v, six.string_types):
v = v.strip()
if v is None or v == '':
return None
else:
return v |
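A few worked cases, assuming the function above is in scope, make the behavior concrete:

```python
assert nullify('   ') is None   # whitespace-only collapses to None
assert nullify('') is None
assert nullify(' x ') == 'x'    # surrounding spaces are stripped
assert nullify(0) == 0          # non-strings pass through (0 != '')
```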
java | protected AstNode parseCustomStatement( DdlTokenStream tokens,
AstNode parentNode ) throws ParsingException {
assert tokens != null;
assert parentNode != null;
// DEFAULT DOES NOTHING
// Subclasses can implement additional parsing
return null;
} |
java | public void setInt(String name, int value) throws UnsupportedEncodingException {
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.entry(this, tc, "setInt", Integer.valueOf(value));
getBodyMap().put(name, Integer.valueOf(value));
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(this, tc, "setInt");
} |
java | @POST
@Consumes(MediaType.MULTIPART_FORM_DATA)
public Response upload(MultipartBody multipart){
Attachment file = multipart.getAttachment("file");
if (file == null) {
return Response.status(400).entity("Missing file part").type(MediaType.TEXT_PLAIN).build();
}
InputStream fileStream = file.getObject(InputStream.class);
String uploaded;
try {
uploaded = m_management.putTempStream(getContext(), fileStream);
logger.debug("File uploaded: ", uploaded);
} catch (ServerException e) {
logger.error(e.toString());
return handleException(e, false);
}
return Response.status(Response.Status.ACCEPTED).entity(uploaded).type(
MediaType.TEXT_PLAIN).build();
} |
java | static ResponseList<HelpResources.Language> createLanguageList(JSONArray list, HttpResponse res
, Configuration conf) throws TwitterException {
if (conf.isJSONStoreEnabled()) {
TwitterObjectFactory.clearThreadLocalMap();
}
try {
int size = list.length();
ResponseList<HelpResources.Language> languages =
new ResponseListImpl<HelpResources.Language>(size, res);
for (int i = 0; i < size; i++) {
JSONObject json = list.getJSONObject(i);
HelpResources.Language language = new LanguageJSONImpl(json);
languages.add(language);
if (conf.isJSONStoreEnabled()) {
TwitterObjectFactory.registerJSONObject(language, json);
}
}
if (conf.isJSONStoreEnabled()) {
TwitterObjectFactory.registerJSONObject(languages, list);
}
return languages;
} catch (JSONException jsone) {
throw new TwitterException(jsone);
}
} |
python | def decode_utf8(f):
"""Decode a utf-8 string encoded as described in MQTT Version
3.1.1 section 1.5.3 line 177. This is a 16-bit unsigned length
followed by a utf-8 encoded string.
Parameters
----------
f: file
File-like object with read method.
Raises
------
UnderflowDecodeError
Raised when a read failed to extract enough bytes from the
underlying stream to decode the string.
Utf8DecodeError
When any code point in the utf-8 string is invalid.
Returns
-------
int
Number of bytes consumed.
str
A string utf-8 decoded from ``f``.
"""
decode = codecs.getdecoder('utf8')
buf = f.read(FIELD_U16.size)
if len(buf) < FIELD_U16.size:
raise UnderflowDecodeError()
(num_utf8_bytes,) = FIELD_U16.unpack_from(buf)
num_bytes_consumed = FIELD_U16.size + num_utf8_bytes
buf = f.read(num_utf8_bytes)
if len(buf) < num_utf8_bytes:
raise UnderflowDecodeError()
try:
s, num_chars = decode(buf, 'strict')
except UnicodeError as e:
raise Utf8DecodeError(e)
return num_bytes_consumed, s |
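A worked example of the wire format described above; `FIELD_U16` is assumed to be a big-endian unsigned 16-bit `struct.Struct('>H')`, which the length-prefix description implies:

```python
import io
import struct

payload = u'MQTT'.encode('utf-8')
buf = io.BytesIO(struct.pack('>H', len(payload)) + payload)
# decode_utf8(buf) would return (6, u'MQTT'):
# 2 length-prefix bytes + 4 utf-8 payload bytes consumed.
```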
python | def main() -> None:
"""
Command-line processor. See ``--help`` for details.
"""
main_only_quicksetup_rootlogger(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument("directory", nargs="?", default=os.getcwd())
parser.add_argument("--reportevery", default=10000)
args = parser.parse_args()
log.info("Extensions in directory {!r}:", args.directory)
print("\n".join(repr(x) for x in
list_file_extensions(args.directory,
reportevery=args.reportevery))) |
python | def evaluate_rpn(rpn):
"""
Evaluates the RPN form produced by map2rpn.
Returns:
bool
"""
vals_stack = []
for item in rpn:
if item in _ALL_OPS:
# Apply the operator and push the result to the stack.
v2 = vals_stack.pop()
if item in _UNARY_OPS:
res = _UNARY_OPS[item](v2)
elif item in _BIN_OPS:
v1 = vals_stack.pop()
res = _BIN_OPS[item](v1, v2)
else:
raise ValueError("%s not in unary_ops or bin_ops" % str(item))
vals_stack.append(res)
else:
# Push the operand
vals_stack.append(item)
#print(vals_stack)
assert len(vals_stack) == 1
assert isinstance(vals_stack[0], bool)
return vals_stack[0] |
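The operator tables `_ALL_OPS`, `_UNARY_OPS` and `_BIN_OPS` are defined elsewhere in the module; a hypothetical minimal version, just to make the evaluation order concrete:

```python
import operator

# Hypothetical stand-ins for the module-level operator tables.
_UNARY_OPS = {'!': operator.not_}
_BIN_OPS = {'and': operator.and_, 'or': operator.or_}
_ALL_OPS = dict(_UNARY_OPS, **_BIN_OPS)

# "True and (not False)" in reverse Polish notation:
# evaluate_rpn([True, False, '!', 'and']) -> True
```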
java | @Trivial
private void writeObject(ObjectOutputStream out) throws IOException {
PutField fields = out.putFields();
fields.put(FAILURE, failure);
fields.put(PARAMS, params);
fields.put(REASON, reason);
out.writeFields();
} |
python | def filter_string(self, word):
"""Return a string like the input but containing only legal IPA segments
Args:
word (unicode): input string to be filtered
Returns:
unicode: string identical to `word` but with invalid IPA segments
absent
"""
segs = [m.group(0) for m in self.seg_regex.finditer(word)]
return ''.join(segs) |
python | def backup(schema, uuid, export_filter, export_format, filename, pretty, export_all, omit):
"""Exports all collections to (JSON-) files."""
export_format = export_format.upper()
if pretty:
indent = 4
else:
indent = 0
f = None
if filename:
try:
f = open(filename, 'w')
except (IOError, PermissionError) as e:
backup_log('Could not open output file for writing:', exc=True, lvl=error)
return
def output(what, convert=False):
"""Output the backup in a specified format."""
if convert:
if export_format == 'JSON':
data = json.dumps(what, indent=indent)
else:
data = ""
else:
data = what
if not filename:
print(data)
else:
f.write(data)
if schema is None:
if export_all is False:
backup_log('No schema given.', lvl=warn)
return
else:
schemata = objectmodels.keys()
else:
schemata = [schema]
all_items = {}
for schema_item in schemata:
model = objectmodels[schema_item]
if uuid:
obj = model.find({'uuid': uuid})
elif export_filter:
obj = model.find(literal_eval(export_filter))
else:
obj = model.find()
items = []
for item in obj:
fields = item.serializablefields()
for field in omit:
try:
fields.pop(field)
except KeyError:
pass
items.append(fields)
all_items[schema_item] = items
# if pretty is True:
# output('\n// Objectmodel: ' + schema_item + '\n\n')
# output(schema_item + ' = [\n')
output(all_items, convert=True)
if f is not None:
f.flush()
f.close() |
java | public static Set<JAASSystem> getAllJAASSystems()
{
final Set<JAASSystem> ret = new HashSet<>();
final Cache<Long, JAASSystem> cache = InfinispanCache.get().<Long, JAASSystem>getCache(JAASSystem.IDCACHE);
for (final Map.Entry<Long, JAASSystem> entry : cache.entrySet()) {
ret.add(entry.getValue());
}
return ret;
} |
java | @Override
@Nullable
public Charset charsetForName (@Nonnull final String sCharsetName)
{
final String sRealCharsetName = sCharsetName.toUpperCase (Locale.US);
for (final Charset aCharset : m_aCharsets)
if (aCharset.name ().equals (sRealCharsetName))
return aCharset;
for (final Charset aCharset : m_aCharsets)
if (aCharset.aliases ().contains (sRealCharsetName))
return aCharset;
return null;
} |
java | private Object[] getValues(Map<String, Object> annotationAtts) {
if (null == annotationAtts) {
throw new DuraCloudRuntimeException("Arg annotationAtts is null.");
}
List<Object> values = new ArrayList<Object>();
for (String key : annotationAtts.keySet()) {
Object[] objects = (Object[]) annotationAtts.get(key);
for (Object obj : objects) {
values.add(obj);
}
}
return values.toArray();
} |
java | public String getDisplayValue(Object ob) {
if( ob == null ) {
return "";
}
if( ob instanceof Translator ) {
return getDisplayValue(Locale.getDefault(), ob);
}
return ob.toString();
} |
python | def master_call(self, **kwargs):
'''
Execute a wheel function through the master network interface (eauth).
'''
load = kwargs
load['cmd'] = 'wheel'
interface = self.opts['interface']
if interface == '0.0.0.0':
interface = '127.0.0.1'
master_uri = 'tcp://{}:{}'.format(
salt.utils.zeromq.ip_bracket(interface),
six.text_type(self.opts['ret_port'])
)
channel = salt.transport.client.ReqChannel.factory(self.opts,
crypt='clear',
master_uri=master_uri,
usage='master_call')
try:
ret = channel.send(load)
finally:
channel.close()
if isinstance(ret, collections.Mapping):
if 'error' in ret:
salt.utils.error.raise_error(**ret['error'])
return ret |
java | public static boolean containsOptionWithMatching(final List<?> options, final Object data) {
if (options != null) {
for (Object option : options) {
if (option instanceof OptionGroup) {
List<?> groupOptions = ((OptionGroup) option).getOptions();
if (groupOptions != null) {
for (Object nestedOption : groupOptions) {
if (isEqualWithMatching(nestedOption, data)) {
return true;
}
}
}
} else if (isEqualWithMatching(option, data)) {
return true;
}
}
}
return false;
} |
python | def extract_user_keywords_generator(twitter_lists_gen, lemmatizing="wordnet"):
"""
Based on the user-related lists I have downloaded, annotate the users.
Inputs: - twitter_lists_gen: A python generator that yields a user Twitter id and a generator of Twitter lists.
- lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".
Yields: - user_twitter_id: A Twitter user id.
- user_annotation: A python dictionary that contains two dicts:
* bag_of_lemmas: Maps lemmas to multiplicity.
* lemma_to_keywordbag: A python dictionary that maps stems/lemmas to original topic keywords.
"""
####################################################################################################################
# Extract keywords serially.
####################################################################################################################
for user_twitter_id, twitter_lists_list in twitter_lists_gen:
if twitter_lists_list is not None:
if "lists" in twitter_lists_list.keys():
twitter_lists_list = twitter_lists_list["lists"]
bag_of_lemmas, lemma_to_keywordbag = user_twitter_list_bag_of_words(twitter_lists_list, lemmatizing)
for lemma, keywordbag in lemma_to_keywordbag.items():
lemma_to_keywordbag[lemma] = dict(keywordbag)
lemma_to_keywordbag = dict(lemma_to_keywordbag)
user_annotation = dict()
user_annotation["bag_of_lemmas"] = bag_of_lemmas
user_annotation["lemma_to_keywordbag"] = lemma_to_keywordbag
yield user_twitter_id, user_annotation |
java | @Restricted(NoExternalUse.class)
public boolean hasFreshToken(@Nonnull User user, @Nullable ApiTokenProperty.TokenInfoAndStats legacyStats) {
if (legacyStats == null) {
return false;
}
ApiTokenProperty apiTokenProperty = user.getProperty(ApiTokenProperty.class);
return apiTokenProperty.getTokenList().stream()
.filter(token -> !token.isLegacy)
.anyMatch(token -> {
Date creationDate = token.creationDate;
Date lastUseDate = legacyStats.lastUseDate;
if (lastUseDate == null) {
lastUseDate = legacyStats.creationDate;
}
return creationDate != null && lastUseDate != null && creationDate.after(lastUseDate);
});
} |
python | def hsps(self):
"""
Get all HSPs for all the alignments for all titles.
@return: A generator yielding L{dark.hsp.HSP} instances.
"""
return (hsp for titleAlignments in self.values()
for alignment in titleAlignments for hsp in alignment.hsps) |
java | private void ssMintroSort(int PA, int first, int last, int depth) {
final int STACK_SIZE = SS_MISORT_STACKSIZE;
StackElement[] stack = new StackElement[STACK_SIZE];
int Td;// T ptr
int a, b, c, d, e, f;// SA ptr
int s, t;
int ssize;
int limit;
int v, x = 0;
for (ssize = 0, limit = ssIlg(last - first); ; ) {
if ((last - first) <= SS_INSERTIONSORT_THRESHOLD) {
if (1 < (last - first)) {
ssInsertionSort(PA, first, last, depth);
}
if (ssize > 0) {
StackElement se = stack[--ssize];
first = se.a;
last = se.b;
depth = se.c;
limit = se.d;
} else {
return;
}
continue;
}
Td = depth;
if (limit-- == 0) {
ssHeapSort(Td, PA, first, last - first);
}
if (limit < 0) {
for (a = first + 1, v = T[start + Td + SA[PA + SA[first]]]; a < last; ++a) {
if ((x = T[start + Td + SA[PA + SA[a]]]) != v) {
if (1 < (a - first)) {
break;
}
v = x;
first = a;
}
}
if (T[start + Td + SA[PA + SA[first]] - 1] < v) {
first = ssPartition(PA, first, a, depth);
}
if ((a - first) <= (last - a)) {
if (1 < (a - first)) {
stack[ssize++] = new StackElement(a, last, depth, -1);
last = a;
depth += 1;
limit = ssIlg(a - first);
} else {
first = a;
limit = -1;
}
} else {
if (1 < (last - a)) {
stack[ssize++] = new StackElement(first, a, depth + 1, ssIlg(a
- first));
first = a;
limit = -1;
} else {
last = a;
depth += 1;
limit = ssIlg(a - first);
}
}
continue;
}
// choose pivot
a = ssPivot(Td, PA, first, last);
v = T[start + Td + SA[PA + SA[a]]];
swapInSA(first, a);
// partition
for (b = first; (++b < last) && ((x = T[start + Td + SA[PA + SA[b]]]) == v); ) {
}
if (((a = b) < last) && (x < v)) {
for (; (++b < last) && ((x = T[start + Td + SA[PA + SA[b]]]) <= v); ) {
if (x == v) {
swapInSA(b, a);
++a;
}
}
}
for (c = last; (b < --c) && ((x = T[start + Td + SA[PA + SA[c]]]) == v); ) {
}
if ((b < (d = c)) && (x > v)) {
for (; (b < --c) && ((x = T[start + Td + SA[PA + SA[c]]]) >= v); ) {
if (x == v) {
swapInSA(c, d);
--d;
}
}
}
for (; b < c; ) {
swapInSA(b, c);
for (; (++b < c) && ((x = T[start + Td + SA[PA + SA[b]]]) <= v); ) {
if (x == v) {
swapInSA(b, a);
++a;
}
}
for (; (b < --c) && ((x = T[start + Td + SA[PA + SA[c]]]) >= v); ) {
if (x == v) {
swapInSA(c, d);
--d;
}
}
}
if (a <= d) {
c = b - 1;
if ((s = a - first) > (t = b - a)) {
s = t;
}
for (e = first, f = b - s; 0 < s; --s, ++e, ++f) {
swapInSA(e, f);
}
if ((s = d - c) > (t = last - d - 1)) {
s = t;
}
for (e = b, f = last - s; 0 < s; --s, ++e, ++f) {
swapInSA(e, f);
}
a = first + (b - a);
c = last - (d - c);
b = (v <= T[start + Td + SA[PA + SA[a]] - 1]) ? a : ssPartition(PA, a, c,
depth);
if ((a - first) <= (last - c)) {
if ((last - c) <= (c - b)) {
stack[ssize++] = new StackElement(b, c, depth + 1, ssIlg(c - b));
stack[ssize++] = new StackElement(c, last, depth, limit);
last = a;
} else if ((a - first) <= (c - b)) {
stack[ssize++] = new StackElement(c, last, depth, limit);
stack[ssize++] = new StackElement(b, c, depth + 1, ssIlg(c - b));
last = a;
} else {
stack[ssize++] = new StackElement(c, last, depth, limit);
stack[ssize++] = new StackElement(first, a, depth, limit);
first = b;
last = c;
depth += 1;
limit = ssIlg(c - b);
}
} else {
if ((a - first) <= (c - b)) {
stack[ssize++] = new StackElement(b, c, depth + 1, ssIlg(c - b));
stack[ssize++] = new StackElement(first, a, depth, limit);
first = c;
} else if ((last - c) <= (c - b)) {
stack[ssize++] = new StackElement(first, a, depth, limit);
stack[ssize++] = new StackElement(b, c, depth + 1, ssIlg(c - b));
first = c;
} else {
stack[ssize++] = new StackElement(first, a, depth, limit);
stack[ssize++] = new StackElement(c, last, depth, limit);
first = b;
last = c;
depth += 1;
limit = ssIlg(c - b);
}
}
} else {
limit += 1;
if (T[start + Td + SA[PA + SA[first]] - 1] < v) {
first = ssPartition(PA, first, last, depth);
limit = ssIlg(last - first);
}
depth += 1;
}
}
} |
python | def get_junctions_string(self):
"""Get a string representation of the junctions. This is almost identical to a previous function.
:return: string representation of junction
:rtype: string
"""
self._initialize()
return ';'.join([x.get_range_string() for x in self.junctions]) |
python | def subject(self):
"""The certificates subject as :py:class:`~django_ca.subject.Subject`."""
return Subject([(s.oid, s.value) for s in self.x509.subject]) |
python | def create_telnet_shell(shell, loop=None):
"""
Run a shell application with a telnet frontend
:param shell: An EmbedShell instance
:param loop: The event loop
:returns: Telnet server
"""
if loop is None:
loop = asyncio.get_event_loop()
def factory(reader, writer):
return ShellConnection(reader, writer, shell, loop)
return AsyncioTelnetServer(binary=True, echo=True, naws=True, connection_factory=factory) |
java | public final void addLayerListener(MapLayerListener listener) {
if (this.listeners == null) {
this.listeners = new ListenerCollection<>();
}
this.listeners.add(MapLayerListener.class, listener);
} |
java | public static boolean stringInPatterns(String s, List<Pattern> patterns) {
for (Pattern pattern : patterns) {
if (pattern.matcher(s).matches()) {
return true;
}
}
return false;
} |
python | def m(name='', **kwargs):
"""
Print out memory usage at this point in time
http://docs.python.org/2/library/resource.html
http://stackoverflow.com/a/15448600/5006
http://stackoverflow.com/questions/110259/which-python-memory-profiler-is-recommended
"""
with Reflect.context(**kwargs) as r:
kwargs["name"] = name
instance = M_CLASS(r, stream, **kwargs)
instance() |
python | def _get_present_locations(match_traversals):
"""Return the set of locations and non-optional locations present in the given match traversals.
When enumerating the possibilities for optional traversals,
the resulting match traversals may have sections of the query omitted.
These locations will not be included in the returned `present_locations`.
All of the above locations that are not optional traverse locations
will be included in present_non_optional_locations.
Args:
match_traversals: one possible list of match traversals generated from a query
containing @optional traversal(s)
Returns:
tuple (present_locations, present_non_optional_locations):
- present_locations: set of all locations present in the given match traversals
- present_non_optional_locations: set of all locations present in the match traversals
that are not reached through optional traverses.
Guaranteed to be a subset of present_locations.
"""
present_locations = set()
present_non_optional_locations = set()
for match_traversal in match_traversals:
for step in match_traversal:
if step.as_block is not None:
location_name, _ = step.as_block.location.get_location_name()
present_locations.add(location_name)
if isinstance(step.root_block, Traverse) and not step.root_block.optional:
present_non_optional_locations.add(location_name)
if not present_non_optional_locations.issubset(present_locations):
raise AssertionError(u'present_non_optional_locations {} was not a subset of '
u'present_locations {}. This should never happen.'
.format(present_non_optional_locations, present_locations))
return present_locations, present_non_optional_locations |
java | public Waiter<DescribeTrainingJobRequest> trainingJobCompletedOrStopped() {
return new WaiterBuilder<DescribeTrainingJobRequest, DescribeTrainingJobResult>()
.withSdkFunction(new DescribeTrainingJobFunction(client))
.withAcceptors(new TrainingJobCompletedOrStopped.IsCompletedMatcher(), new TrainingJobCompletedOrStopped.IsStoppedMatcher(),
new TrainingJobCompletedOrStopped.IsFailedMatcher(), new TrainingJobCompletedOrStopped.IsValidationExceptionMatcher())
.withDefaultPollingStrategy(new PollingStrategy(new MaxAttemptsRetryStrategy(180), new FixedDelayStrategy(120)))
.withExecutorService(executorService).build();
} |
java | public static BufferedReader asBufferedReader(String fileName) throws IOException {
Validate.notBlank(fileName, "filename is blank");
return asBufferedReader(getPath(fileName));
} |
java | @Override
public void addTags(ExecutableElement method, Content methodDocTree) {
writer.addTagsInfo(method, methodDocTree);
} |
python | def discrete(self):
"""
A sequence of connected vertices in space, corresponding to
self.paths.
Returns
---------
discrete : (len(self.paths),)
A sequence of (m*, dimension) float
"""
discrete = np.array([self.discretize_path(i)
for i in self.paths])
return discrete |
python | def log(self, n=None, **kwargs):
"""
Run the repository log command
Returns:
str: output of log command (``git log -n <n> <--kwarg=value>``)
"""
kwargs['format'] = kwargs.pop('template', self.template)
cmd = ['git', 'log']
if n:
cmd.append('-n%d' % n)
cmd.extend(
(('--%s=%s' % (k, v))
for (k, v) in iteritems(kwargs)))
try:
return self.sh(cmd, shell=False)
except Exception:
return None |
java | @Override
public void absolute(final int localPointer) {
if (localPointer < 0 || localPointer >= rows.size()) {
throw new IndexOutOfBoundsException("INVALID POINTER LOCATION: " + localPointer);
}
pointer = localPointer;
currentRecord = new RowRecord(rows.get(pointer), metaData, parser.isColumnNamesCaseSensitive(), pzConvertProps, strictNumericParse, upperCase,
lowerCase, parser.isNullEmptyStrings());
} |
java | public static void setDevelopmentMode(boolean enable)
{
if (TraceComponent.isAnyTracingEnabled() && ejbTc.isDebugEnabled())
Tr.debug(ejbTc, "setDevelopmentMode : " + enable);
svDevelopmentMode = enable;
} |
java | public int getNumberOfOutputGroupVertices() {
int retVal = 0;
final Iterator<ManagementGroupVertex> it = this.groupVertices.iterator();
while (it.hasNext()) {
if (it.next().isOutputVertex()) {
++retVal;
}
}
return retVal;
} |
java | public Set<String> postProcessingFields() {
Set<String> fields = new LinkedHashSet<>();
must.forEach(condition -> fields.addAll(condition.postProcessingFields()));
should.forEach(condition -> fields.addAll(condition.postProcessingFields()));
return fields;
} |
python | def unit_poly_verts(theta):
"""Return vertices of polygon for subplot axes.
This polygon is circumscribed by a unit circle centered at (0.5, 0.5)
"""
x0, y0, r = [0.5] * 3
verts = [(r*np.cos(t) + x0, r*np.sin(t) + y0) for t in theta]
return verts |
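For example, four evenly spaced angles give the corners of a square inscribed in that circle (a usage sketch):

```python
import numpy as np

theta = np.linspace(0, 2 * np.pi, 4, endpoint=False) + np.pi / 2
# unit_poly_verts(theta) ~ [(0.5, 1.0), (0.0, 0.5), (0.5, 0.0), (1.0, 0.5)]
# i.e. the top, left, bottom and right points of the radius-0.5 circle.
```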
java | private int append(FA add) {
int relocation;
int idx;
relocation = used;
ensureCapacity(used + add.used);
for (idx = 0; idx < add.used; idx++) {
states[relocation + idx] =
new State(add.states[idx], relocation);
}
used += add.used;
return relocation;
} |
python | def has_path(nodes, A, B):
r"""Test if nodes from a breadth_first_order search lead from A to
B.
Parameters
----------
nodes : array_like
Nodes from a breadth_first_order search
A : array_like
The set of educt states
B : array_like
The set of product states
Returns
-------
has_path : boolean
True if there exists a path, else False
"""
x1 = np.intersect1d(nodes, A).size > 0
x2 = np.intersect1d(nodes, B).size > 0
return x1 and x2 |
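A usage sketch with SciPy's `breadth_first_order`, the natural source of the `nodes` argument (the function itself only intersects index sets):

```python
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import breadth_first_order

# 0 -> 1 -> 2, searched from node 0
adj = csr_matrix(np.array([[0, 1, 0],
                           [0, 0, 1],
                           [0, 0, 0]]))
nodes = breadth_first_order(adj, 0, return_predecessors=False)
# has_path(nodes, A=[0], B=[2]) -> True: the search visits both sets
```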
java | public List<T> elementList() {
if (m_cache != null) {
return m_cache;
}
if (m_relativeOrdered) {
List<T> objectList = new ArrayList<T>();
Iterator<CmsIdObjectElement<T>> itObjs = m_orderedObjectList.iterator();
while (itObjs.hasNext()) {
CmsIdObjectElement<T> object = itObjs.next();
objectList.add(object.getObject());
}
m_cache = Collections.unmodifiableList(objectList);
} else {
m_cache = Collections.unmodifiableList(m_objectList);
}
return m_cache;
} |
python | def do_get(self, uri):
"""Helps to make get requests
Args:
uri: URI of the resource
Returns:
Returns the resource data
"""
self.validate_resource_uri(uri)
return self._connection.get(uri) |
java | public static String getCanonicalSMILESForPolymer(PolymerNotation polymer) throws BuilderMoleculeException,
HELM2HandledException, CTKSmilesException, CTKException, NotationException, ChemistryException {
AbstractMolecule molecule = BuilderMolecule.buildMoleculefromSinglePolymer(polymer).getMolecule();
molecule = BuilderMolecule.mergeRgroups(molecule);
return Chemistry.getInstance().getManipulator().canonicalize(Chemistry.getInstance().getManipulator()
.convertMolecule(molecule, AbstractChemistryManipulator.StType.SMILES));
} |
java | @Override
public boolean setPortletPreferences(List<IPortletPreference> portletPreferences) {
if (portletPreferences == null) {
final boolean modified = !this.portletPreferences.isEmpty();
this.portletPreferences.clear();
return modified;
}
final boolean modified = !this.portletPreferences.equals(portletPreferences);
this.portletPreferences = portletPreferences;
return modified;
} |
python | def get_hidden_members(self, user=None):
"""Get the members that you do not have permission to view.
Returns: List of members hidden based on their permission preferences
"""
hidden_members = []
for member in self.members.all():
show = False
if member.can_view_eighth:
show = member.can_view_eighth
if not show and user and user.is_eighth_admin:
show = True
if not show and user and user.is_teacher:
show = True
if not show and member == user:
show = True
if not show:
hidden_members.append(member)
return hidden_members |
java | @Override
public CommerceAddressRestriction findByC_C_Last(long classNameId,
long classPK,
OrderByComparator<CommerceAddressRestriction> orderByComparator)
throws NoSuchAddressRestrictionException {
CommerceAddressRestriction commerceAddressRestriction = fetchByC_C_Last(classNameId,
classPK, orderByComparator);
if (commerceAddressRestriction != null) {
return commerceAddressRestriction;
}
StringBundler msg = new StringBundler(6);
msg.append(_NO_SUCH_ENTITY_WITH_KEY);
msg.append("classNameId=");
msg.append(classNameId);
msg.append(", classPK=");
msg.append(classPK);
msg.append("}");
throw new NoSuchAddressRestrictionException(msg.toString());
} |
python | def multi_split(txt, delims):
"""
split by multiple delimiters
"""
res = [txt]
for delimChar in delims:
txt, res = res, []
for word in txt:
if len(word) > 1:
res += word.split(delimChar)
else:
# keep single-character words instead of silently dropping them
res.append(word)
return res |
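A worked example of the splitting cascade (reflecting the keep-short-words fix above):

```python
# Each delimiter pass re-splits every fragment from the previous pass:
print(multi_split('a,b;c d', ',; '))  # -> ['a', 'b', 'c', 'd']
```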
java | private void obtainScaledEdge(@NonNull final TypedArray typedArray) {
int defaultValue = Edge.VERTICAL.getValue();
Edge scaledEdge = Edge.fromValue(
typedArray.getInt(R.styleable.SquareImageView_scaledEdge, defaultValue));
setScaledEdge(scaledEdge);
} |
java | public static void escapeAndWriteDelimitedColumn(String str,
char delimiter, Writer writer) throws IOException {
if (str == null) {
escapeAndWriteDelimitedColumn("NULL", delimiter, writer);
} else if (containsNone(str, SEARCH_CHARS) && str.indexOf(delimiter) < 0) {
writer.write(str);
} else {
writer.write(QUOTE);
writeStr(str, writer);
writer.write(QUOTE);
}
} |
python | def _start_execution(self, args, stdin, stdout, stderr, env, cwd, temp_dir, cgroups,
parent_setup_fn, child_setup_fn, parent_cleanup_fn):
"""Actually start the tool and the measurements.
@param parent_setup_fn a function without parameters that is called in the parent process
immediately before the tool is started
@param child_setup_fn a function without parameters that is called in the child process
before the tool is started
@param parent_cleanup_fn a function that is called in the parent process
immediately after the tool terminated, with three parameters:
the result of parent_setup_fn, the result of the executed process as ProcessExitCode,
and the base path for looking up files as parameter values
@return: a tuple of PID of process and a blocking function, which waits for the process
and a triple of the exit code and the resource usage of the process
and the result of parent_cleanup_fn (do not use os.wait)
"""
def pre_subprocess():
# Do some other setup the caller wants.
child_setup_fn()
# put us into the cgroup(s)
pid = os.getpid()
cgroups.add_task(pid)
# Set HOME and TMPDIR to fresh directories.
tmp_dir = os.path.join(temp_dir, "tmp")
home_dir = os.path.join(temp_dir, "home")
self._create_dirs_in_temp_dir(tmp_dir, home_dir)
env["HOME"] = home_dir
env["TMPDIR"] = tmp_dir
env["TMP"] = tmp_dir
env["TEMPDIR"] = tmp_dir
env["TEMP"] = tmp_dir
logging.debug("Executing run with $HOME and $TMPDIR below %s.", temp_dir)
args = self._build_cmdline(args, env=env)
parent_setup = parent_setup_fn()
p = subprocess.Popen(args,
stdin=stdin,
stdout=stdout, stderr=stderr,
env=env, cwd=cwd,
close_fds=True,
preexec_fn=pre_subprocess)
def wait_and_get_result():
exitcode, ru_child = self._wait_for_process(p.pid, args[0])
parent_cleanup = parent_cleanup_fn(
parent_setup, util.ProcessExitCode.from_raw(exitcode), "")
return exitcode, ru_child, parent_cleanup
return p.pid, wait_and_get_result |
java | public static route[] get(nitro_service service, options option) throws Exception{
route obj = new route();
route[] response = (route[])obj.get_resources(service,option);
return response;
} |
java | public final void finishTransliteration(Replaceable text,
Position index) {
index.validate(text.length());
filteredTransliterate(text, index, false, true);
} |
python | def calculate_overlap(self):
"""Create the array that describes how junctions overlap"""
overs = []
if not self.tx_obj1.range.overlaps(self.tx_obj2.range): return []  # if they don't overlap we won't find anything
for i in range(0,len(self.j1)):
for j in range(0,len(self.j2)):
if self.j1[i].overlaps(self.j2[j],tolerance=self.tolerance):
overs.append([i,j])
return overs |
python | def focusOutEvent(self, event):
""" The default 'focusOutEvent' implementation.
"""
widget = self.widget
type(widget).focusOutEvent(widget, event)
self.declaration.focus_lost() |
java | public Observable<StorageAccountInfoInner> getStorageAccountAsync(String resourceGroupName, String accountName, String storageAccountName) {
return getStorageAccountWithServiceResponseAsync(resourceGroupName, accountName, storageAccountName).map(new Func1<ServiceResponse<StorageAccountInfoInner>, StorageAccountInfoInner>() {
@Override
public StorageAccountInfoInner call(ServiceResponse<StorageAccountInfoInner> response) {
return response.body();
}
});
} |
java | private void fillDetailMimetypes(CmsListItem item, String detailId) {
CmsSearchManager searchManager = OpenCms.getSearchManager();
StringBuffer html = new StringBuffer();
String doctypeName = (String)item.get(LIST_COLUMN_NAME);
CmsSearchDocumentType docType = searchManager.getDocumentTypeConfig(doctypeName);
// output of mime types
Iterator<String> itMimetypes = docType.getMimeTypes().iterator();
html.append("<ul>\n");
while (itMimetypes.hasNext()) {
html.append(" <li>\n").append(" ").append(itMimetypes.next()).append("\n");
html.append(" </li>");
}
html.append("</ul>\n");
item.set(detailId, html.toString());
} |
java | @Override
public com.liferay.commerce.wish.list.model.CommerceWishList getCommerceWishListByUuidAndGroupId(
String uuid, long groupId)
throws com.liferay.portal.kernel.exception.PortalException {
return _commerceWishListLocalService.getCommerceWishListByUuidAndGroupId(uuid,
groupId);
} |
java | public boolean connectIfPossible( MonitoringPoint monitoringPoint ) {
// check if the other point has this as related id
if (ID == monitoringPoint.getRelatedID()) {
pfafRelatedMonitoringPointsTable.put(monitoringPoint.getPfatstetterNumber().toString(),
monitoringPoint);
return true;
}
return false;
} |
python | def download(date_array, tag, sat_id, data_path, user=None, password=None):
"""
Download SuperDARN data from Virginia Tech organized for loading by pysat.
"""
import sys
import os
import pysftp
import davitpy
if user is None:
user = os.environ['DBREADUSER']
if password is None:
password = os.environ['DBREADPASS']
with pysftp.Connection(
os.environ['VTDB'],
username=user,
password=password) as sftp:
for date in date_array:
myDir = '/data/'+date.strftime("%Y")+'/grdex/'+tag+'/'
fname = date.strftime("%Y%m%d")+'.' + tag + '.grdex'
local_fname = fname+'.bz2'
saved_fname = os.path.join(data_path,local_fname)
full_fname = os.path.join(data_path,fname)
try:
print('Downloading file for '+date.strftime('%D'))
sys.stdout.flush()
sftp.get(myDir+local_fname, saved_fname)
os.system('bunzip2 -c '+saved_fname+' > '+ full_fname)
os.system('rm ' + saved_fname)
except IOError:
print('File not available for '+date.strftime('%D'))
return |
python | def getall(self, table):
"""
Get all rows values for a table
"""
try:
self._check_db()
except Exception as e:
self.err(e, "Can not connect to database")
return
if table not in self.db.tables:
self.warning("The table " + table + " does not exists")
return
try:
res = self.db[table].all()
df = pd.DataFrame(list(res))
return df
except Exception as e:
self.err(e, "Error retrieving data in table") |
java | @Deprecated
public static SelectColumn createExcludes(String[] cols, String... columns) {
return new SelectColumn(Utility.append(cols, columns), true);
} |
java | @Deprecated
public static <I extends Request, O extends Response>
Function<Client<I, O>, LoggingClient<I, O>> newDecorator(LogLevel level) {
return delegate -> new LoggingClient<>(delegate, level);
} |
python | def delete_service_certificate(kwargs=None, conn=None, call=None):
'''
.. versionadded:: 2015.8.0
Delete a specific certificate associated with the service
CLI Examples:
.. code-block:: bash
salt-cloud -f delete_service_certificate my-azure name=my_service_certificate \\
thumbalgorithm=sha1 thumbprint=0123456789ABCDEF
'''
if call != 'function':
raise SaltCloudSystemExit(
'The delete_service_certificate function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
if 'name' not in kwargs:
raise SaltCloudSystemExit('A name must be specified as "name"')
if 'thumbalgorithm' not in kwargs:
raise SaltCloudSystemExit('A thumbalgorithm must be specified as "thumbalgorithm"')
if 'thumbprint' not in kwargs:
raise SaltCloudSystemExit('A thumbprint must be specified as "thumbprint"')
if not conn:
conn = get_conn()
try:
data = conn.delete_service_certificate(
kwargs['name'],
kwargs['thumbalgorithm'],
kwargs['thumbprint'],
)
return {'Success': 'The service certificate was successfully deleted'}
except AzureMissingResourceHttpError as exc:
raise SaltCloudSystemExit('{0}: {1}'.format(kwargs['name'], exc.message)) |
java | public static String concat(Object... params) {
StringBuffer finalMessage = new StringBuffer();
for (Object object : params) {
finalMessage.append(JKObjectUtil.toString(object, true));
}
return finalMessage.toString();
} |
java | public final String getJmsType() {
if (getHdr2().getChoiceField(JsHdr2Access.JMSTYPE) == JsHdr2Access.IS_JMSTYPE_EMPTY) {
return getDerivedJmsType();
}
else {
return (String) getHdr2().getField(JsHdr2Access.JMSTYPE_DATA);
}
} |
python | def init_config(self, app):
"""Initialize config."""
app.config.setdefault(
'LOGGING_FS_LEVEL',
'DEBUG' if app.debug else 'WARNING'
)
for k in dir(config):
if k.startswith('LOGGING_FS'):
app.config.setdefault(k, getattr(config, k))
# Support injecting instance path and/or sys.prefix
if app.config['LOGGING_FS_LOGFILE'] is not None:
app.config['LOGGING_FS_LOGFILE'] = \
app.config['LOGGING_FS_LOGFILE'].format(
instance_path=app.instance_path,
sys_prefix=sys.prefix,
) |
python | def skip_whitespace(self):
"""Consume input until a non-whitespace character is encountered.
The non-whitespace character is then ungotten, and the number of
whitespace characters consumed is returned.
If the tokenizer is in multiline mode, then newlines are whitespace.
@rtype: int
"""
skipped = 0
while True:
c = self._get_char()
if c != ' ' and c != '\t':
if (c != '\n') or not self.multiline:
self._unget_char(c)
return skipped
skipped += 1 |
java | public static Pair<AlluxioConfiguration, PathConfiguration> loadClusterAndPathDefaults(
InetSocketAddress address, AlluxioConfiguration clusterConf, PathConfiguration pathConf)
throws AlluxioStatusException {
if (shouldLoadClusterConfiguration(clusterConf)) {
GetConfigurationPResponse response = loadConfiguration(address, clusterConf);
clusterConf = loadClusterConfiguration(response, clusterConf);
pathConf = loadPathConfiguration(response, clusterConf);
}
return new Pair<>(clusterConf, pathConf);
} |
python | def login(self, role, jwt, subscription_id=None, resource_group_name=None, vm_name=None, vmss_name=None, use_token=True,
mount_point=DEFAULT_MOUNT_POINT):
"""Fetch a token.
This endpoint takes a signed JSON Web Token (JWT) and a role name for some entity. It verifies the JWT signature
to authenticate that entity and then authorizes the entity for the given role.
Supported methods:
POST: /auth/{mount_point}/login. Produces: 200 application/json
:param role: Name of the role against which the login is being attempted.
:type role: str | unicode
:param jwt: Signed JSON Web Token (JWT) from Azure MSI.
:type jwt: str | unicode
:param subscription_id: The subscription ID for the machine that generated the MSI token. This information can
be obtained through instance metadata.
:type subscription_id: str | unicode
:param resource_group_name: The resource group for the machine that generated the MSI token. This information
can be obtained through instance metadata.
:type resource_group_name: str | unicode
:param vm_name: The virtual machine name for the machine that generated the MSI token. This information can be
obtained through instance metadata. If vmss_name is provided, this value is ignored.
:type vm_name: str | unicode
:param vmss_name: The virtual machine scale set name for the machine that generated the MSI token. This
information can be obtained through instance metadata.
:type vmss_name: str | unicode
:param use_token: if True, uses the token in the response received from the auth request to set the "token"
        attribute on the :py:meth:`hvac.adapters.Adapter` instance under the _adapter Client attribute.
:type use_token: bool
:param mount_point: The "path" the azure auth method was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: dict
"""
params = {
'role': role,
'jwt': jwt,
}
if subscription_id is not None:
params['subscription_id'] = subscription_id
if resource_group_name is not None:
params['resource_group_name'] = resource_group_name
if vm_name is not None:
params['vm_name'] = vm_name
if vmss_name is not None:
params['vmss_name'] = vmss_name
api_path = '/v1/auth/{mount_point}/login'.format(mount_point=mount_point)
response = self._adapter.login(
url=api_path,
use_token=use_token,
json=params,
)
return response |
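A hedged usage sketch: assuming a configured hvac.Client and a JWT already fetched from the Azure instance-metadata service, a call might look like the following (the Vault URL, role name and metadata values are all illustrative):
python | # Usage sketch; assumes the hvac library and a valid Azure MSI JWT.
import hvac

client = hvac.Client(url='https://vault.example.com:8200')  # illustrative URL
msi_jwt = '<signed JWT from the Azure instance-metadata service>'

response = client.auth.azure.login(
    role='my-role',                      # hypothetical Vault role
    jwt=msi_jwt,
    subscription_id='00000000-0000-0000-0000-000000000000',
    resource_group_name='my-rg',
    vm_name='my-vm',                     # ignored when vmss_name is passed
)
print(response['auth']['client_token']) |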
python | def __initialize_ui(self):
"""
Initializes the Widget ui.
"""
self.setAutoScroll(True)
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.setIndentation(self.__tree_view_indentation)
self.setDragDropMode(QAbstractItemView.DragOnly)
self.setHeaderHidden(True)
self.__set_default_ui_state()
# Signals / Slots.
self.model().modelReset.connect(self.__set_default_ui_state) |
java | public static void trackTrustedMultifactorAuthenticationAttribute(
final Authentication authn,
final String attributeName) {
val newAuthn = DefaultAuthenticationBuilder.newInstance(authn)
.addAttribute(attributeName, Boolean.TRUE)
.build();
LOGGER.debug("Updated authentication session to remember trusted multifactor record via [{}]", attributeName);
authn.update(newAuthn);
} |
java | public static Grid getInstance(String configFile, String propertiesFile) throws InterruptedException {
return getInstance(configFile == null ? null : new FileSystemResource(configFile), propertiesFile);
} |
python | def _determine_monetary_account_id(cls, monetary_account_id=None):
"""
:type monetary_account_id: int
:rtype: int
"""
if monetary_account_id is None:
return context.BunqContext.user_context().primary_monetary_account.id_
return monetary_account_id |
java | protected boolean contains(CmsPublishJobInfoBean publishJob) {
List<CmsPublishJobInfoBean> l = OpenCms.getMemoryMonitor().getAllCachedPublishJobs();
if (l != null) {
for (int i = 0; i < l.size(); i++) {
CmsPublishJobInfoBean b = l.get(i);
if (b == publishJob) {
return true;
}
}
}
return false;
} |
python | def _end_channel(self, channel):
"""
Soft end of ssh channel. End the writing thread as soon as the message queue is empty.
"""
self.stop_on_empty_queue[channel] = True
        # By joining the thread we wait until its loop finishes.
        # It won't loop forever since we've set self.stop_on_empty_queue[channel] = True.
write_thread = self.thread_write_instances[channel]
thread_join_non_blocking(write_thread) |
python | def parse_cell(self, cell, coords, cell_mode=CellMode.cooked):
"""Parses a cell according to its cell.value_type."""
# pylint: disable=too-many-return-statements
if cell_mode == CellMode.cooked:
if cell.covered or cell.value_type is None or cell.value is None:
return None
vtype = cell.value_type
if vtype == 'string':
return cell.value
if vtype == 'float' or vtype == 'percentage' or vtype == 'currency':
return cell.value
if vtype == 'boolean':
return cell.value
if vtype == 'date':
date_tuple = tuple([int(i) if i is not None else 0 \
for i in _DATE_REGEX.match(cell.value).groups()])
return self.tuple_to_datetime(date_tuple)
if vtype == 'time':
hour, minute, second = _TIME_REGEX.match(cell.value).groups()
# TODO: This kills off the microseconds
date_tuple = (0, 0, 0, int(hour), int(minute), round(float(second)))
return self.tuple_to_datetime(date_tuple)
raise ValueError("Unhandled cell type: {0}".format(vtype))
else:
return cell |
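The date branch above relies on module-level regexes that are not shown; below is a small stand-in sketch of the tuple-to-datetime round trip, with an illustrative pattern in place of _DATE_REGEX and a trivial tuple_to_datetime.
python | # Stand-in sketch for the date handling; the real _DATE_REGEX is not shown.
import datetime as dt
import re

_DATE_REGEX = re.compile(r'(\d{4})-(\d{2})-(\d{2})(?:T(\d{2}):(\d{2}):(\d{2}))?')

def tuple_to_datetime(date_tuple):
    return dt.datetime(*date_tuple)

date_tuple = tuple(int(i) if i is not None else 0
                   for i in _DATE_REGEX.match('2015-08-01T13:45:30').groups())
print(tuple_to_datetime(date_tuple))  # 2015-08-01 13:45:30 |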
python | def query(conn_type, option, post_data=None):
'''
Execute the HTTP request to the API
'''
if ticket is None or csrf is None or url is None:
log.debug('Not authenticated yet, doing that now..')
_authenticate()
full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option)
log.debug('%s: %s (%s)', conn_type, full_url, post_data)
httpheaders = {'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent': 'salt-cloud-proxmox'}
if conn_type == 'post':
httpheaders['CSRFPreventionToken'] = csrf
response = requests.post(full_url, verify=verify_ssl,
data=post_data,
cookies=ticket,
headers=httpheaders)
elif conn_type == 'put':
httpheaders['CSRFPreventionToken'] = csrf
response = requests.put(full_url, verify=verify_ssl,
data=post_data,
cookies=ticket,
headers=httpheaders)
elif conn_type == 'delete':
httpheaders['CSRFPreventionToken'] = csrf
response = requests.delete(full_url, verify=verify_ssl,
data=post_data,
cookies=ticket,
headers=httpheaders)
    elif conn_type == 'get':
        response = requests.get(full_url, verify=verify_ssl,
                                cookies=ticket)
    else:
        raise SaltCloudExecutionFailure(
            'Unsupported request type: {0}'.format(conn_type)
        )
    response.raise_for_status()
try:
returned_data = response.json()
if 'data' not in returned_data:
raise SaltCloudExecutionFailure
return returned_data['data']
except Exception:
log.error('Error in trying to process JSON')
log.error(response) |
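For illustration, assuming _authenticate() has populated the module-level url, port, ticket and csrf globals, calls through this helper might look like the sketch below; the endpoint paths follow the Proxmox API, while the node and VM ids are made up.
python | # Usage sketch; assumes the module globals are already authenticated.
nodes = query('get', 'nodes')                          # list cluster nodes
query('post', 'nodes/node1/qemu/100/status/start')     # start VM 100 on node1 |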
java | @Override
protected void closeJedisCommands(JedisCommands jedisCommands) {
if (jedisCommands instanceof Jedis) {
((Jedis) jedisCommands).close();
    } else {
        throw new IllegalArgumentException("Argument is not of type [" + Jedis.class + "]!");
    }
} |
java | public static double updateDouble(double value, double range) {
range = range == 0 ? 0.1 * value : range;
double min = value - range;
double max = value + range;
return nextDouble(min, max);
} |
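The same jitter logic expressed in Python for illustration, mirroring the Java above, including the fall-back to a range of plus or minus 10% of the value when range is zero (assuming nextDouble draws uniformly between its bounds):
python | # Python sketch mirroring updateDouble: draw uniformly from [v - r, v + r].
import random

def update_double(value, rng):
    rng = 0.1 * value if rng == 0 else rng
    return random.uniform(value - rng, value + rng)

print(update_double(100.0, 0))   # somewhere in [90.0, 110.0] |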
java | void removeHandler( Object handlerRegistration )
{
// Through object's interface implementation
if( handlerRegistration instanceof DirectHandlerInfo )
{
DirectHandlerInfo info = (DirectHandlerInfo) handlerRegistration;
info.source.removePropertyChangedHandler( info.registrationObject );
return;
}
if( ! ( handlerRegistration instanceof HandlerInfo ) )
return;
HandlerInfo info = (HandlerInfo) handlerRegistration;
HashMap<String, ArrayList<PropertyChangedHandler>> handlersMap = PlatformSpecificProvider.get().getObjectMetadata( info.source );
if( handlersMap == null )
return;
ArrayList<PropertyChangedHandler> handlerList = handlersMap.get( info.propertyName );
if( handlerList == null )
return;
handlerList.remove( info.handler );
if( handlerList.isEmpty() )
handlersMap.remove( info.propertyName );
if( handlersMap.isEmpty() )
PlatformSpecificProvider.get().setObjectMetadata( info.source, null );
stats.statsRemovedRegistration( info );
info.handler = null;
info.propertyName = null;
info.source = null;
} |
java | public double getSquaredDistance(final DBIDRef id1, final DBIDRef id2) {
final int o1 = idmap.getOffset(id1), o2 = idmap.getOffset(id2);
return kernel[o1][o1] + kernel[o2][o2] - 2 * kernel[o1][o2];
} |
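The computation uses the standard kernel-induced squared distance, d^2(x, y) = K(x, x) + K(y, y) - 2 * K(x, y); a minimal NumPy sketch of the same identity:
python | # NumPy sketch of the kernel-induced squared distance identity.
import numpy as np

X = np.random.rand(5, 3)
kernel = X @ X.T                    # linear kernel: K[i, j] = <x_i, x_j>

def squared_distance(kernel, o1, o2):
    return kernel[o1, o1] + kernel[o2, o2] - 2 * kernel[o1, o2]

# For the linear kernel this equals the ordinary squared Euclidean distance.
assert np.isclose(squared_distance(kernel, 0, 1), np.sum((X[0] - X[1]) ** 2)) |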