_id (string, 2-7 chars) | title (string, 1-88 chars) | partition (3 classes) | text (string, 75-19.8k chars) | language (1 class) | meta_information (dict)
---|---|---|---|---|---|
q278400
|
Mesh.add_fields
|
test
|
def add_fields(self, fields = None, **kwargs):
"""
Add the fields into the list of fields.
"""
if fields is not None:
for field in fields:
self.fields.append(field)
|
python
|
{
"resource": ""
}
|
q278401
|
Mesh.check_elements
|
test
|
def check_elements(self):
"""
Checks element definitions.
"""
# ELEMENT TYPE CHECKING
existing_types = set(self.elements.type.argiope.values.flatten())
allowed_types = set(ELEMENTS.keys())
if not existing_types <= allowed_types:
raise ValueError("Element types {0} not in known elements {1}".format(
existing_types - allowed_types, allowed_types))
print("<Elements: OK>")
|
python
|
{
"resource": ""
}
|
q278402
|
Mesh.space
|
test
|
def space(self):
"""
Returns the dimension of the embedded space of each element.
"""
return self.elements.type.argiope.map(
lambda t: ELEMENTS[t].space)
|
python
|
{
"resource": ""
}
|
q278403
|
Mesh.centroids_and_volumes
|
test
|
def centroids_and_volumes(self, sort_index = True):
"""
Returns a dataframe containing volume and centroids of all the elements.
"""
elements = self.elements
out = []
for etype, group in self.elements.groupby([("type", "argiope", "")]):
etype_info = ELEMENTS[etype]
simplices_info = etype_info.simplices
index = group.index
simplices_data = self.split(into = "simplices",
loc = index,
at = "coords")
simplices = simplices_data.values.reshape(
index.size,
simplices_info.shape[0],
simplices_info.shape[1],
3)
edges = simplices[:,:,1:] - simplices[:,:,:1]
simplices_centroids = simplices.mean(axis = 2)
if etype_info.space == 2:
simplices_volumes = np.linalg.norm(
np.cross(edges[:,:,0],
edges[:,:,1],
axis = 2),
axis = 2)/2.
elif etype_info.space == 3:
simplices_volumes = (np.cross(edges[:,:,0],
edges[:,:,1], axis = 2)
* edges[:,:, 2]).sum(axis = 2) / 6.
elements_volumes = simplices_volumes.sum(axis = 1)
elements_centroids = ((simplices_volumes.reshape(*simplices_volumes.shape, 1)
* simplices_centroids).sum(axis = 1)
/ elements_volumes.reshape(*elements_volumes.shape,1))
volumes_df = pd.DataFrame(index = index,
data = elements_volumes,
columns = pd.MultiIndex.from_product(
[["volume"], [""]]))
centroids_df = pd.DataFrame(index = index,
data = elements_centroids,
columns = pd.MultiIndex.from_product(
[["centroid"], ["x", "y", "z"]]))
out.append(pd.concat([volumes_df, centroids_df], axis = 1))
out = pd.concat(out)
if sort_index: out.sort_index(inplace = True)
return out.sort_index(axis= 1)
|
python
|
{
"resource": ""
}
|
q278404
|
Mesh.angles
|
test
|
def angles(self, zfill = 3):
"""
Returns the internal angles of all elements and the associated statistics
"""
elements = self.elements.sort_index(axis = 1)
etypes = elements[("type", "argiope")].unique()
out = []
for etype in etypes:
etype_info = ELEMENTS[etype]
angles_info = etype_info.angles
loc = elements[("type", "argiope", "")] == etype
index = elements.loc[loc].index
angles_data = self.split(into = "angles",
loc = loc,
at = "coords")
data = angles_data.values.reshape(index.size,
angles_info.shape[0],
angles_info.shape[1],
3)
edges = data[:,:,[0,2],:] - data[:,:,1:2,:]
edges /= np.linalg.norm(edges, axis = 3).reshape(
index.size, angles_info.shape[0], 2, 1)
angles = np.degrees(np.arccos((
edges[:,:,0] * edges[:,:,1]).sum(axis = 2)))
deviation = angles - etype_info.optimal_angles
angles_df = pd.DataFrame(index = index,
data = angles,
columns = pd.MultiIndex.from_product(
[["angles"], ["a" + "{0}".format(s).zfill(zfill)
for s in range(angles_info.shape[0])]]))
deviation_df = pd.DataFrame(index = index,
data = deviation,
columns = pd.MultiIndex.from_product(
[["deviation"], ["d" + "{0}".format(s).zfill(zfill)
for s in range(angles_info.shape[0])]]))
df = pd.concat([angles_df, deviation_df], axis = 1).sort_index(axis = 1)
df["stats", "max_angle"] = df.angles.max(axis = 1)
df["stats", "min_angle"] = df.angles.min(axis = 1)
df["stats", "max_angular_deviation"] = df.deviation.max(axis = 1)
df["stats", "min_angular_deviation"] = df.deviation.min(axis = 1)
df["stats", "max_abs_angular_deviation"] = abs(df.deviation).max(axis = 1)
df = df.sort_index(axis = 1)
out.append(df)
out = pd.concat(out).sort_index(axis = 1)
return out
|
python
|
{
"resource": ""
}
|
q278405
|
Mesh.edges
|
test
|
def edges(self, zfill = 3):
"""
Returns the edge lengths of all elements along with length and aspect-ratio statistics.
"""
edges = self.split("edges", at = "coords").unstack()
edges["lx"] = edges.x[1]-edges.x[0]
edges["ly"] = edges.y[1]-edges.y[0]
edges["lz"] = edges.z[1]-edges.z[0]
edges["l"] = np.linalg.norm(edges[["lx", "ly", "lz"]], axis = 1)
edges = (edges.l).unstack()
edges.columns = pd.MultiIndex.from_product([["length"],
["e" + "{0}".format(s).zfill(zfill)
for s in np.arange(edges.shape[1])]])
edges[("stats", "lmax")] = edges.length.max(axis = 1)
edges[("stats", "lmin")] = edges.length.min(axis = 1)
edges[("stats", "aspect_ratio")] = edges.stats.lmax / edges.stats.lmin
return edges.sort_index(axis = 1)
|
python
|
{
"resource": ""
}
|
q278406
|
Mesh.stats
|
test
|
def stats(self):
"""
Returns mesh quality and geometric stats.
"""
cv = self.centroids_and_volumes()
angles = self.angles()
edges = self.edges()
return pd.concat([cv , angles[["stats"]], edges[["stats"]] ],
axis = 1).sort_index(axis = 1)
|
python
|
{
"resource": ""
}
|
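A minimal usage sketch for the mesh-quality helpers above (q278403-q278406); `mesh` is an assumed argiope Mesh instance with nodes and elements already defined, and the column labels follow directly from the snippets:
# Hedged usage sketch: `mesh` is assumed to be an argiope.mesh.Mesh instance.
quality = mesh.stats()   # concat of centroids_and_volumes(), angles()[["stats"]], edges()[["stats"]]
total_volume = quality[("volume", "")].sum()                          # per-element volumes summed
worst_aspect = quality[("stats", "aspect_ratio")].max()               # from Mesh.edges()
worst_angle = quality[("stats", "max_abs_angular_deviation")].max()   # from Mesh.angles()
print(total_volume, worst_aspect, worst_angle)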
q278407
|
Mesh.element_set_to_node_set
|
test
|
def element_set_to_node_set(self, tag):
"""
Makes a node set from an element set.
"""
nodes, elements = self.nodes, self.elements
loc = (elements.conn[elements[("sets", tag, "")]]
.stack().stack().unique())
loc = loc[loc != 0]
nodes[("sets", tag)] = False
nodes.loc[loc, ("sets", tag) ] = True
|
python
|
{
"resource": ""
}
|
q278408
|
Mesh.node_set_to_surface
|
test
|
def node_set_to_surface(self, tag):
"""
Converts a node set to surface.
"""
# Create a dummy node with label 0
nodes = self.nodes.copy()
dummy = nodes.iloc[0].copy()
dummy["coords"] *= np.nan
dummy["sets"] = True
nodes.loc[0] = dummy
# Getting element surfaces
element_surfaces= self.split("surfaces").unstack()
# killer hack !
surf = pd.DataFrame(
nodes.sets[tag].loc[element_surfaces.values.flatten()]
.values.reshape(element_surfaces.shape)
.prod(axis = 1)
.astype(np.bool),
index = element_surfaces.index).unstack().fillna(False)
for k in surf.keys():
self.elements["surfaces", tag, "f{0}".format(k[1]+1) ] = surf.loc[:, k]
|
python
|
{
"resource": ""
}
|
q278409
|
Mesh.surface_to_element_sets
|
test
|
def surface_to_element_sets(self, tag):
"""
Creates elements sets corresponding to a surface.
"""
surface = self.elements.surfaces[tag]
for findex in surface.keys():
if surface[findex].sum() != 0:
self.elements[("sets", "_SURF_{0}_FACE{1}"
.format(tag, findex[1:]), "")] = surface[findex]
|
python
|
{
"resource": ""
}
|
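A hedged sketch of how the three conversion helpers above (q278407-q278409) chain together; the mesh instance and the set tag are illustrative only:
# Assumed workflow; "top" is a hypothetical element set tag already present in mesh.elements.
mesh.element_set_to_node_set("top")    # flag all nodes used by the "top" element set
mesh.node_set_to_surface("top")        # mark element faces whose nodes are all in the node set
mesh.surface_to_element_sets("top")    # create element sets _SURF_top_FACE1, _SURF_top_FACE2, ...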
q278410
|
Mesh.fields_metadata
|
test
|
def fields_metadata(self):
"""
Returns fields metadata as a dataframe.
"""
return (pd.concat([f.metadata() for f in self.fields], axis = 1)
.transpose()
.sort_values(["step_num", "frame", "label", "position"]))
|
python
|
{
"resource": ""
}
|
q278411
|
MetaField.metadata
|
test
|
def metadata(self):
"""
Returns metadata as a dataframe.
"""
return pd.Series({
"part": self.part,
"step_num": self.step_num,
"step_label": self.step_label,
"frame": self.frame,
"frame_value": self.frame_value,
"label": self.label,
"position": self.position,
})
|
python
|
{
"resource": ""
}
|
q278412
|
Model.make_directories
|
test
|
def make_directories(self):
"""
Checks if required directories exist and creates them if needed.
"""
if not os.path.isdir(self.workdir): os.mkdir(self.workdir)
|
python
|
{
"resource": ""
}
|
q278413
|
Model.run_postproc
|
test
|
def run_postproc(self):
"""
Runs the post-proc script.
"""
t0 = time.time()
if self.verbose:
print('#### POST-PROCESSING "{0}" USING POST-PROCESSOR "{1}"'.format(self.label,
self.solver.upper()))
if self.solver == "abaqus":
command = '{0} viewer noGUI={1}_abqpp.py'.format(self.solver_path, self.label)
process = subprocess.Popen(
command,
cwd = self.workdir,
shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT)
for line in iter(process.stdout.readline, b''):
line = line.rstrip().decode('utf8')
print(" ", line)
t1 = time.time()
if self.verbose:
print(' => POST-PROCESSED {0}: DURATION = {1:.2f}s >'.format(self.label,
t1 - t0))
|
python
|
{
"resource": ""
}
|
q278414
|
Part.run_gmsh
|
test
|
def run_gmsh(self):
"""
Makes the mesh using gmsh.
"""
argiope.utils.run_gmsh(gmsh_path = self.gmsh_path,
gmsh_space = self.gmsh_space,
gmsh_options = self.gmsh_options,
name = self.file_name + ".geo",
workdir = self.workdir)
self.mesh = argiope.mesh.read_msh(self.workdir + self.file_name + ".msh")
|
python
|
{
"resource": ""
}
|
q278415
|
read_history_report
|
test
|
def read_history_report(path, steps, x_name = None):
"""
Reads a history output report.
"""
data = pd.read_csv(path, delim_whitespace = True)
if x_name is not None:
data[x_name] = data.X
del data["X"]
data["step"] = 0
t = 0.
for i in range(len(steps)):
dt = steps[i].duration
loc = data[data.t == t].index
if len(loc) == 2:
data.loc[loc[1]:, "step"] = i
t += dt
return data
|
python
|
{
"resource": ""
}
|
q278416
|
read_field_report
|
test
|
def read_field_report(path, data_flag = "*DATA", meta_data_flag = "*METADATA"):
"""
Reads a field output report.
"""
text = open(path).read()
mdpos = text.find(meta_data_flag)
dpos = text.find(data_flag)
mdata = io.StringIO( "\n".join(text[mdpos:dpos].split("\n")[1:]))
data = io.StringIO( "\n".join(text[dpos:].split("\n")[1:]))
data = pd.read_csv(data, index_col = 0)
data = data.groupby(data.index).mean()
mdata = pd.read_csv(mdata, sep = "=", header = None, index_col = 0)[1]
mdata = mdata.to_dict()
out = {}
out["step_num"] = int(mdata["step_num"])
out["step_label"] = mdata["step_label"]
out["frame"] = int(mdata["frame"])
out["frame_value"] = float(mdata["frame_value"])
out["part"] = mdata["instance"]
position_map = {"NODAL": "node",
"ELEMENT_CENTROID": "element",
"WHOLE_ELEMENT": "element"}
out["position"] = position_map[mdata["position"]]
out["label"] = mdata["label"]
out["data"] = data
field_class = getattr(argiope.mesh, mdata["argiope_class"])
return field_class(**out)
|
python
|
{
"resource": ""
}
|
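For reference, read_field_report expects the layout produced by write_field_report (q278423 below): a *METADATA block of key=value lines followed by a *DATA block of comma-separated values. A hand-written illustration (the field names and values are made up; the class name is a placeholder):
*METADATA
label=U
argiope_class=<an argiope.mesh field class>
odb=sample.odb
instance=I_SAMPLE
position=NODAL
step_num=0
step_label=LOADING
frame=10
frame_value=1.0
*DATA
nodal,U1,U2,U3
1,0.0,0.0,0.0
2,0.001,0.0,0.0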
q278417
|
list_to_string
|
test
|
def list_to_string(l = range(200), width = 40, indent = " "):
"""
Converts a list-like to string with given line width.
"""
l = [str(v) + "," for v in l]
counter = 0
out = "" + indent
for w in l:
s = len(w)
if counter + s > width:
out += "\n" + indent
counter = 0
out += w
counter += s
return out.strip(",")
|
python
|
{
"resource": ""
}
|
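A quick check of the wrapping behaviour of list_to_string (inputs arbitrary):
print(list_to_string(range(8), width = 12, indent = "  "))
# Output (wrapped once the running width would exceed 12, trailing comma stripped):
#   0,1,2,3,4,5,
#   6,7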
q278418
|
_equation
|
test
|
def _equation(nodes = (1, 2), dofs = (1, 1), coefficients = (1., 1.),
comment = None):
"""
Returns an Abaqus INP formatted string for a given linear equation.
"""
N = len(nodes)
if comment is None:
out = ""
else:
out = "**EQUATION: {0}\n".format(comment)
out+= "*EQUATION\n {0}\n ".format(N)
out += "\n ".join([ ",".join([ str(nodes[i]),
str(int(dofs[i])),
str(coefficients[i]) ]) for i in range(N)])
return out
|
python
|
{
"resource": ""
}
|
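For example, _equation with a comment and two nodes emits the following Abaqus keyword block (exactly what the string formatting above produces):
print(_equation(nodes = (1, 2), dofs = (1, 1), coefficients = (1., -1.), comment = "tie"))
# **EQUATION: tie
# *EQUATION
#  2
#  1,1,1.0
#  2,1,-1.0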
q278419
|
_unsorted_set
|
test
|
def _unsorted_set(df, label, **kwargs):
"""
Returns a set as inp string with unsorted option.
"""
out = "*NSET, NSET={0}, UNSORTED\n".format(label)
labels = df.index.values
return out + argiope.utils.list_to_string(labels, **kwargs)
|
python
|
{
"resource": ""
}
|
q278420
|
PhaxioApi.parse_response
|
test
|
def parse_response(self, response):
"""Parses the API response and raises appropriate
errors if raise_errors was set to True
:param response: response from requests http call
:returns: dictionary of response
:rtype: dict
"""
payload = None
try:
if isinstance(response.json, collections.Callable):
payload = response.json()
else:
# json isn't callable in old versions of requests
payload = response.json
except ValueError:
# response does not have JSON content
payload = response.content
if not self._raise_errors:
return payload
else:
if response.status_code == 401:
raise AuthenticationError(payload['message'])
elif response.status_code == 500:
raise ServerError(payload['message'])
elif isinstance(payload, dict) and not payload['success']:
raise APIError(payload['message'])
else:
return payload
|
python
|
{
"resource": ""
}
|
q278421
|
PhaxioApi._get
|
test
|
def _get(self, method, **kwargs):
"""Builds the url for the specified method and arguments and returns
the response as a dictionary.
"""
payload = kwargs.copy()
payload['api_key'] = self.api_key
payload['api_secret'] = self.api_secret
to = payload.pop('to', None)
if to:
if isinstance(to, basestring):
payload['to'] = to
else:
# Presumably it's a list or tuple
for num_i, fax_num in enumerate(to):
payload['to[%d]' % num_i] = fax_num
files = payload.pop('files', [])
if not isinstance(files, (list, tuple)): files = (files,)
req_files = {}
for file_i, f in enumerate(files):
if isinstance(f, basestring):
req_files['filename[%d]' % file_i] = open(f, 'rb')
else:
f.seek(0)
req_files['filename[%d]' % file_i] = f
url = '%s/v%d/%s' % (self.BASE_URL, self.VERSION, method)
r = requests.post(url, data=payload, files=req_files)
return self.parse_response(r)
|
python
|
{
"resource": ""
}
|
q278422
|
write_xy_report
|
test
|
def write_xy_report(odb, path, tags, columns, steps):
"""
Writes a xy_report based on xy data.
"""
xyData = [session.XYDataFromHistory(name = columns[i],
odb = odb,
outputVariableName = tags[i],
steps = steps)
for i in xrange(len(tags))]
session.xyReportOptions.setValues(numDigits=8, numberFormat=SCIENTIFIC)
session.writeXYReport(fileName=path, appendMode=OFF, xyData=xyData)
|
python
|
{
"resource": ""
}
|
q278423
|
write_field_report
|
test
|
def write_field_report(odb, path, label, argiope_class, variable, instance, output_position,
step = -1, frame = -1, sortItem='Node Label'):
"""
Writes a field report and rewrites it in a cleaner format.
"""
stepKeys = get_steps(odb)
step = xrange(len(stepKeys))[step]
frame = xrange(get_frames(odb, stepKeys[step]))[frame]
nf = NumberFormat(numDigits=9,
precision=0,
format=SCIENTIFIC)
session.fieldReportOptions.setValues(
printTotal=OFF,
printMinMax=OFF,
numberFormat=nf)
leaf = dgo.LeafFromPartInstance(
partInstanceName = instance)
session.viewports['Viewport: 1'].odbDisplay.displayGroup.replace(leaf=leaf)
session.writeFieldReport(
fileName = path,
append = OFF,
sortItem = sortItem,
odb = odb,
step = step,
frame = frame,
outputPosition = output_position,
variable = variable)
lines = [line.strip() for line in open(path).readlines()]
isdata = -1
data = []
for line in lines:
if isdata == 1:
if len(line) == 0:
isdata -= 1
else:
data.append(line)
elif isdata < 1:
if line.startswith("--"):
isdata += 1
data = "\n".join([",".join(line.split()) for line in data if len(line) != 0])
# HEADER
header = str(output_position).lower() + ","
header += ",".join([v[1] for v in variable[0][2]]) + "\n"
# METADATA
metadata = (
("label", label),
("argiope_class", argiope_class) ,
("odb", odb.path),
("instance", instance),
("position", output_position),
("step_num", step),
("step_label", stepKeys[step]),
("frame", frame),
("frame_value", odb.steps[stepKeys[step]].frames[frame].frameValue)
)
out = "*METADATA\n{0}\n*DATA\n{1}".format(
"\n".join(["{0}={1}".format(k, v) for k, v in metadata]),
header + data)
open(path, "w").write(out)
|
python
|
{
"resource": ""
}
|
q278424
|
list
|
test
|
def list(component_type):
"""List components that are available on your machine"""
config_loader = initialise_component_loader()
component_types = sorted({
"displays": lambda: config_loader.load_by_type(ComponentType.DISPLAY),
"datafeeds": lambda: config_loader.load_by_type(ComponentType.DATA_FEED),
"filters": lambda: config_loader.load_by_type(ComponentType.FILTER),
"notifications": lambda: config_loader.load_by_type(ComponentType.NOTIFICATION)
}.items(), key=lambda t: t[0])
def print_ids(creators):
ids = {c.id_key_value[1] if hasattr(c, "id_key_value") else c.get_id() for c in creators}
for i in sorted(ids):
click.echo(" - %s" % i)
for k, v in component_types:
if component_type == k or component_type == "all":
click.echo("Available %s:" % k)
print_ids(v())
if component_type == "all":
click.echo("")
|
python
|
{
"resource": ""
}
|
q278425
|
Descriptor.err_msg
|
test
|
def err_msg(self, instance, value):
"""Return an error message for use in exceptions thrown by
subclasses.
"""
if not hasattr(self, "name"):
# err_msg will be called by the composed descriptor
return ""
return (
"Attempted to set the {f_type} attribute {inst}.{attr} to the "
"{val_type} value {val}, which does not satisfy the condition "
"{f_type}.".format(
f_type=self.field_type,
inst=instance.__class__.__name__,
attr=self.name,
val_type=value.__class__.__name__,
val=value))
|
python
|
{
"resource": ""
}
|
q278426
|
Descriptor.exc_thrown_by_descriptor
|
test
|
def exc_thrown_by_descriptor():
"""Return True if the last exception was thrown by a
Descriptor instance.
"""
traceback = sys.exc_info()[2]
tb_locals = traceback.tb_frame.f_locals
# relying on naming convention to get the object that threw
# the exception
if "self" in tb_locals:
if not isinstance(tb_locals["self"], Descriptor):
return False
return True
return False
|
python
|
{
"resource": ""
}
|
q278427
|
Series._set_data
|
test
|
def _set_data(self):
"""
This method will be called to set Series data
"""
if getattr(self, 'data', False) and not getattr(self, '_x', False) and not getattr(self, '_y', False):
_x = XVariable()
_y = YVariable()
_x.contribute_to_class(self, 'X', self.data)
_y.contribute_to_class(self, 'Y', self.data)
self['data'] = zip(self._x.points, self._y.points)
else:
for axis in ('_x', '_y'):
axis_obj = getattr(self, axis, False)
if not axis_obj:
raise exception.MissingAxisException("%s missing" % axis)
if not getattr(axis_obj, 'points', False):
raise exception.MissingDataException()
self['data'] = zip(self._x.points, self._y.points)
|
python
|
{
"resource": ""
}
|
q278428
|
Graph._get_axis_mode
|
test
|
def _get_axis_mode(self, axis):
"will get the axis mode for the current series"
if all([isinstance(getattr(s, axis), TimeVariable) for s in self._series]):
return 'time'
return None
|
python
|
{
"resource": ""
}
|
q278429
|
Graph._set_options
|
test
|
def _set_options(self):
"sets the graph plotting options"
# this is awful
# FIXME: Axis options should be passed completely by a GraphOption
if 'xaxis' in self._options.keys():
self._options['xaxis'].update(
{'mode' : self._get_axis_mode(XAxis._var_name)})
if 'yaxis' in self._options.keys():
self._options['yaxis'].update(
{'mode' : self._get_axis_mode(YAxis._var_name)})
|
python
|
{
"resource": ""
}
|
q278430
|
make_class
|
test
|
def make_class(clsname, func, attrs):
"""Turn a funcs list element into a class object."""
clsdict = {"__set__": create_setter(func, attrs)}
if len(attrs) > 0:
clsdict["__init__"] = create_init(attrs)
clsobj = type(str(clsname), (Descriptor, ), clsdict)
clsobj.__doc__ = docstrings.get(clsname)
return clsobj
|
python
|
{
"resource": ""
}
|
q278431
|
DashboardRunner.cycle
|
test
|
def cycle(self):
"""
Cycles through notifications with latest results from data feeds.
"""
messages = self.poll_datafeeds()
notifications = self.process_notifications(messages)
self.draw_notifications(notifications)
|
python
|
{
"resource": ""
}
|
q278432
|
ForceNumeric.try_convert
|
test
|
def try_convert(value):
"""Convert value to a numeric value or raise a ValueError
if that isn't possible.
"""
convertible = ForceNumeric.is_convertible(value)
if not convertible or isinstance(value, bool):
raise ValueError
if isinstance(str(value), str):
return ForceNumeric.str_to_num(value)
return float(value)
|
python
|
{
"resource": ""
}
|
q278433
|
ForceNumeric.str_to_num
|
test
|
def str_to_num(str_value):
"""Convert str_value to an int or a float, depending on the
numeric value represented by str_value.
"""
str_value = str(str_value)
try:
return int(str_value)
except ValueError:
return float(str_value)
|
python
|
{
"resource": ""
}
|
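A few illustrative calls against the two ForceNumeric helpers above (assuming is_convertible, which is not shown here, accepts plain numeric strings):
ForceNumeric.str_to_num("42")       # -> 42 (int)
ForceNumeric.str_to_num("4.2")      # -> 4.2 (float)
ForceNumeric.try_convert("4.2")     # -> 4.2
ForceNumeric.try_convert(True)      # raises ValueError: bools are rejected explicitly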
q278434
|
plot
|
test
|
def plot(parser, token):
"""
Tag to plot graphs into the template
"""
tokens = token.split_contents()
tokens.pop(0)
graph = tokens.pop(0)
attrs = dict([token.split("=") for token in tokens])
if 'id' not in attrs.keys():
attrs['id'] = ''.join([chr(choice(range(65, 90))) for i in range(0, 5)])
else:
attrs['id'] = attrs['id'][1:len(attrs['id'])-1]
attr_string = ''.join([" %s=%s" % (k, v) for k, v in attrs.iteritems()])
return GraphRenderer(graph, attr_string, attrs['id'])
|
python
|
{
"resource": ""
}
|
q278435
|
force_unicode
|
test
|
def force_unicode(raw):
'''Try really really hard to get a Unicode copy of a string.
First try :class:`BeautifulSoup.UnicodeDammit` to try to force
to Unicode; if that fails, assume UTF-8 encoding, and ignore
all errors.
:param str raw: string to coerce
:return: Unicode approximation of `raw`
:returntype: :class:`unicode`
'''
converted = UnicodeDammit(raw, isHTML=True)
if not converted.unicode:
converted.unicode = unicode(raw, 'utf8', errors='ignore')
encoding_m = encoding_re.match(converted.unicode)
if encoding_m:
converted.unicode = \
encoding_m.group('start_xml') + \
encoding_m.group('remainder')
return converted.unicode
|
python
|
{
"resource": ""
}
|
q278436
|
make_clean_html
|
test
|
def make_clean_html(raw, stream_item=None, encoding=None):
'''Get a clean text representation of presumed HTML.
Treat `raw` as though it is HTML, even if we have no idea what it
really is, and attempt to get a properly formatted HTML document
with all HTML-escaped characters converted to their unicode.
This is called below by the `clean_html` transform stage, which
interprets MIME-type. If `character_encoding` is not provided,
and `stream_item` is provided, then this falls back to
:attr:`streamcorpus.StreamItem.body.encoding`.
:param str raw: raw text to clean up
:param stream_item: optional stream item with encoding metadata
:type stream_item: :class:`streamcorpus.StreamItem`
:returns: UTF-8-encoded byte string of cleaned HTML text
:returntype: :class:`str`
'''
# Fix emails by protecting the <,> from HTML
raw = fix_emails(raw)
raw_decoded = nice_decode(raw, stream_item=stream_item, encoding=encoding)
if raw_decoded is None:
# give up on decoding it... maybe this should use force_unicode
raw_decoded = raw
# default attempt uses vanilla lxml.html
try:
root = lxml.html.document_fromstring(raw_decoded)
except ValueError, exc:
if 'with encoding declaration' in str(exc):
root = lxml.html.document_fromstring(raw)
else:
raise
# While we have the document parsed as a DOM, let's strip attributes.
# (The HTML cleaner seems to only support whitelisting attributes.
# As of now, we just want to blacklist a few.)
lxml.etree.strip_attributes(root, 'class', 'id')
# if that worked, then we will be able to generate a
# valid HTML string
fixed_html = lxml.html.tostring(root, encoding=unicode)
# remove any ^M characters
fixed_html = string.replace(fixed_html, '\r', ' ')
# We drop utf8 characters that are above 0xFFFF as
# Lingpipe seems to be doing the wrong thing with them.
fixed_html = drop_invalid_and_upper_utf8_chars(fixed_html)
# construct a Cleaner that removes any ``<script>`` tags,
# Javascript, like an ``onclick`` attribute, comments, style
# tags or attributes, ``<link>`` tags
cleaner = lxml.html.clean.Cleaner(
scripts=True, javascript=True,
comments=True,
# do not remove <html> <head> <title> etc
page_structure=False,
remove_tags=['base'],
style=True, links=True)
# now get the really sanitized HTML
_clean_html = cleaner.clean_html(fixed_html)
# generate pretty HTML in utf-8
_clean_html = lxml.html.tostring(
lxml.html.document_fromstring(_clean_html),
method='html', encoding='utf-8',
pretty_print=True,
# include_meta_content_type=True
)
return uniform_html(_clean_html)
|
python
|
{
"resource": ""
}
|
q278437
|
clean_html.is_matching_mime_type
|
test
|
def is_matching_mime_type(self, mime_type):
'''This implements the MIME-type matching logic for deciding whether
to run `make_clean_html`
'''
if len(self.include_mime_types) == 0:
return True
if mime_type is None:
return False
mime_type = mime_type.lower()
# NB: startswith is necessary here, because encodings are
# often appended to HTTP header Content-Type
return any(mime_type.startswith(mt) for mt in self.include_mime_types)
|
python
|
{
"resource": ""
}
|
q278438
|
domain_name_cleanse
|
test
|
def domain_name_cleanse(raw_string):
'''extract a lower-case, no-slashes domain name from a raw string
that might be a URL
'''
try:
parts = urlparse(raw_string)
domain = parts.netloc.split(':')[0]
except:
domain = ''
if not domain:
domain = raw_string
if not domain:
return ''
domain = re.sub('\/', '', domain.strip().lower())
return domain
|
python
|
{
"resource": ""
}
|
q278439
|
domain_name_left_cuts
|
test
|
def domain_name_left_cuts(domain):
'''returns a list of strings created by splitting the domain on
'.' and successively cutting off the left most portion
'''
cuts = []
if domain:
parts = domain.split('.')
for i in range(len(parts)):
cuts.append( '.'.join(parts[i:]))
return cuts
|
python
|
{
"resource": ""
}
|
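Taken together, the two URL helpers above behave like this (inputs are illustrative):
domain_name_cleanse("https://News.Example.co.uk:8080/path")
# -> 'news.example.co.uk'
domain_name_left_cuts("news.example.co.uk")
# -> ['news.example.co.uk', 'example.co.uk', 'co.uk', 'uk']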
q278440
|
keyword_indexer.make_hash_kw
|
test
|
def make_hash_kw(self, tok):
'''Get a Murmur hash and a normalized token.
`tok` may be a :class:`unicode` string or a UTF-8-encoded
byte string. :data:`DOCUMENT_HASH_KEY`, hash value 0, is
reserved for the document count, and this function remaps
that value.
:param tok: token to hash
:return: pair of normalized `tok` and its hash
'''
if isinstance(tok, unicode):
tok = tok.encode('utf-8')
h = mmh3.hash(tok)
if h == DOCUMENT_HASH_KEY:
h = DOCUMENT_HASH_KEY_REPLACEMENT
return (tok, h)
|
python
|
{
"resource": ""
}
|
q278441
|
keyword_indexer.collect_words
|
test
|
def collect_words(self, si):
'''Collect all of the words to be indexed from a stream item.
This scans `si` for all of the configured tagger IDs. It
collects all of the token values (the
:attr:`streamcorpus.Token.token`) and returns a
:class:`collections.Counter` of them.
:param si: stream item to scan
:type si: :class:`streamcorpus.StreamItem`
:return: counter of :class:`unicode` words to index
:returntype: :class:`collections.Counter`
'''
counter = Counter()
for tagger_id, sentences in si.body.sentences.iteritems():
if ((self.keyword_tagger_ids is not None
and tagger_id not in self.keyword_tagger_ids)):
continue
for sentence in sentences:
for token in sentence.tokens:
term = token.token # always a UTF-8 byte string
term = term.decode('utf-8')
term = cleanse(term)
if ((self.keyword_size_limit is not None and
len(term) > self.keyword_size_limit)):
continue
if term not in self.stop_words:
counter[term] += 1
return counter
|
python
|
{
"resource": ""
}
|
q278442
|
keyword_indexer.index
|
test
|
def index(self, si):
'''Record index records for a single document.
Which indexes this creates depends on the parameters to the
constructor. This records all of the requested indexes for
a single document.
'''
if not si.body.clean_visible:
logger.warn('stream item %s has no clean_visible part, '
'skipping keyword indexing', si.stream_id)
return
# Count tokens in si.clean_visible
# We will recycle hash==0 for "# of documents"
hash_counts = defaultdict(int)
hash_counts[DOCUMENT_HASH_KEY] = 1
hash_kw = defaultdict(int)
words = self.collect_words(si)
for tok, count in words.iteritems():
(tok, tok_hash) = self.make_hash_kw(tok)
hash_counts[tok_hash] += count
hash_kw[tok] = tok_hash
# Convert this and write it out
if self.hash_docs:
(k1, k2) = key_for_stream_item(si)
kvps = [((h, k1, k2), n) for (h, n) in hash_counts.iteritems()
if h != DOCUMENT_HASH_KEY]
self.client.put(HASH_TF_INDEX_TABLE, *kvps)
if self.hash_frequencies:
kvps = [((h,), 1) for h in hash_counts.iterkeys()]
self.client.increment(HASH_FREQUENCY_TABLE, *kvps)
if self.hash_keywords:
kvps = [((h, t), 1) for (t, h) in hash_kw.iteritems()]
self.client.increment(HASH_KEYWORD_INDEX_TABLE, *kvps)
|
python
|
{
"resource": ""
}
|
q278443
|
keyword_indexer.invert_hash
|
test
|
def invert_hash(self, tok_hash):
'''Get strings that correspond to some hash.
No string will correspond to :data:`DOCUMENT_HASH_KEY`; use
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param int tok_hash: Murmur hash to query
:return: list of :class:`unicode` strings
'''
return [tok_encoded.decode('utf8')
for (_, tok_encoded) in
self.client.scan_keys(HASH_KEYWORD_INDEX_TABLE,
((tok_hash,), (tok_hash,)))]
|
python
|
{
"resource": ""
}
|
q278444
|
keyword_indexer.document_frequencies
|
test
|
def document_frequencies(self, hashes):
'''Get document frequencies for a list of hashes.
This will return all zeros unless the index was written with
`hash_frequencies` set. If :data:`DOCUMENT_HASH_KEY` is
included in `hashes`, that value will be returned with the
total number of documents indexed. If you are looking for
documents with that hash, pass
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param hashes: hashes to query
:paramtype hashes: list of :class:`int`
:return: map from hash to document frequency
'''
result = {}
for (k, v) in self.client.get(HASH_FREQUENCY_TABLE,
*[(h,) for h in hashes]):
if v is None:
v = 0
result[k[0]] = v
return result
|
python
|
{
"resource": ""
}
|
q278445
|
keyword_indexer.lookup
|
test
|
def lookup(self, h):
'''Get stream IDs for a single hash.
This yields strings that can be retrieved using
:func:`streamcorpus_pipeline._kvlayer.get_kvlayer_stream_item`,
or fed back into :mod:`coordinate` or other job queue systems.
Note that for common terms this can return a large number of
stream IDs! This is a scan over a dense region of a
:mod:`kvlayer` table so it should be reasonably efficient,
but be prepared for it to return many documents in a large
corpus. Blindly storing the results in a :class:`list`
may be inadvisable.
This will return nothing unless the index was written with
:attr:`hash_docs` set. No document will correspond to
:data:`DOCUMENT_HASH_KEY`; use
:data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.
:param int h: Murmur hash to look up
'''
for (_, k1, k2) in self.client.scan_keys(HASH_TF_INDEX_TABLE,
((h,), (h,))):
yield kvlayer_key_to_stream_id((k1, k2))
|
python
|
{
"resource": ""
}
|
q278446
|
keyword_indexer.lookup_tf
|
test
|
def lookup_tf(self, h):
'''Get stream IDs and term frequencies for a single hash.
This yields pairs of strings that can be retrieved using
:func:`streamcorpus_pipeline._kvlayer.get_kvlayer_stream_item`
and the corresponding term frequency.
..see:: :meth:`lookup`
'''
for ((_, k1, k2), v) in self.client.scan(HASH_TF_INDEX_TABLE,
((h,), (h,))):
yield (kvlayer_key_to_stream_id((k1, k2)), v)
|
python
|
{
"resource": ""
}
|
q278447
|
_make_stream_items
|
test
|
def _make_stream_items(f):
"""Given a spinn3r feed, produce a sequence of valid StreamItems.
Because of goopy Python interactions, you probably need to call
this and re-yield its results, as
>>> with open(filename, 'rb') as f:
... for si in _make_stream_items(f):
... yield si
"""
reader = ProtoStreamReader(f)
return itertools.ifilter(
lambda x: x is not None,
itertools.imap(_make_stream_item, reader))
|
python
|
{
"resource": ""
}
|
q278448
|
_make_stream_item
|
test
|
def _make_stream_item(entry):
"""Given a single spinn3r feed entry, produce a single StreamItem.
Returns 'None' if a complete item can't be constructed.
"""
# get standard metadata, assuming it's present...
if not hasattr(entry, 'permalink_entry'):
return None
pe = entry.permalink_entry
# ...and create a streamitem...
si = streamcorpus.make_stream_item(
pe.date_found[:-1] + '.0Z',
pe.canonical_link.href.encode('utf8'))
if not si.stream_time:
logger.debug('failed to generate stream_time from {0!r}'
.format(pe.date_found))
return None
if not si.abs_url:
logger.debug('failed to generate abs_url from {0!r}'
.format(pe.canonical_link.href))
return None
# ...filling in the actual data
si.body = _make_content_item(
pe.content,
alternate_data=entry.feed_entry.content.data)
if not si.body:
return None
if not si.body.raw:
return None
if pe.content_extract.data:
si.other_content['extract'] = _make_content_item(pe.content_extract)
si.other_content['title'] = streamcorpus.ContentItem(
raw=pe.title.encode('utf8'),
media_type=pe.content_extract.mime_type,
encoding='UTF-8')
si.other_content['feed_entry_title'] = streamcorpus.ContentItem(
raw=entry.feed_entry.title.encode('utf8'),
media_type=entry.feed_entry.content.mime_type,
encoding='UTF-8')
if entry.feed_entry.content.data:
si.other_content['feed_entry'] = _make_content_item(
entry.feed_entry.content)
si.source_metadata['lang'] = pe.lang[0].code
si.source_metadata['author'] = json.dumps(
dict(
name=pe.author[0].name,
email=pe.author[0].email,
link=pe.author[0].link[0].href,
)
)
si.source = entry.source.publisher_type
return si
|
python
|
{
"resource": ""
}
|
q278449
|
_make_content_item
|
test
|
def _make_content_item(node, mime_type=None, alternate_data=None):
"""Create a ContentItem from a node in the spinn3r data tree.
The ContentItem is created with raw data set to ``node.data``,
decompressed if the node's encoding is 'zlib', and UTF-8
normalized, with a MIME type from ``node.mime_type``.
``node``
the actual node from the spinn3r protobuf data
``mime_type``
string MIME type to use (defaults to ``node.mime_type``)
``alternate_data``
alternate (compressed) data to use, if ``node.data`` is missing
or can't be decompressed
"""
raw = node.data
if getattr(node, 'encoding', None) == 'zlib':
try:
raw = zlib.decompress(node.data)
except Exception, exc:
if alternate_data is not None:
try:
raw = zlib.decompress(alternate_data)
except Exception:
raise exc # the original exception
else:
raise
if mime_type is None:
mime_type = node.mime_type
raw = raw.decode('utf8').encode('utf8')
return streamcorpus.ContentItem(raw=raw, media_type=mime_type)
|
python
|
{
"resource": ""
}
|
q278450
|
ProtoStreamReader._read_varint
|
test
|
def _read_varint(self):
"""Read exactly a varint out of the underlying file."""
buf = self._read(8)
(n, l) = _DecodeVarint(buf, 0)
self._unread(buf[l:])
return n
|
python
|
{
"resource": ""
}
|
q278451
|
ProtoStreamReader._read_a
|
test
|
def _read_a(self, cls):
"""Read some protobuf-encoded object stored in a single block
out of the file."""
o = cls()
o.ParseFromString(self._read_block())
return o
|
python
|
{
"resource": ""
}
|
q278452
|
serialize_si_key
|
test
|
def serialize_si_key(si_key):
'''
Return packed bytes representation of StreamItem kvlayer key.
The result is 20 bytes, 16 of md5 hash, 4 of int timestamp.
'''
if len(si_key[0]) != 16:
raise ValueError('bad StreamItem key, expected 16 byte '
'md5 hash binary digest, got: {0!r}'.format(si_key))
return struct.pack('>16si', si_key[0], si_key[1])
|
python
|
{
"resource": ""
}
|
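A self-contained sketch of the 20-byte layout packed by serialize_si_key; the key is fabricated with hashlib purely for illustration:
import hashlib
import struct

doc_md5 = hashlib.md5(b"http://example.com/doc").digest()   # 16 raw bytes
epoch_ticks = 1327351200                                    # int timestamp
packed = struct.pack('>16si', doc_md5, epoch_ticks)         # same format string as above
assert len(packed) == 20                                    # 16 md5 bytes + 4-byte big-endian int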
q278453
|
streamitem_to_key_data
|
test
|
def streamitem_to_key_data(si):
'''
extract the parts of a StreamItem that go into a kvlayer key,
convert StreamItem to blob for storage.
return (kvlayer key tuple), data blob
'''
key = key_for_stream_item(si)
data = streamcorpus.serialize(si)
errors, data = streamcorpus.compress_and_encrypt(data)
assert not errors, errors
return key, data
|
python
|
{
"resource": ""
}
|
q278454
|
working_directory
|
test
|
def working_directory(path):
"""Change working directory and restore the previous on exit"""
prev_dir = os.getcwd()
os.chdir(str(path))
try:
yield
finally:
os.chdir(prev_dir)
|
python
|
{
"resource": ""
}
|
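working_directory as written is a generator, so it is presumably meant to carry a contextlib.contextmanager decorator; a sketch of the intended use under that assumption:
import contextlib
import os

working_directory = contextlib.contextmanager(working_directory)  # assumed decoration

with working_directory("/tmp"):
    print(os.getcwd())   # the temporary directory, while inside the block
# the previous working directory is restored here, even if the block raised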
q278455
|
strip_prefix
|
test
|
def strip_prefix(s, prefix, strict=False):
"""Removes the prefix, if it's there, otherwise returns input string unchanged.
If strict is True, also ensures the prefix was present"""
if s.startswith(prefix):
return s[len(prefix) :]
elif strict:
raise WimpyError("string doesn't start with prefix")
return s
|
python
|
{
"resource": ""
}
|
q278456
|
strip_suffix
|
test
|
def strip_suffix(s, suffix, strict=False):
"""Removes the suffix, if it's there, otherwise returns input string unchanged.
If strict is True, also ensures the suffix was present"""
if s.endswith(suffix):
return s[: len(s) - len(suffix)]
elif strict:
raise WimpyError("string doesn't end with suffix")
return s
|
python
|
{
"resource": ""
}
|
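Quick illustrations of strip_prefix and strip_suffix (WimpyError is only raised when strict=True):
strip_prefix("wimpy.utils", "wimpy.")         # -> 'utils'
strip_prefix("utils", "wimpy.")               # -> 'utils' (unchanged)
strip_suffix("report.csv", ".csv")            # -> 'report'
strip_suffix("report", ".csv", strict=True)   # raises WimpyError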
q278457
|
is_subsequence
|
test
|
def is_subsequence(needle, haystack):
"""Are all the elements of needle contained in haystack, and in the same order?
There may be other elements interspersed throughout"""
it = iter(haystack)
for element in needle:
if element not in it:
return False
return True
|
python
|
{
"resource": ""
}
|
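For example (the membership test consumes the iterator, which is what enforces the ordering requirement):
is_subsequence([1, 3, 5], [1, 2, 3, 4, 5])   # -> True
is_subsequence([3, 1], [1, 2, 3])            # -> False (order matters)
is_subsequence("ace", "abcde")               # -> True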
q278458
|
cube
|
test
|
def cube():
"""Return an Ice application with a default home page.
Create :class:`Ice` object, add a route to return the default page
when a client requests the server root, i.e. /, using HTTP GET
method, add an error handler to return HTTP error pages when an
error occurs and return this object. The returned object can be used
as a WSGI application.
Returns:
Ice: WSGI application.
"""
app = Ice()
@app.get('/')
def default_home_page():
"""Return a default home page."""
return simple_html('It works!',
'<h1>It works!</h1>\n'
'<p>This is the default ice web page.</p>')
@app.error()
def generic_error_page():
"""Return a simple and generic error page."""
return simple_html(app.response.status_line,
'<h1>{title}</h1>\n'
'<p>{description}</p>\n'
'<hr>\n'
'<address>Ice/{version}</address>'.format(
title=app.response.status_line,
description=app.response.status_detail,
version=__version__))
def simple_html(title, body):
"""Return a simple HTML page."""
return (
'<!DOCTYPE html>\n'
'<html>\n<head><title>{title}</title></head>\n'
'<body>\n{body}\n</body>\n</html>\n'
).format(title=title, body=body)
return app
|
python
|
{
"resource": ""
}
|
q278459
|
Ice.run
|
test
|
def run(self, host='127.0.0.1', port=8080):
"""Run the application using a simple WSGI server.
Arguments:
host (str, optional): Host on which to listen.
port (int, optional): Port number on which to listen.
"""
from wsgiref import simple_server
self._server = simple_server.make_server(host, port, self)
self._server.serve_forever()
|
python
|
{
"resource": ""
}
|
q278460
|
Ice.exit
|
test
|
def exit(self):
"""Stop the simple WSGI server running the application."""
if self._server is not None:
self._server.shutdown()
self._server.server_close()
self._server = None
|
python
|
{
"resource": ""
}
|
q278461
|
Ice.route
|
test
|
def route(self, method, pattern):
"""Decorator to add route for a request with any HTTP method.
Arguments:
method (str): HTTP method name, e.g. GET, POST, etc.
pattern (str): Routing pattern the path must match.
Returns:
function: Decorator function to add route.
"""
def decorator(callback):
self._router.add(method, pattern, callback)
return callback
return decorator
|
python
|
{
"resource": ""
}
|
q278462
|
Ice.error
|
test
|
def error(self, status=None):
"""Decorator to add a callback that generates error page.
The *status* parameter specifies the HTTP response status code
for which the decorated callback should be invoked. If the
*status* argument is not specified, then the decorated callable
is considered to be a fallback callback.
A fallback callback, when defined, is invoked to generate the
error page for any HTTP response representing an error when
there is no error handler defined explicitly for the response
code of the HTTP response.
Arguments:
status(int, optional): HTTP response status code.
Returns:
function: Decorator function to add error handler.
"""
def decorator(callback):
self._error_handlers[status] = callback
return callback
return decorator
|
python
|
{
"resource": ""
}
|
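A hedged sketch tying the Ice methods above together (route, error, run); the handlers and the port are illustrative, in the same spirit as the cube() factory earlier in this section:
app = Ice()

@app.route('GET', '/ping')      # add a literal route for GET /ping
def ping():
    return 'pong'

@app.error(404)                 # explicit handler for 404 responses
def not_found():
    return 'No such page.'

app.run(host='127.0.0.1', port=8080)   # serves via wsgiref's simple_server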
q278463
|
Ice.static
|
test
|
def static(self, root, path, media_type=None, charset='UTF-8'):
"""Send content of a static file as response.
The path to the document root directory should be specified as
the root argument. This is very important to prevent directory
traversal attack. This method guarantees that only files within
the document root directory are served and no files outside this
directory can be accessed by a client.
The path to the actual file to be returned should be specified
as the path argument. This path must be relative to the document
directory.
The *media_type* and *charset* arguments are used to set the
Content-Type header of the HTTP response. If *media_type*
is not specified or specified as ``None`` (the default), then it
is guessed from the filename of the file to be returned.
Arguments:
root (str): Path to document root directory.
path (str): Path to file relative to document root directory.
media_type (str, optional): Media type of file.
charset (str, optional): Character set of file.
Returns:
bytes: Content of file to be returned in the HTTP response.
"""
root = os.path.abspath(os.path.join(root, ''))
path = os.path.abspath(os.path.join(root, path.lstrip('/\\')))
# Save the filename from the path in the response state, so that
# a following download() call can default to this filename for
# downloadable file when filename is not explicitly specified.
self.response.state['filename'] = os.path.basename(path)
if not path.startswith(root):
return 403
elif not os.path.isfile(path):
return 404
if media_type is not None:
self.response.media_type = media_type
else:
self.response.media_type = mimetypes.guess_type(path)[0]
self.response.charset = charset
with open(path, 'rb') as f:
return f.read()
|
python
|
{
"resource": ""
}
|
q278464
|
Ice._get_error_page_callback
|
test
|
def _get_error_page_callback(self):
"""Return an error page for the current response status."""
if self.response.status in self._error_handlers:
return self._error_handlers[self.response.status]
elif None in self._error_handlers:
return self._error_handlers[None]
else:
# Rudimentary error handler if no error handler was found
self.response.media_type = 'text/plain'
return lambda: self.response.status_line
|
python
|
{
"resource": ""
}
|
q278465
|
Router.add
|
test
|
def add(self, method, pattern, callback):
"""Add a route.
Arguments:
method (str): HTTP method, e.g. GET, POST, etc.
pattern (str): Pattern that request paths must match.
callback (str): Route handler that is invoked when a request
path matches the *pattern*.
"""
pat_type, pat = self._normalize_pattern(pattern)
if pat_type == 'literal':
self._literal[method][pat] = callback
elif pat_type == 'wildcard':
self._wildcard[method].append(WildcardRoute(pat, callback))
else:
self._regex[method].append(RegexRoute(pat, callback))
|
python
|
{
"resource": ""
}
|
q278466
|
Router.resolve
|
test
|
def resolve(self, method, path):
"""Resolve a request to a route handler.
Arguments:
method (str): HTTP method, e.g. GET, POST, etc.
path (str): Request path
Returns:
tuple or None: A tuple of three items:
1. Route handler (callable)
2. Positional arguments (list)
3. Keyword arguments (dict)
``None`` if no route matches the request.
"""
if method in self._literal and path in self._literal[method]:
return self._literal[method][path], [], {}
else:
return self._resolve_non_literal_route(method, path)
|
python
|
{
"resource": ""
}
|
q278467
|
Router._resolve_non_literal_route
|
test
|
def _resolve_non_literal_route(self, method, path):
"""Resolve a request to a wildcard or regex route handler.
Arguments:
method (str): HTTP method name, e.g. GET, POST, etc.
path (str): Request path
Returns:
tuple or None: A tuple of three items:
1. Route handler (callable)
2. Positional arguments (list)
3. Keyword arguments (dict)
``None`` if no route matches the request.
"""
for route_dict in (self._wildcard, self._regex):
if method in route_dict:
for route in reversed(route_dict[method]):
callback_data = route.match(path)
if callback_data is not None:
return callback_data
return None
|
python
|
{
"resource": ""
}
|
q278468
|
Router._normalize_pattern
|
test
|
def _normalize_pattern(pattern):
"""Return a normalized form of the pattern.
Normalize the pattern by removing pattern type prefix if it
exists in the pattern. Then return the pattern type and the
pattern as a tuple of two strings.
Arguments:
pattern (str): Route pattern to match request paths
Returns:
tuple: Tuple of pattern type (str) and pattern (str)
"""
if pattern.startswith('regex:'):
pattern_type = 'regex'
pattern = pattern[len('regex:'):]
elif pattern.startswith('wildcard:'):
pattern_type = 'wildcard'
pattern = pattern[len('wildcard:'):]
elif pattern.startswith('literal:'):
pattern_type = 'literal'
pattern = pattern[len('literal:'):]
elif RegexRoute.like(pattern):
pattern_type = 'regex'
elif WildcardRoute.like(pattern):
pattern_type = 'wildcard'
else:
pattern_type = 'literal'
return pattern_type, pattern
|
python
|
{
"resource": ""
}
|
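The prefix handling above means a pattern can be forced into any of the three route types; a few illustrative calls (return values follow directly from the code, the patterns themselves are arbitrary):
Router._normalize_pattern('literal:/about')         # -> ('literal', '/about')
Router._normalize_pattern(r'regex:^/post/\d+$')     # -> ('regex', r'^/post/\d+$')
Router._normalize_pattern('wildcard:/hello/<>')     # -> ('wildcard', '/hello/<>')
# Without a prefix the type is inferred via RegexRoute.like() / WildcardRoute.like(),
# falling back to 'literal'.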
q278469
|
Response.response
|
test
|
def response(self):
"""Return the HTTP response body.
Returns:
bytes: HTTP response body as a sequence of bytes
"""
if isinstance(self.body, bytes):
out = self.body
elif isinstance(self.body, str):
out = self.body.encode(self.charset)
else:
out = b''
self.add_header('Content-Type', self.content_type)
self.add_header('Content-Length', str(len(out)))
self.start(self.status_line, self._headers)
return [out]
|
python
|
{
"resource": ""
}
|
q278470
|
Response.add_header
|
test
|
def add_header(self, name, value):
"""Add an HTTP header to response object.
Arguments:
name (str): HTTP header field name
value (str): HTTP header field value
"""
if value is not None:
self._headers.append((name, value))
|
python
|
{
"resource": ""
}
|
q278471
|
Response.set_cookie
|
test
|
def set_cookie(self, name, value, attrs={}):
"""Add a Set-Cookie header to response object.
For a description about cookie attribute values, see
https://docs.python.org/3/library/http.cookies.html#http.cookies.Morsel.
Arguments:
name (str): Name of the cookie
value (str): Value of the cookie
attrs (dict): Dictionary with cookie attribute keys and
values.
"""
cookie = http.cookies.SimpleCookie()
cookie[name] = value
for key, value in attrs.items():
cookie[name][key] = value
self.add_header('Set-Cookie', cookie[name].OutputString())
|
python
|
{
"resource": ""
}
|
q278472
|
Response.status_line
|
test
|
def status_line(self):
"""Return the HTTP response status line.
The status line is determined from :attr:`status` code. For
example, if the status code is 200, then '200 OK' is returned.
Returns:
str: Status line
"""
return (str(self.status) + ' ' +
Response._responses[self.status].phrase)
|
python
|
{
"resource": ""
}
|
q278473
|
Response.content_type
|
test
|
def content_type(self):
"""Return the value of Content-Type header field.
The value for the Content-Type header field is determined from
the :attr:`media_type` and :attr:`charset` data attributes.
Returns:
str: Value of Content-Type header field
"""
if (self.media_type is not None and
self.media_type.startswith('text/') and
self.charset is not None):
return self.media_type + '; charset=' + self.charset
else:
return self.media_type
|
python
|
{
"resource": ""
}
|
q278474
|
MultiDict.getall
|
test
|
def getall(self, key, default=[]):
"""Return the list of all values for the specified key.
Arguments:
key (object): Key
default (list): Default value to return if the key does not
exist, defaults to ``[]``, i.e. an empty list.
Returns:
list: List of all values for the specified key if the key
exists, ``default`` otherwise.
"""
return self.data[key] if key in self.data else default
|
python
|
{
"resource": ""
}
|
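Assuming MultiDict keeps a list of values per key in self.data (consistent with getall above, and with __setitem__ appending rather than overwriting), usage would look like:
form = MultiDict()
form['tag'] = 'python'    # assumed: __setitem__ appends to the per-key list
form['tag'] = 'wsgi'
form.getall('tag')        # -> ['python', 'wsgi']
form.getall('missing')    # -> [] (the default)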
q278475
|
rmtree
|
test
|
def rmtree(path, use_shutil=True, followlinks=False, retries=10):
'''remove all files and directories below path, including path
itself; works even when shutil.rmtree fails because of read-only
files in NFS and Windows. Follows symlinks.
`use_shutil` defaults to True; useful for testing
`followlinks` defaults to False; if set to True, shutil.rmtree is
not used.
'''
if use_shutil and not followlinks:
try:
shutil.rmtree(path)
return
except Exception, exc:
logger.info('shutil.rmtree(%s) failed, so resorting to recursive delete', path)
logger.debug('\ntrapped:\n%s', traceback.format_exc(exc))
if not os.path.isdir(path):
os.remove(path)
return
## bottom up traversal removing files and then removing directories
for root, dir_names, file_names in os.walk(path, topdown=False, followlinks=followlinks):
for fname in file_names:
fpath = os.path.join(root, fname)
tries = 0
while tries < retries:
tries += 1
try:
os.remove(fpath)
break
except Exception, exc:
time.sleep(0.1)
if os.path.exists(fpath):
logger.critical('os.remove(%s) failed, so leaving data behind!!!', fpath)
logger.critical('\ntrapped:\n%s', traceback.format_exc(exc))
#logger.critical(get_open_fds())
for dname in dir_names:
full_path = os.path.join(root, dname)
if os.path.islink(full_path):
real_path = os.path.realpath(full_path)
os.remove(full_path)
full_path = real_path
os.rmdir(full_path)
if os.path.exists(path):
os.rmdir(path)
|
python
|
{
"resource": ""
}
|
q278476
|
get_open_fds
|
test
|
def get_open_fds(verbose=False):
'''return list of open files for current process
.. warning:: will only work on UNIX-like OSes.
'''
pid = os.getpid()
procs = subprocess.check_output(
[ "lsof", '-w', '-Ff', "-p", str( pid ) ] )
if verbose:
oprocs = subprocess.check_output(
[ "lsof", '-w', "-p", str( pid ) ] )
logger.info(oprocs)
open_files = filter(
lambda s: s and s[ 0 ] == 'f' and s[1: ].isdigit(),
procs.split( '\n' ) )
return open_files
|
python
|
{
"resource": ""
}
|
q278477
|
file_type_stats
|
test
|
def file_type_stats(config):
'''
returns a kba.pipeline "transform" function that generates file
type stats from the stream_items that it sees. Currently, these
stats are just the first five non-whitespace characters.
'''
## make a closure around config
def _file_type_stats(stream_item, context):
if stream_item.body and stream_item.body.raw:
#print repr(stream_item.body.raw[:250])
#sys.stdout.flush()
#doctype_m = doctype_re.match(stream_item.body.raw[:250])
#if doctype_m:
#print 'DOCTYPE: %s' % repr(doctype_m.group('doctype').lower())
if 'doctype html' in stream_item.body.raw[:250].lower():
print 'DOCTYPE: html'
else:
#if probably_html.search(stream_item.body.raw):
if has_tags(stream_item.body.raw[:400]):
print 'PROBABLY_HTML'
else:
xml = xml_ish.search(stream_item.body.raw)
if xml:
print 'XML: %s' % repr(xml.group('intro'))
else:
pdf = pdf_start.search(stream_item.body.raw)
if pdf:
print 'PDF %s' % repr(pdf.group('version'))
else:
ext = stream_item.abs_url.split('.')[-1]
if len(ext) < 6:
print 'UNK ext: %s' % repr(ext)
else:
first = first_letters.match(stream_item.body.raw)
if first and False:
print 'UNK letters: %s' % repr(first.group('first_letters'))
else:
print 'UNK first bytes: %s' % repr(stream_item.body.raw[:50])
#m = first_three_letters.search(stream_item.body.raw)
#if m:
# print repr(m.group('first_three_letters')).lower().strip()
#else:
# print repr(stream_item.body.raw[:50]).lower().strip()
return stream_item
return _file_type_stats
|
python
|
{
"resource": ""
}
|
q278478
|
rejester_run
|
test
|
def rejester_run(work_unit):
'''get a rejester.WorkUnit with KBA s3 path, fetch it, and save
some counts about it.
'''
#fname = 'verify-chunks-%d-%d' % (os.getpid(), time.time())
fname = work_unit.key.strip().split('/')[-1]
output_dir_path = work_unit.data.get('output_dir_path', '/mnt')
u = uuid.uuid3(uuid.UUID(int=0), work_unit.key.strip())
path1 = u.hex[0]
path2 = u.hex[1]
fpath = os.path.join(output_dir_path, path1, path2, fname)
if not os.path.exists(os.path.dirname(fpath)):
os.makedirs(os.path.dirname(fpath))
output = gzip.open(fpath + '-out.gz', 'wb')
expected_si_count = int(fname.split('-')[1])
max_tries = 20
tries = 0
while tries < max_tries:
try:
exc, si_count, serif_count, clean_visible_bytes, clean_visible_count, stream_ids = \
attempt_fetch(work_unit, fpath)
if si_count != expected_si_count:
print 'retrying because si_count = %d != %d expected_si_count' % (si_count, expected_si_count)
sys.stdout.flush()
tries += 1
continue
else:
print 'succeeded in reading si_count = %d' % (si_count,)
sys.stdout.flush()
output.write( '%s\t%d\t%d\t%d\t%d\t%s\t%s\n' % (
exc, si_count, serif_count, clean_visible_bytes, clean_visible_count,
work_unit.key.strip(), ','.join(['%s|%s' % tup for tup in stream_ids])) )
break
except Exception, exc:
print 'broken?'
print traceback.format_exc(exc)
sys.stdout.flush()
tries += 1
output.write(traceback.format_exc(exc))
output.close()
|
python
|
{
"resource": ""
}
|
q278479
|
attempt_fetch
|
test
|
def attempt_fetch(work_unit, fpath):
'''attempt a fetch and iteration over a work_unit.key path in s3
'''
url = 'http://s3.amazonaws.com/aws-publicdatasets/' + work_unit.key.strip()
## cheapest way to iterate over the corpus is a few stages of
## streamed child processes. Note that stderr needs to go
## separately to a file so that reading the stdin doesn't get
## blocked:
cmd = '(wget -O - %s | gpg --no-permission-warning --trust-model always --output - --decrypt - | xz --decompress) 2> %s-err' % (url, fpath)
print cmd
child = Popen(cmd, stdout=PIPE, shell=True)
print 'child launched'
sys.stdout.flush()
si_count = 0
serif_count = 0
exc = ''
stream_ids = list()
clean_visible_bytes = 0
clean_visible_count = 0
try:
for si in Chunk(file_obj=child.stdout):
print si.stream_id, si.abs_url
if si.body.language:
lang = si.body.language.code
else:
lang = ''
stream_ids.append((lang, si.stream_id))
if si.body.clean_visible:
clean_visible_count += 1
clean_visible_bytes += len(si.body.clean_visible)
si_count += 1
if 'serif' in si.body.sentences:
serif_count += 1
except Exception, exc:
exc = re.sub('\s+', ' ', str(exc)).strip()
child.terminate()
child.wait()
child.stdout.close()
return exc, si_count, serif_count, clean_visible_bytes, clean_visible_count, stream_ids
|
python
|
{
"resource": ""
}
|
q278480
|
get_file_lines
|
test
|
def get_file_lines(file_name):
"""Return a list of non-empty lines from `file_name`, resolved relative to this module's directory."""
file_path = path.join(path.dirname(path.abspath(__file__)), file_name)
with open(file_path) as file_obj:
return [line for line in file_obj.read().splitlines() if line]
|
python
|
{
"resource": ""
}
|
q278481
|
_random_adjspecies_pair
|
test
|
def _random_adjspecies_pair():
"""Return an ordered 2-tuple containing a species and a describer."""
describer, desc_position = random_describer()
if desc_position == 'prefix':
return (describer, random_species())
elif desc_position == 'suffix':
return (random_species(), describer)
|
python
|
{
"resource": ""
}
|
q278482
|
random_adjspecies_pair
|
test
|
def random_adjspecies_pair(maxlen=None, prevent_stutter=True):
"""
Return an ordered 2-tuple containing a species and a describer.
The letter-count of the pair is guaranteed not to exceed `maxlen` if
it is given. If `prevent_stutter` is True, the last letter of the
first item of the pair will be different from the first letter of
the second item.
"""
while True:
pair = _random_adjspecies_pair()
if maxlen and len(''.join(pair)) > maxlen:
continue
if prevent_stutter and pair[0][-1] == pair[1][0]:
continue
return pair
|
python
|
{
"resource": ""
}
|
q278483
|
morph
|
test
|
def morph(ctx, app_id, sentence_file, json_flag,
sentence, info_filter, pos_filter, request_id):
# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode, unicode) -> None # NOQA
""" Morphological analysis for Japanese."""
app_id = clean_app_id(app_id)
sentence = clean_sentence(sentence, sentence_file)
if info_filter:
info_filter = info_filter.replace(',', '|')
if pos_filter:
pos_filter = pos_filter.replace(',', '|')
api = GoolabsAPI(app_id)
ret = api.morph(
sentence=sentence,
info_filter=info_filter,
pos_filter=pos_filter,
request_id=request_id,
)
if json_flag:
click.echo(format_json(api.response.json()))
return
for words in ret['word_list']:
for word in words:
click.echo(','.join(word))
|
python
|
{
"resource": ""
}
|
q278484
|
similarity
|
test
|
def similarity(ctx, app_id, json_flag, query_pair, request_id):
# type: (Context, unicode, bool, List[unicode], unicode) -> None
""" Scoring the similarity of two words. """
app_id = clean_app_id(app_id)
api = GoolabsAPI(app_id)
ret = api.similarity(
query_pair=query_pair,
request_id=request_id
)
if json_flag:
click.echo(format_json(api.response.json()))
return
click.echo('{0:.16f}'.format(ret['score']))
|
python
|
{
"resource": ""
}
|
q278485
|
hiragana
|
test
|
def hiragana(ctx, app_id, sentence_file,
json_flag, sentence, output_type, request_id):
# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA
""" Convert the Japanese to Hiragana or Katakana. """
app_id = clean_app_id(app_id)
sentence = clean_sentence(sentence, sentence_file)
api = GoolabsAPI(app_id)
ret = api.hiragana(
sentence=sentence,
output_type=output_type,
request_id=request_id
)
if json_flag:
click.echo(format_json(api.response.json()))
return
click.echo(ret['converted'])
|
python
|
{
"resource": ""
}
|
q278486
|
entity
|
test
|
def entity(ctx, app_id, sentence_file,
json_flag, sentence, class_filter, request_id):
# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA
""" Extract unique representation from sentence. """
app_id = clean_app_id(app_id)
sentence = clean_sentence(sentence, sentence_file)
if class_filter:
class_filter = class_filter.replace(',', '|')
api = GoolabsAPI(app_id)
ret = api.entity(
sentence=sentence,
class_filter=class_filter,
request_id=request_id
)
if json_flag:
click.echo(format_json(api.response.json()))
return
for ne in ret['ne_list']:
click.echo(','.join(ne))
|
python
|
{
"resource": ""
}
|
q278487
|
shortsum
|
test
|
def shortsum(ctx, app_id, review_file,
json_flag, review, length, request_id):
# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA
"""Summarize reviews into a short summary."""
app_id = clean_app_id(app_id)
review_list = clean_review(review, review_file)
length_int = clean_length(length) # type: Optional[int]
api = GoolabsAPI(app_id)
ret = api.shortsum(
review_list=review_list,
length=length_int,
request_id=request_id,
)
if json_flag:
click.echo(format_json(api.response.json()))
return
click.echo(ret['summary'])
|
python
|
{
"resource": ""
}
|
q278488
|
keyword
|
test
|
def keyword(ctx, app_id, body_file, json_flag,
title, body, max_num, forcus, request_id):
# type: (Context, unicode, Optional[IO], bool, unicode, unicode, int, unicode, unicode) -> None # NOQA
"""Extract "keywords" from an input document. """
app_id = clean_app_id(app_id)
body = clean_body(body, body_file)
api = GoolabsAPI(app_id)
ret = api.keyword(
title=title,
body=body,
max_num=max_num,
forcus=forcus,
request_id=request_id,
)
if json_flag:
click.echo(format_json(api.response.json()))
return
for k in ret['keywords']:
k = dict((key.encode('utf-8'), k[key]) for key in k.keys())
for keyword, score in six.iteritems(k):
click.echo(u'{0},{1}'.format(text(keyword), score))
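
# Programmatic sketch, not part of the original CLI module, mirroring the
# api.keyword() call shape above; the app id, title, and body are placeholders.
def keyword_example():
    api = GoolabsAPI('YOUR_APP_ID')
    ret = api.keyword(title=u'placeholder title', body=u'placeholder body',
                      max_num=10, forcus=None, request_id=None)
    click.echo(format_json(ret))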
|
python
|
{
"resource": ""
}
|
q278489
|
chrono
|
test
|
def chrono(ctx, app_id, sentence_file,
json_flag, sentence, doc_time, request_id):
# type: (Context, unicode, Optional[IO], bool, unicode, unicode, unicode) -> None # NOQA
"""Extract expression expressing date and time and normalize its value """
app_id = clean_app_id(app_id)
sentence = clean_sentence(sentence, sentence_file)
api = GoolabsAPI(app_id)
ret = api.chrono(
sentence=sentence,
doc_time=doc_time,
request_id=request_id,
)
if json_flag:
click.echo(format_json(api.response.json()))
return
for pair in ret['datetime_list']:
click.echo(u'{0}: {1}'.format(text(pair[0]), pair[1]))
|
python
|
{
"resource": ""
}
|
q278490
|
PipelineFactory.create
|
test
|
def create(self, stage, scp_config, config=None):
'''Create a pipeline stage.
Instantiates `stage` with `config`. This essentially
translates to ``stage(config)``, except that two keys from
`scp_config` are injected into the configuration:
``tmp_dir_path`` is an execution-specific directory from
combining the top-level ``tmp_dir_path`` configuration with
:attr:`tmp_dir_suffix`; and ``third_dir_path`` is the same
path from the top-level configuration. `stage` may be either
a callable returning the stage (e.g. its class), or its name
in the configuration.
`scp_config` is the configuration for the pipeline as a
whole, and is required. `config` is the configuration for
the stage; if it is :const:`None` then it is extracted
from `scp_config`.
If you already have a fully formed configuration block
and want to create a stage, you can call
.. code-block:: python
factory.registry[stage](stage_config)
In most cases if you have a stage class object and want to
instantiate it with its defaults you can call
.. code-block:: python
stage = stage_cls(stage_cls.default_config)
.. note:: This mirrors
:meth:`yakonfig.factory.AutoFactory.create`, with
some thought that this factory class might migrate
to using that as a base in the future.
:param stage: pipeline stage class, or its name in the registry
:param dict scp_config: configuration block for the pipeline
:param dict config: configuration block for the stage, or
:const:`None` to get it from `scp_config`
'''
# Figure out what we have for a stage and its name
if isinstance(stage, basestring):
stage_name = stage
stage_obj = self.registry[stage_name]
else:
stage_name = getattr(stage, 'config_name', stage.__name__)
stage_obj = stage
# Find the configuration; get a copy we can mutate
if config is None:
config = scp_config.get(stage_name, None)
if config is None:
config = getattr(stage_obj, 'default_config', {})
config = dict(config)
# Fill in more values
if self.tmp_dir_suffix is None:
config['tmp_dir_path'] = scp_config['tmp_dir_path']
else:
config['tmp_dir_path'] = os.path.join(scp_config['tmp_dir_path'],
self.tmp_dir_suffix)
config['third_dir_path'] = scp_config['third_dir_path']
return stage_obj(config)
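
    # Usage sketch (comments only, not part of the original class). How the
    # factory itself is constructed is not shown here, so assume `factory`
    # already exists with 'clean_html' registered; the paths are hypothetical.
    #
    #   scp_config = {'tmp_dir_path': '/tmp/scp',
    #                 'third_dir_path': '/opt/third'}
    #   clean_html = factory.create('clean_html', scp_config)
    #
    # This is equivalent to factory.registry['clean_html'](config) with
    # tmp_dir_path and third_dir_path injected as described above.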
|
python
|
{
"resource": ""
}
|
q278491
|
PipelineFactory._init_stages
|
test
|
def _init_stages(self, config, name):
'''Create a list of indirect stages.
`name` should be the name of a config item that holds a list
of names of stages, for instance, ``writers``. This looks up
the names of those stages, then creates and returns the
corresponding list of stage objects. For instance, if the
config says
.. code-block:: yaml
incremental_transforms: [clean_html, clean_visible]
then calling ``self._init_stages(scp_config,
'incremental_transforms')`` will return a list of the two
named stage instances.
:param dict config: `streamcorpus_pipeline` configuration block
:param str name: name of the stage name list entry
:return: list of new stage instances
'''
if name not in config:
return []
return [self.create(stage, config) for stage in config[name]]
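
    # Usage sketch (comments only, not part of the original class), reusing the
    # stage names from the YAML example in the docstring; tmp_dir_path and
    # third_dir_path are included because create() reads them.
    #
    #   scp_config = {'incremental_transforms': ['clean_html', 'clean_visible'],
    #                 'tmp_dir_path': '/tmp/scp',
    #                 'third_dir_path': '/opt/third'}
    #   transforms = factory._init_stages(scp_config, 'incremental_transforms')
    #   # -> [<clean_html instance>, <clean_visible instance>]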
|
python
|
{
"resource": ""
}
|
q278492
|
PipelineFactory._init_all_stages
|
test
|
def _init_all_stages(self, config):
'''Create stages that are used for the pipeline.
:param dict config: `streamcorpus_pipeline` configuration
:return: tuple of (reader, incremental transforms, batch
transforms, post-batch incremental transforms, writers,
temporary directory)
'''
reader = self._init_stage(config, 'reader')
incremental_transforms = self._init_stages(
config, 'incremental_transforms')
batch_transforms = self._init_stages(config, 'batch_transforms')
post_batch_incremental_transforms = self._init_stages(
config, 'post_batch_incremental_transforms')
writers = self._init_stages(config, 'writers')
tmp_dir_path = os.path.join(config['tmp_dir_path'],
self.tmp_dir_suffix)
return (reader, incremental_transforms, batch_transforms,
post_batch_incremental_transforms, writers, tmp_dir_path)
|
python
|
{
"resource": ""
}
|
q278493
|
Pipeline.run
|
test
|
def run(self, i_str, start_count=0, start_chunk_time=None):
'''Run the pipeline.
This runs all of the steps described in the pipeline constructor,
reading from some input and writing to some output.
:param str i_str: name of the input file, or other reader-specific
description of where to get input
:param int start_count: index of the first stream item
:param int start_chunk_time: timestamp for the first stream item
'''
try:
if not os.path.exists(self.tmp_dir_path):
os.makedirs(self.tmp_dir_path)
if start_chunk_time is None:
start_chunk_time = time.time()
## the reader returns generators of StreamItems
i_chunk = self.reader(i_str)
## t_path points to the currently in-progress temp chunk
t_path = None
## loop over all docs in the chunk processing and cutting
## smaller chunks if needed
len_clean_visible = 0
sources = set()
next_idx = 0
## how many have we input and actually done processing on?
input_item_count = 0
for si in i_chunk:
# TODO: break out a _process_stream_item function?
next_idx += 1
## yield to the gevent hub to allow other things to run
if gevent:
gevent.sleep(0)
## skip forward until we reach start_count
if next_idx <= start_count:
continue
if next_idx % self.rate_log_interval == 0:
## indexing is zero-based, so next_idx corresponds
## to length of list of SIs processed so far
elapsed = time.time() - start_chunk_time
if elapsed > 0:
rate = float(next_idx) / elapsed
logger.info('%d in %.1f --> %.1f per sec on '
'(pre-partial_commit) %s',
next_idx - start_count, elapsed, rate,
i_str)
if not self.t_chunk:
## make a temporary chunk at a temporary path
# (Lazy allocation after we've read an item that might get processed out to the new chunk file)
                    # TODO: make this EVEN LAZIER by not opening the t_chunk until inside _run_incremental_transforms when the first output si is ready
t_path = os.path.join(self.tmp_dir_path,
't_chunk-%s' % uuid.uuid4().hex)
self.t_chunk = streamcorpus.Chunk(path=t_path, mode='wb')
assert self.t_chunk.message == streamcorpus.StreamItem_v0_3_0, self.t_chunk.message
# TODO: a set of incremental transforms is equivalent
# to a batch transform. Make the pipeline explicitly
# configurable as such:
#
# batch_transforms: [[incr set 1], batch op, [incr set 2], ...]
#
# OR: for some list of transforms (mixed incremental
# and batch) pipeline can detect and batchify as needed
## incremental transforms populate t_chunk
## let the incremental transforms destroy the si by
## returning None
si = self._run_incremental_transforms(
si, self.incremental_transforms)
## insist that every chunk has only one source string
if si:
sources.add(si.source)
if self.assert_single_source and len(sources) != 1:
raise InvalidStreamItem(
'stream item %r had source %r, not %r '
'(set assert_single_source: false to suppress)' %
(si.stream_id, si.source, sources))
if si and si.body and si.body.clean_visible:
len_clean_visible += len(si.body.clean_visible)
## log binned clean_visible lengths, for quick stats estimates
#logger.debug('len(si.body.clean_visible)=%d' % int(10 * int(math.floor(float(len(si.body.clean_visible)) / 2**10)/10)))
#logger.debug('len(si.body.clean_visible)=%d' % len(si.body.clean_visible))
if ((self.output_chunk_max_count is not None and
len(self.t_chunk) == self.output_chunk_max_count)):
logger.info('reached output_chunk_max_count (%d) at: %d',
len(self.t_chunk), next_idx)
self._process_output_chunk(
start_count, next_idx, sources, i_str, t_path)
start_count = next_idx
                elif (self.output_chunk_max_clean_visible_bytes is not None and
                      len_clean_visible >=
                      self.output_chunk_max_clean_visible_bytes):
logger.info(
'reached output_chunk_max_clean_visible_bytes '
'(%d) at: %d',
self.output_chunk_max_clean_visible_bytes,
len_clean_visible)
len_clean_visible = 0
self._process_output_chunk(
start_count, next_idx, sources, i_str, t_path)
start_count = next_idx
input_item_count += 1
if (((self.input_item_limit is not None) and
(input_item_count > self.input_item_limit))):
break
if self.t_chunk is not None:
self._process_output_chunk(
start_count, next_idx, sources, i_str, t_path)
## return how many stream items we processed
return next_idx
finally:
if self.t_chunk is not None:
self.t_chunk.close()
for transform in self.batch_transforms:
transform.shutdown()
if self.cleanup_tmp_files:
rmtree(self.tmp_dir_path)
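
    # Usage sketch (comments only): building a Pipeline is not shown in this
    # snippet, so assume `pipeline` was already constructed (for example via
    # the PipelineFactory above); '/path/to/input.sc' is a placeholder input
    # name that the configured reader understands.
    #
    #   processed = pipeline.run('/path/to/input.sc')
    #   logger.info('processed %d stream items', processed)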
|
python
|
{
"resource": ""
}
|
q278494
|
Pipeline._run_writers
|
test
|
def _run_writers(self, start_count, next_idx, sources, i_str, t_path):
'''Run all of the writers over some intermediate chunk.
:param int start_count: index of the first item
:param int next_idx: index of the next item (after the last
item in this chunk)
:param list sources: source strings included in this chunk
(usually only one source)
:param str i_str: name of input file or other input
:param str t_path: location of intermediate chunk on disk
:return: list of output file paths or other outputs
'''
# writers put the chunk somewhere, and could delete it
name_info = dict(
first=start_count,
# num and md5 computed in each writers
source=sources.pop(),
)
all_o_paths = []
for writer in self.writers:
logger.debug('running %r on %r: %r', writer, i_str, name_info)
o_paths = writer(t_path, name_info, i_str)
logger.debug('loaded (%d, %d) of %r into %r',
start_count, next_idx - 1, i_str, o_paths)
all_o_paths += o_paths
return all_o_paths
|
python
|
{
"resource": ""
}
|
q278495
|
Pipeline._run_incremental_transforms
|
test
|
def _run_incremental_transforms(self, si, transforms):
'''
Run transforms on stream item.
Item may be discarded by some transform.
Writes successful items out to current self.t_chunk
Returns transformed item or None.
'''
## operate each transform on this one StreamItem
for transform in transforms:
try:
stream_id = si.stream_id
si_new = transform(si, context=self.context)
if si_new is None:
logger.warn('transform %r deleted %s abs_url=%r',
transform, stream_id, si and si.abs_url)
return None
si = si_new
except TransformGivingUp:
## do nothing
logger.info('transform %r giving up on %r',
transform, si.stream_id)
            except Exception as exc:
logger.critical(
'transform %r failed on %r from i_str=%r abs_url=%r',
transform, si and si.stream_id, self.context.get('i_str'),
si and si.abs_url, exc_info=True)
assert si is not None
## expect to always have a stream_time
if not si.stream_time:
raise InvalidStreamItem('empty stream_time: %s' % si)
if si.stream_id is None:
raise InvalidStreamItem('empty stream_id: %r' % si)
## put the StreamItem into the output
if type(si) != streamcorpus.StreamItem_v0_3_0:
raise InvalidStreamItem('incorrect stream item object %r' %
type(si))
self.t_chunk.add(si)
return si
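
    # Sketch of the transform contract this method expects, not part of the
    # original class: a transform is a callable taking (si, context=...) that
    # returns a (possibly modified) StreamItem, returns None to drop the item,
    # or raises TransformGivingUp to leave it unchanged.
    #
    #   def drop_empty_bodies(si, context=None):
    #       if not (si.body and si.body.raw):
    #           return None          # discard this stream item
    #       return si                # pass it through unchanged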
|
python
|
{
"resource": ""
}
|
q278496
|
replace_config
|
test
|
def replace_config(config, name):
'''Replace the top-level pipeline configurable object.
This investigates a number of sources, including
`external_stages_path` and `external_stages_modules` configuration
and `streamcorpus_pipeline.stages` entry points, and uses these to
find the actual :data:`sub_modules` for
:mod:`streamcorpus_pipeline`.
'''
global static_stages
if static_stages is None:
static_stages = PipelineStages()
stages = static_stages
if 'external_stages_path' in config:
path = config['external_stages_path']
if not os.path.isabs(path) and config.get('root_path'):
path = os.path.join(config['root_path'], path)
        try:
            stages.load_external_stages(path)
except IOError:
return streamcorpus_pipeline # let check_config re-raise this
if 'external_stages_modules' in config:
for mod in config['external_stages_modules']:
try:
stages.load_module_stages(mod)
except ImportError:
return streamcorpus_pipeline # let check_config re-raise this
else:
stages = static_stages
new_sub_modules = set(stage
for stage in stages.itervalues()
if hasattr(stage, 'config_name'))
return NewSubModules(streamcorpus_pipeline, new_sub_modules)
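
# Usage sketch, not part of the original module. The keys below are the ones
# replace_config() actually inspects; the file and module names are
# hypothetical placeholders.
_example_config = {
    'external_stages_path': 'my_stages.py',
    'external_stages_modules': ['mypkg.pipeline_stages'],
    'root_path': '/srv/pipeline',
}
# Calling it requires those external stage sources to actually exist:
# new_top_level = replace_config(_example_config, 'streamcorpus_pipeline')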
|
python
|
{
"resource": ""
}
|
q278497
|
make_app
|
test
|
def make_app():
"""Make a WSGI app that has all the HTTPie pieces baked in."""
env = Environment()
# STDIN is ignored because HTTPony runs a server that doesn't care.
    # Additionally, ignoring stdin is needed or else pytest blows up.
args = parser.parse_args(args=['/', '--ignore-stdin'], env=env)
args.output_options = 'HB' # Output only requests.
server = 'HTTPony/{0}'.format(__version__)
def application(environ, start_response):
# The WSGI server puts content length and type in the environment
# even when not provided with the request. Drop them if they are empty.
if environ.get('CONTENT_LENGTH') == '':
del environ['CONTENT_LENGTH']
if environ.get('CONTENT_TYPE') == '':
del environ['CONTENT_TYPE']
wrequest = WerkzeugRequest(environ)
data = wrequest.get_data()
request = Request(
method=wrequest.method,
url=wrequest.url,
headers=wrequest.headers,
data=data,
)
prepared = request.prepare()
stream = streams.build_output_stream(
args, env, prepared, response=None,
output_options=args.output_options)
streams.write_stream(stream, env.stdout, env.stdout_isatty)
# When there is data in the request, give the next one breathing room.
if data:
print("\n", file=env.stdout)
# Make dreams come true.
response = Response(headers={'Server': server})
return response(environ, start_response)
return application
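
# Usage sketch, not part of the original module: serve the WSGI app with the
# standard-library server. The host and port are arbitrary choices.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    make_server('127.0.0.1', 8000, make_app()).serve_forever()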
|
python
|
{
"resource": ""
}
|
q278498
|
make_chains_with_names
|
test
|
def make_chains_with_names(sentences):
'''
assemble in-doc coref chains by mapping equiv_id to tokens and
their cleansed name strings
    :param sentences: mapping of tagger_id to lists of sentences,
        each sentence carrying a list of tokens
    :returns dict:
        keys are equiv_ids,
        values are tuple(set of cleansed name strings, set of tokens)
'''
## if an equiv_id is -1, then the token is classified into some
    ## entity_type but has no other tokens in its chain. We don't
    ## want these all lumped together, so we give each a distinct "fake"
## equiv_id other than -1 -- counting negatively to avoid
## collisions with "real" equiv_ids
fake_equiv_ids = -2
## use a default dictionary
equiv_ids = collections.defaultdict(lambda: (set(), set()))
for tagger_id, sents in sentences.items():
for sent in sents:
for tok in sent.tokens:
if tok.entity_type is not None:
## get an appropriate equiv_id
if tok.equiv_id == -1:
eqid = fake_equiv_ids
fake_equiv_ids -= 1
else:
eqid = tok.equiv_id
## store the name parts initially as a set
equiv_ids[eqid][0].add(cleanse(tok.token.decode('utf8')))
## carry a *reference* to the entire Token object
equiv_ids[eqid][1].add(tok)
return equiv_ids
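
# Hedged sketch, not part of the original module. The namedtuples below are
# stand-ins for the real streamcorpus Sentence/Token objects, exposing only
# the attributes read above; it also assumes this module's cleanse() helper.
def _chains_example():
    FakeToken = collections.namedtuple('FakeToken',
                                       'token entity_type equiv_id')
    FakeSentence = collections.namedtuple('FakeSentence', 'tokens')
    sentences = {'some_tagger': [FakeSentence(tokens=[
        FakeToken(token=b'John', entity_type='PER', equiv_id=7),
        FakeToken(token=b'Smith', entity_type='PER', equiv_id=7),
    ])]}
    # one chain keyed by equiv_id 7, holding both name parts and both tokens
    return make_chains_with_names(sentences)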
|
python
|
{
"resource": ""
}
|
q278499
|
ALL_mentions
|
test
|
def ALL_mentions(target_mentions, chain_mentions):
'''
For each name string in the target_mentions list, searches through
all chain_mentions looking for any cleansed Token.token that
contains the name. Returns True only if all of the target_mention
strings appeared as substrings of at least one cleansed
Token.token. Otherwise, returns False.
:type target_mentions: list of basestring
:type chain_mentions: list of basestring
:returns bool:
'''
found_all = True
for name in target_mentions:
found_one = False
for chain_ment in chain_mentions:
if name in chain_ment:
found_one = True
break
if not found_one:
found_all = False
break
return found_all
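
# Usage sketch, not part of the original module: every target name must appear
# as a substring of at least one chain mention for the result to be True.
assert ALL_mentions(['john', 'smith'], ['john smith jr', 'mr smith'])
assert not ALL_mentions(['john', 'doe'], ['john smith jr', 'mr smith'])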
|
python
|
{
"resource": ""
}
|