text_prompt (string, lengths 157–13.1k) | code_prompt (string, lengths 7–19.8k, nullable) |
---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_google_napoleon_docs(prnt_doc=None, child_doc=None):
""" Merge two google-style docstrings into a single docstring, according to napoleon docstring sections. Given the google-style docstrings from a parent and child's attributes, merge the docstring sections such that the child's section is used, wherever present, otherwise the parent's section is used. Any whitespace that can be uniformly removed from a docstring's second line and onwards is removed. Sections will be separated by a single blank line. Aliased docstring sections are normalized. E.g Args, Arguments -> Parameters Parameters prnt_doc: Optional[str] The docstring from the parent. child_doc: Optional[str] The docstring from the child. Returns ------- Union[str, None] The merged docstring. """ |
style = "google"
return merge_all_sections(parse_napoleon_doc(prnt_doc, style), parse_napoleon_doc(child_doc, style), style) |
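A minimal usage sketch (hypothetical docstrings; assumes merge_google_napoleon_docs and its helpers parse_napoleon_doc / merge_all_sections are importable from the same module):

prnt = """Add two numbers.

Args:
    x: First operand.
    y: Second operand.

Returns:
    The sum.
"""
child = """Add two numbers, quickly.

Returns:
    int: The sum.
"""
merged = merge_google_napoleon_docs(prnt, child)
# merged keeps the child's summary and Returns section plus the parent's
# Args section (normalized to "Parameters"), with sections separated by
# single blank lines.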
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_mesh( x1, ngroups1, conns1, x2, ngroups2, conns2, cmap, eps = 1e-8 ):
"""Merge two meshes in common coordinates found in x1, x2.""" |
n1 = x1.shape[0]
n2 = x2.shape[0]
err = nm.sum( nm.sum( nm.abs( x1[cmap[:,0],:-1] - x2[cmap[:,1],:-1] ) ) )
if abs( err ) > (10.0 * eps):
print('nonmatching meshes!', err)
raise ValueError
mask = nm.ones( (n2,), dtype = nm.int32 )
mask[cmap[:,1]] = 0
# print mask, nm.cumsum( mask )
remap = nm.cumsum( mask ) + n1 - 1
remap[cmap[:,1]] = cmap[:,0]
# print remap
i2 = nm.setdiff1d( nm.arange( n2, dtype = nm.int32 ),
cmap[:,1] )
xx = nm.r_[x1, x2[i2]]
ngroups = nm.r_[ngroups1, ngroups2[i2]]
conns = []
for ii in range( len( conns1 ) ):
conn = nm.vstack( (conns1[ii], remap[conns2[ii]]) )
conns.append( conn )
return xx, ngroups, conns |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fix_double_nodes(coor, ngroups, conns, eps):
""" Detect and attempt fixing double nodes in a mesh. The double nodes are nodes having the same coordinates w.r.t. precision given by `eps`. """ |
n_nod, dim = coor.shape
cmap = find_map( coor, nm.zeros( (0,dim) ), eps = eps, allow_double = True )
if cmap.size:
output('double nodes in input mesh!')
output('trying to fix...')
while cmap.size:
print(cmap.size)
# Just like in Variable.equation_mapping()...
ii = nm.argsort( cmap[:,1] )
scmap = cmap[ii]
eq = nm.arange( n_nod )
eq[scmap[:,1]] = -1
eqi = eq[eq >= 0]
eq[eqi] = nm.arange( eqi.shape[0] )
remap = eq.copy()
remap[scmap[:,1]] = eq[scmap[:,0]]
print(coor.shape)
coor = coor[eqi]
ngroups = ngroups[eqi]
print(coor.shape)
ccs = []
for conn in conns:
ccs.append( remap[conn] )
conns = ccs
cmap = find_map( coor, nm.zeros( (0,dim) ), eps = eps,
allow_double = True )
output('...done')
return coor, ngroups, conns |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_min_edge_size(coor, conns):
""" Get the smallest edge length. """ |
mes = 1e16
for conn in conns:
n_ep = conn.shape[1]
for ir in range( n_ep ):
x1 = coor[conn[:,ir]]
for ic in range( ir + 1, n_ep ):
x2 = coor[conn[:,ic]]
aux = nm.sqrt( nm.sum( (x2 - x1)**2.0, axis = 1 ).min() )
mes = min( mes, aux )
return mes |
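A quick sanity check on a toy 2-D mesh (assumes numpy is imported as nm, as in the rest of the module):

import numpy as nm

coor = nm.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
# One connectivity group: the unit square split into two triangles.
conns = [nm.array([[0, 1, 2], [0, 2, 3]], dtype=nm.int32)]
print(get_min_edge_size(coor, conns))  # -> 1.0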
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_min_vertex_distance( coor, guess ):
"""Can miss the minimum, but is enough for our purposes.""" |
# Sort by x.
ix = nm.argsort( coor[:,0] )
scoor = coor[ix]
mvd = 1e16
# Get mvd in chunks potentially smaller than guess.
n_coor = coor.shape[0]
print(n_coor)
i0 = i1 = 0
x0 = scoor[i0,0]
while 1:
while ((scoor[i1,0] - x0) < guess) and (i1 < (n_coor - 1)):
i1 += 1
# print i0, i1, x0, scoor[i1,0]
aim, aa1, aa2, aux = get_min_vertex_distance_naive( scoor[i0:i1+1] )
if aux < mvd:
im, a1, a2 = aim, aa1 + i0, aa2 + i0
mvd = min( mvd, aux )
i0 = i1 = int( 0.5 * (i1 + i0 ) ) + 1
# i0 += 1
x0 = scoor[i0,0]
# print '-', i0
if i1 == n_coor - 1: break
print(im, ix[a1], ix[a2], a1, a2, scoor[a1], scoor[a2])
return mvd |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_mesh( coor, ngroups, conns, mesh_in ):
"""Create a mesh reusing mat_ids and descs of mesh_in.""" |
mat_ids = []
for ii, conn in enumerate( conns ):
mat_id = nm.empty( (conn.shape[0],), dtype = nm.int32 )
mat_id.fill( mesh_in.mat_ids[ii][0] )
mat_ids.append( mat_id )
mesh_out = Mesh.from_data( 'merged mesh', coor, ngroups, conns,
mat_ids, mesh_in.descs )
return mesh_out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_inverse_connectivity(conns, n_nod, ret_offsets=True):
""" For each mesh node referenced in the connectivity conns, make a list of elements it belongs to. """ |
from itertools import chain
iconn = [[] for ii in range( n_nod )]
n_els = [0] * n_nod
for ig, conn in enumerate( conns ):
for iel, row in enumerate( conn ):
for node in row:
iconn[node].extend([ig, iel])
n_els[node] += 1
n_els = nm.array(n_els, dtype=nm.int32)
iconn = nm.fromiter(chain(*iconn), nm.int32)
if ret_offsets:
offsets = nm.cumsum(nm.r_[0, n_els], dtype=nm.int32)
return offsets, iconn
else:
return n_els, iconn |
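A usage sketch on the same kind of toy connectivity; note that iconn stores flattened (group, element) pairs, so node k owns the slice iconn[2*offsets[k]:2*offsets[k+1]]:

import numpy as nm

conns = [nm.array([[0, 1, 2], [0, 2, 3]], dtype=nm.int32)]
offsets, iconn = make_inverse_connectivity(conns, n_nod=4)
k = 2  # the node shared by both triangles
pairs = iconn[2 * offsets[k]:2 * offsets[k + 1]].reshape(-1, 2)
# pairs -> [[0, 0], [0, 1]]: node 2 belongs to elements 0 and 1 of group 0.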
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_surface( surf_faces, mesh_in ):
""" Create a mesh given a set of surface faces and the original mesh. """ |
aux = nm.concatenate([faces.ravel() for faces in surf_faces])
inod = nm.unique(aux)
n_nod = len( inod )
n_nod_m, dim = mesh_in.coors.shape
aux = nm.arange( n_nod, dtype=nm.int32 )
remap = nm.zeros( (n_nod_m,), nm.int32 )
remap[inod] = aux
mesh = Mesh( mesh_in.name + "_surf" )
mesh.coors = mesh_in.coors[inod]
mesh.ngroups = mesh_in.ngroups[inod]
sfm = {3 : "2_3", 4 : "2_4"}
mesh.conns = []
mesh.descs = []
mesh.mat_ids = []
for ii, sf in enumerate( surf_faces ):
n_el, n_fp = sf.shape
conn = remap[sf]
mat_id = nm.empty( (conn.shape[0],), dtype = nm.int32 )
mat_id.fill( ii )
mesh.descs.append( sfm[n_fp] )
mesh.conns.append( conn )
mesh.mat_ids.append( mat_id )
mesh._set_shape_info()
return mesh |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_file(filename=None, io='auto', prefix_dir=None, omit_facets=False):
""" Read a mesh from a file. Parameters filename : string or function or MeshIO instance or Mesh instance The name of file to read the mesh from. For convenience, a mesh creation function or a MeshIO instance or directly a Mesh instance can be passed in place of the file name. io : *MeshIO instance Passing *MeshIO instance has precedence over filename. prefix_dir : str If not None, the filename is relative to that directory. omit_facets : bool If True, do not read cells of lower dimension than the space dimension (faces and/or edges). Only some MeshIO subclasses support this! """ |
if isinstance(filename, Mesh):
return filename
if io == 'auto':
if filename is None:
output( 'filename or io must be specified!' )
raise ValueError
else:
io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir)
output('reading mesh (%s)...' % (io.filename))
tt = time.clock()
trunk = io.get_filename_trunk()
mesh = Mesh(trunk)
mesh = io.read(mesh, omit_facets=omit_facets)
output('...done in %.2f s' % (time.clock() - tt))
mesh._set_shape_info()
return mesh |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_region(region, mesh_in, save_edges=False, save_faces=False, localize=False, is_surface=False):
""" Create a mesh corresponding to a given region. """ |
mesh = Mesh( mesh_in.name + "_reg" )
mesh.coors = mesh_in.coors.copy()
mesh.ngroups = mesh_in.ngroups.copy()
mesh.conns = []
mesh.descs = []
mesh.mat_ids = []
if not is_surface:
if region.has_cells():
for ig in region.igs:
mesh.descs.append( mesh_in.descs[ig] )
els = region.get_cells( ig )
mesh.mat_ids.append( mesh_in.mat_ids[ig][els,:].copy() )
mesh.conns.append( mesh_in.conns[ig][els,:].copy() )
if save_edges:
ed = region.domain.ed
for ig in region.igs:
edges = region.get_edges( ig )
mesh.descs.append( '1_2' )
mesh.mat_ids.append( ed.data[edges,0] + 1 )
mesh.conns.append( ed.data[edges,-2:].copy() )
if save_faces:
mesh._append_region_faces(region)
if save_edges or save_faces:
mesh.descs.append( {2 : '2_3', 3 : '3_4'}[mesh_in.dim] )
mesh.mat_ids.append( -nm.ones_like( region.all_vertices ) )
mesh.conns.append(make_point_cells(region.all_vertices,
mesh_in.dim))
else:
mesh._append_region_faces(region, force_faces=True)
mesh._set_shape_info()
if localize:
mesh.localize( region.all_vertices )
return mesh |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_data( name, coors, ngroups, conns, mat_ids, descs, igs = None ):
""" Create a mesh from mesh data. """ |
if igs is None:
igs = range( len( conns ) )
mesh = Mesh(name)
mesh._set_data(coors = coors,
ngroups = ngroups,
conns = [conns[ig] for ig in igs],
mat_ids = [mat_ids[ig] for ig in igs],
descs = [descs[ig] for ig in igs])
mesh._set_shape_info()
return mesh |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copy(self, name=None):
"""Make a deep copy of self. Parameters name : str Name of the copied mesh. """ |
return Struct.copy(self, deep=True, name=name) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _set_data(self, coors, ngroups, conns, mat_ids, descs, nodal_bcs=None):
""" Set mesh data. Parameters coors : array Coordinates of mesh nodes. ngroups : array Node groups. conns : list of arrays The array of mesh elements (connectivities) for each element group. mat_ids : list of arrays The array of material ids for each element group. descs: list of strings The element type for each element group. nodal_bcs : dict of arrays, optional The nodes defining regions for boundary conditions referred to by the dict keys in problem description files. """ |
self.coors = nm.ascontiguousarray(coors)
if ngroups is None:
self.ngroups = nm.zeros((self.coors.shape[0],), dtype=nm.int32)
else:
self.ngroups = nm.ascontiguousarray(ngroups)
self.conns = [nm.asarray(conn, dtype=nm.int32) for conn in conns]
self.mat_ids = [nm.asarray(mat_id, dtype=nm.int32)
for mat_id in mat_ids]
self.descs = descs
self.nodal_bcs = get_default(nodal_bcs, {}) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(self, filename=None, io=None, coors=None, igs=None, out=None, float_format=None, **kwargs):
""" Write mesh + optional results in `out` to a file. Parameters filename : str, optional The file name. If None, the mesh name is used instead. io : MeshIO instance or 'auto', optional Passing 'auto' respects the extension of `filename`. coors : array, optional The coordinates that can be used instead of the mesh coordinates. igs : array_like, optional Passing a list of group ids selects only those groups for writing. out : dict, optional The output data attached to the mesh vertices and/or cells. float_format : str, optional The format string used to print floats in case of a text file format. **kwargs : dict, optional Additional arguments that can be passed to the `MeshIO` instance. """ |
if filename is None:
filename = self.name + '.mesh'
if io is None:
io = self.io
if io is None:
io = 'auto'
if io == 'auto':
io = MeshIO.any_from_filename( filename )
if coors is None:
coors = self.coors
if igs is None:
igs = range( len( self.conns ) )
aux_mesh = Mesh.from_data( self.name, coors, self.ngroups,
self.conns, self.mat_ids, self.descs, igs )
io.set_float_format( float_format )
io.write( filename, aux_mesh, out, **kwargs ) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_element_coors(self, ig=None):
""" Get the coordinates of vertices elements in group `ig`. Parameters ig : int, optional The element group. If None, the coordinates for all groups are returned, filled with zeros at places of missing vertices, i.e. where elements having less then the full number of vertices (`n_ep_max`) are. Returns ------- coors : array The coordinates in an array of shape `(n_el, n_ep_max, dim)`. """ |
cc = self.coors
n_ep_max = self.n_e_ps.max()
coors = nm.empty((self.n_el, n_ep_max, self.dim), dtype=cc.dtype)
for ig, conn in enumerate(self.conns):
i1, i2 = self.el_offsets[ig], self.el_offsets[ig + 1]
coors[i1:i2, :conn.shape[1], :] = cc[conn]
return coors |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def transform_coors(self, mtx_t, ref_coors=None):
""" Transform coordinates of the mesh by the given transformation matrix. Parameters mtx_t : array The transformation matrix `T` (2D array). It is applied depending on its shape: - `(dim, dim):
x = T * x` - `(dim, dim + 1):
x = T[:, :-1] * x + T[:, -1]` ref_coors : array, optional Alternative coordinates to use for the transformation instead of the mesh coordinates, with the same shape as `self.coors`. """ |
if ref_coors is None:
ref_coors = self.coors
if mtx_t.shape[1] > self.coors.shape[1]:
self.coors[:] = nm.dot(ref_coors, mtx_t[:,:-1].T) + mtx_t[:,-1]
else:
self.coors[:] = nm.dot(ref_coors, mtx_t.T) |
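For the (dim, dim + 1) case the method computes nm.dot(ref_coors, mtx_t[:, :-1].T) + mtx_t[:, -1]; a self-contained sketch of that affine map (a 90-degree rotation followed by a shift by (1, 0)):

import numpy as nm

coors = nm.array([[1.0, 0.0], [0.0, 1.0]])
c, s = nm.cos(nm.pi / 2.0), nm.sin(nm.pi / 2.0)
mtx_t = nm.array([[c, -s, 1.0],
                  [s,  c, 0.0]])
new_coors = nm.dot(coors, mtx_t[:, :-1].T) + mtx_t[:, -1]
print(nm.round(new_coors, 12))  # -> [[1. 1.] [0. 0.]]
# On a Mesh instance the same transform is applied in place via
# mesh.transform_coors(mtx_t).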
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def explode_groups(self, eps, return_emap=False):
""" Explode the mesh element groups by `eps`, i.e. split group interface nodes and shrink each group towards its centre by `eps`. Parameters eps : float in `[0.0, 1.0]` The group shrinking factor. return_emap : bool, optional If True, also return the mapping against original mesh coordinates that result in the exploded mesh coordinates. The mapping can be used to map mesh vertex data to the exploded mesh vertices. Returns ------- mesh : Mesh The new mesh with exploded groups. emap : spmatrix, optional The maping for exploding vertex values. Only provided if `return_emap` is True. """ |
assert_(0.0 <= eps <= 1.0)
remap = nm.empty((self.n_nod,), dtype=nm.int32)
offset = 0
if return_emap:
rows, cols = [], []
coors = []
ngroups = []
conns = []
mat_ids = []
descs = []
for ig, conn in enumerate(self.conns):
nodes = nm.unique(conn)
group_coors = self.coors[nodes]
n_nod = group_coors.shape[0]
centre = group_coors.sum(axis=0) / float(n_nod)
vectors = group_coors - centre[None, :]
new_coors = centre + (vectors * eps)
remap[nodes] = nm.arange(n_nod, dtype=nm.int32) + offset
new_conn = remap[conn]
coors.append(new_coors)
ngroups.append(self.ngroups[nodes])
conns.append(new_conn)
mat_ids.append(self.mat_ids[ig])
descs.append(self.descs[ig])
offset += n_nod
if return_emap:
cols.append(nodes)
rows.append(remap[nodes])
coors = nm.concatenate(coors, axis=0)
ngroups = nm.concatenate(ngroups, axis=0)
mesh = Mesh.from_data('exploded_' + self.name,
coors, ngroups, conns, mat_ids, descs)
if return_emap:
rows = nm.concatenate(rows)
cols = nm.concatenate(cols)
data = nm.ones(rows.shape[0], dtype=nm.float64)
emap = sp.coo_matrix((data, (rows, cols)),
shape=(mesh.n_nod, self.n_nod))
return mesh, emap
else:
return mesh |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def join_conn_groups( conns, descs, mat_ids, concat = False ):
"""Join groups of the same element type.""" |
el = dict_from_keys_init( descs, list )
for ig, desc in enumerate( descs ):
el[desc].append( ig )
groups = [ii for ii in el.values() if ii]
## print el, groups
descs_out, conns_out, mat_ids_out = [], [], []
for group in groups:
n_ep = conns[group[0]].shape[1]
conn = nm.zeros( (0, n_ep), nm.int32 )
mat_id = nm.zeros( (0,), nm.int32 )
for ig in group:
conn = nm.concatenate( (conn, conns[ig]) )
mat_id = nm.concatenate( (mat_id, mat_ids[ig]) )
if concat:
conn = nm.concatenate( (conn, mat_id[:,nm.newaxis]), 1 )
else:
mat_ids_out.append( mat_id )
conns_out.append( conn )
descs_out.append( descs[group[0]] )
if concat:
return conns_out, descs_out
else:
return conns_out, descs_out, mat_ids_out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_complex_output(out_in):
""" Convert complex values in the output dictionary `out_in` to pairs of real and imaginary parts. """ |
out = {}
for key, val in out_in.items():
if val.data.dtype in complex_types:
rval = copy(val)
rval.data = val.data.real
out['real(%s)' % key] = rval
ival = copy(val)
ival.data = val.data.imag
out['imag(%s)' % key] = ival
else:
out[key] = val
return out |
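A usage sketch (assumes the module's Struct container, numpy imported as nm, and that complex_types covers numpy's complex dtypes):

val = Struct(name='u', mode='vertex', data=nm.array([1.0 + 2.0j, 3.0 - 1.0j]), dofs=None)
out = convert_complex_output({'u': val})
# out['real(u)'].data -> [1., 3.], out['imag(u)'].data -> [2., -1.];
# real-valued entries are passed through under their original keys.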
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def guess_format( filename, ext, formats, io_table ):
""" Guess the format of filename, candidates are in formats. """ |
ok = False
for format in formats:
output( 'guessing %s' % format )
try:
ok = io_table[format].guess( filename )
except AttributeError:
pass
if ok: break
else:
raise NotImplementedError('cannot guess format of a *%s file!' % ext)
return format |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def any_from_filename(filename, prefix_dir=None):
""" Create a MeshIO instance according to the kind of `filename`. Parameters filename : str, function or MeshIO subclass instance The name of the mesh file. It can be also a user-supplied function accepting two arguments: `mesh`, `mode`, where `mesh` is a Mesh instance and `mode` is one of 'read','write', or a MeshIO subclass instance. prefix_dir : str The directory name to prepend to `filename`. Returns ------- io : MeshIO subclass instance The MeshIO subclass instance corresponding to the kind of `filename`. """ |
if not isinstance(filename, basestr):
if isinstance(filename, MeshIO):
return filename
else:
return UserMeshIO(filename)
ext = op.splitext(filename)[1].lower()
try:
format = supported_formats[ext]
except KeyError:
raise ValueError('unsupported mesh file suffix! (%s)' % ext)
if isinstance(format, tuple):
format = guess_format(filename, ext, format, io_table)
if prefix_dir is not None:
filename = op.normpath(op.join(prefix_dir, filename))
return io_table[format](filename) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def for_format(filename, format=None, writable=False, prefix_dir=None):
""" Create a MeshIO instance for file `filename` with forced `format`. Parameters filename : str The name of the mesh file. format : str One of supported formats. If None, :func:`MeshIO.any_from_filename()` is called instead. writable : bool If True, verify that the mesh format is writable. prefix_dir : str The directory name to prepend to `filename`. Returns ------- io : MeshIO subclass instance The MeshIO subclass instance corresponding to the `format`. """ |
ext = op.splitext(filename)[1].lower()
try:
_format = supported_formats[ext]
except KeyError:
_format = None
format = get_default(format, _format)
if format is None:
io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir)
else:
if not isinstance(format, basestr):
raise ValueError('ambigous suffix! (%s -> %s)' % (ext, format))
if format not in io_table:
raise ValueError('unknown output mesh format! (%s)' % format)
if writable and ('w' not in supported_capabilities[format]):
output_writable_meshes()
msg = 'write support not implemented for output mesh format "%s",' \
' see above!' \
% format
raise ValueError(msg)
if prefix_dir is not None:
filename = op.normpath(op.join(prefix_dir, filename))
io = io_table[format](filename)
return io |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_data( self, step, filename = None ):
"""Point data only!""" |
filename = get_default( filename, self.filename )
out = {}
fd = open( filename, 'r' )
while 1:
line = skip_read_line(fd, no_eof=True).split()
if line[0] == 'POINT_DATA':
break
n_nod = int(line[1])
while 1:
line = skip_read_line(fd)
if not line:
break
line = line.split()
if line[0] == 'SCALARS':
name, dtype, nc = line[1:]
assert_(int(nc) == 1)
fd.readline() # skip lookup table line
data = nm.zeros((n_nod,), dtype=nm.float64)
ii = 0
while ii < n_nod:
data[ii] = float(fd.readline())
ii += 1
out[name] = Struct( name = name,
mode = 'vertex',
data = data,
dofs = None )
elif line[0] == 'VECTORS':
name, dtype = line[1:]
data = []
ii = 0
while ii < n_nod:
data.append([float(val) for val in fd.readline().split()])
ii += 1
out[name] = Struct( name = name,
mode = 'vertex',
data = nm.array(data, dtype=nm.float64),
dofs = None )
elif line[0] == 'CELL_DATA':
break
line = fd.readline()
fd.close()
return out |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _read_section(self, f, integer=True):
""" Reads one section from the mesh3d file. float(), before returning Some examples how a section can look like: 2 1 2 5 4 7 8 11 10 2 3 6 5 8 9 12 11 or 5 1 2 3 4 1 1 2 6 5 1 2 3 7 6 1 3 4 8 7 1 4 1 5 8 1 or 0 """ |
if integer:
dtype=int
else:
dtype=float
l = self._read_line(f)
N = int(l)
rows = []
for i in range(N):
l = self._read_line(f)
row = nm.fromstring(l, sep=" ", dtype=dtype)
rows.append(row)
return nm.array(rows) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(cls):
""" Return always the same instance of the backend class """ |
if cls not in cls._instances:
cls._instances[cls] = cls()
return cls._instances[cls] |
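The classmethod above is a per-class singleton; a self-contained sketch of the same pattern:

class Backend(object):
    _instances = {}

    @classmethod
    def create(cls):
        # Always return the same instance of the backend class.
        if cls not in cls._instances:
            cls._instances[cls] = cls()
        return cls._instances[cls]

assert Backend.create() is Backend.create()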
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def peek(self):
""" Returns PeekableIterator.Nothing when the iterator is exhausted. """ |
try:
v = next(self._iter)
self._iter = itertools.chain((v,), self._iter)
return v
except StopIteration:
return PeekableIterator.Nothing |
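A self-contained sketch of the surrounding class, assuming it wraps an iterator in self._iter and exposes a Nothing sentinel (names taken from the snippet above):

import itertools

class PeekableIterator(object):
    Nothing = object()  # sentinel returned when the iterator is exhausted

    def __init__(self, iterable):
        self._iter = iter(iterable)

    def __iter__(self):
        return self

    def __next__(self):
        return next(self._iter)

    def peek(self):
        try:
            v = next(self._iter)
            # Push the peeked value back by chaining it in front.
            self._iter = itertools.chain((v,), self._iter)
            return v
        except StopIteration:
            return PeekableIterator.Nothing

it = PeekableIterator([1, 2])
assert it.peek() == 1 and next(it) == 1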
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _append_to_parent(self):
""" Causes this ephemeral table to be persisted on the TOMLFile. """ |
if self.__appended:
return
if self._parent is not None:
self._parent.append_fresh_table(self)
self.__appended = True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def access_token(self, value, request):
""" Try to get the `AccessToken` associated with the provided token. *The provided value must pass `BearerHandler.validate()`* """ |
if self.validate(value, request) is not None:
return None
access_token = AccessToken.objects.for_token(value)
return access_token |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(self, value, request):
""" Try to get the `AccessToken` associated with the given token. The return value is determined based n a few things: - If no token is provided (`value` is None), a 400 response will be returned. - If an invalid token is provided, a 401 response will be returned. - If the token provided is valid, `None` will be returned. """ |
from django.http import HttpResponseBadRequest
from doac.http import HttpResponseUnauthorized
if not value:
response = HttpResponseBadRequest()
response["WWW-Authenticate"] = request_error_header(CredentialsNotProvided)
return response
try:
access_token = AccessToken.objects.for_token(value)
except AccessToken.DoesNotExist:
response = HttpResponseUnauthorized()
response["WWW-Authenticate"] = request_error_header(InvalidToken)
return response
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fetch_friends(self, user):
""" fetches the friends from twitter using the information on django-social-auth models user is an instance of UserSocialAuth Returns: collection of friend objects fetched from facebook """ |
# Fetch the token key and secret
if USING_ALLAUTH:
social_app = SocialApp.objects.get_current('twitter')
consumer_key = social_app.key
consumer_secret = social_app.secret
oauth_token = SocialToken.objects.get(account=user, app=social_app).token
oauth_token_secret = SocialToken.objects.get(account=user, app=social_app).token_secret
else:
t = TwitterBackend()
tokens = t.tokens(user)
oauth_token_secret = tokens['oauth_token_secret']
oauth_token = tokens['oauth_token']
# Consumer key and secret from settings
consumer_key = settings.TWITTER_CONSUMER_KEY
consumer_secret = settings.TWITTER_CONSUMER_SECRET
# now fetch the twitter friends using `python-twitter`
api = twitter.Api(
consumer_key=consumer_key,
consumer_secret=consumer_secret,
access_token_key=oauth_token,
access_token_secret=oauth_token_secret
)
return api.GetFriends() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fetch_friend_ids(self, user):
""" fethces friend id's from twitter Return: collection of friend ids """ |
friends = self.fetch_friends(user)
friend_ids = []
for friend in friends:
friend_ids.append(friend.id)
return friend_ids |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_section(key, prnt_sec, child_sec):
""" Synthesize a output numpy docstring section. Parameters key: str The numpy-section being merged. prnt_sec: Optional[str] The docstring section from the parent's attribute. child_sec: Optional[str] The docstring section from the child's attribute. Returns ------- Optional[str] The output docstring section.""" |
if prnt_sec is None and child_sec is None:
return None
if key == "Short Summary":
header = ''
else:
header = "\n".join((key, "".join("-" for i in range(len(key))), ""))
if child_sec is None:
body = prnt_sec
else:
body = child_sec
return header + body |
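A quick usage sketch (illustrative section text only):

print(merge_section("Returns", prnt_sec="int\n    The parent value.", child_sec=None))
# Returns
# -------
# int
#     The parent value.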
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def structure(table_toplevels):
""" Accepts an ordered sequence of TopLevel instances and returns a navigable object structure representation of the TOML file. """ |
table_toplevels = tuple(table_toplevels)
obj = NamedDict()
last_array_of_tables = None # The Name of the last array-of-tables header
for toplevel in table_toplevels:
if isinstance(toplevel, toplevels.AnonymousTable):
obj[''] = toplevel.table_element
elif isinstance(toplevel, toplevels.Table):
if last_array_of_tables and toplevel.name.is_prefixed_with(last_array_of_tables):
seq = obj[last_array_of_tables]
unprefixed_name = toplevel.name.without_prefix(last_array_of_tables)
seq[-1] = CascadeDict(seq[-1], NamedDict({unprefixed_name: toplevel.table_element}))
else:
obj[toplevel.name] = toplevel.table_element
else: # It's an ArrayOfTables
if last_array_of_tables and toplevel.name != last_array_of_tables and \
toplevel.name.is_prefixed_with(last_array_of_tables):
seq = obj[last_array_of_tables]
unprefixed_name = toplevel.name.without_prefix(last_array_of_tables)
if unprefixed_name in seq[-1]:
seq[-1][unprefixed_name].append(toplevel.table_element)
else:
cascaded_with = NamedDict({unprefixed_name: [toplevel.table_element]})
seq[-1] = CascadeDict(seq[-1], cascaded_with)
else:
obj.append(toplevel.name, toplevel.table_element)
last_array_of_tables = toplevel.name
return obj |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prune_old_authorization_codes():
""" Removes all unused and expired authorization codes from the database. """ |
from .compat import now
from .models import AuthorizationCode
AuthorizationCode.objects.with_expiration_before(now()).delete() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_handler(handler_name):
""" Imports the module for a DOAC handler based on the string representation of the module path that is provided. """ |
from .conf import options
handlers = options.handlers
for handler in handlers:
handler_path = handler.split(".")
name = handler_path[-2]
if handler_name == name:
handler_module = __import__(".".join(handler_path[:-1]), {}, {}, str(handler_path[-1]))
return getattr(handler_module, handler_path[-1])()
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def request_error_header(exception):
""" Generates the error header for a request using a Bearer token based on a given OAuth exception. """ |
from .conf import options
header = "Bearer realm=\"%s\"" % (options.realm, )
if hasattr(exception, "error"):
header = header + ", error=\"%s\"" % (exception.error, )
if hasattr(exception, "reason"):
header = header + ", error_description=\"%s\"" % (exception.reason, )
return header |
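For illustration, a hypothetical exception class with error and reason attributes (the realm value shown is an assumption about options.realm):

class ExampleTokenError(Exception):
    error = "invalid_token"
    reason = "The access token provided has expired."

# With options.realm == "OAuth", request_error_header(ExampleTokenError) yields:
# Bearer realm="OAuth", error="invalid_token", error_description="The access token provided has expired."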
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_default_pos(self, defaultPos):
"""Set the default starting location of our character.""" |
self.coords = defaultPos
self.velocity = r.Vector2()
self.desired_position = defaultPos
r.Ragnarok.get_world().Camera.pan = self.coords
r.Ragnarok.get_world().Camera.desired_pan = self.coords |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __generate_location(self):
""" Reset the location of the cloud once it has left the viewable area of the screen. """ |
screen_width = world.get_backbuffer_size().X
self.movement_speed = random.randrange(10, 25)
# This line of code places the cloud to the right of the viewable screen, so it appears to
# gradually move in from the right instead of randomly appearing on some portion of the viewable
# window.
self.coords = R.Vector2(screen_width + self.image.get_width(), random.randrange(0, 100)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_delimiter(line):
""" True if a line consists only of a single punctuation character.""" |
return bool(line) and line[0] in punctuation and line[0]*len(line) == line |
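For example (assuming punctuation is string.punctuation, as the snippet implies):

assert is_delimiter("-----")
assert not is_delimiter("-- --")  # mixed characters
assert not is_delimiter("")       # empty line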
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_rest_doc(doc):
""" Extract the headers, delimiters, and text from reST-formatted docstrings. Parameters doc: Union[str, None] Returns ------- Dict[str, Section] """ |
class Section(object):
def __init__(self, header=None, body=None):
self.header = header # str
self.body = body # str
doc_sections = OrderedDict([('', Section(header=''))])
if not doc:
return doc_sections
doc = cleandoc(doc)
lines = iter(doc.splitlines())
header = ''
body = []
section = Section(header=header)
line = ''
while True:
try:
prev_line = line
line = next(lines)
# section header encountered
if is_delimiter(line) and 0 < len(prev_line) <= len(line):
# prev-prev-line is overline
if len(body) >= 2 and len(body[-2]) == len(line) \
and body[-2][0] == line[0] and is_delimiter(body[-2]):
lim = -2
else:
lim = -1
section.body = "\n".join(body[:lim]).rstrip()
doc_sections.update([(header.strip(), section)])
section = Section(header="\n".join(body[lim:] + [line]))
header = prev_line
body = []
line = ''
else:
body.append(line)
except StopIteration:
section.body = "\n".join(body).rstrip()
doc_sections.update([(header.strip(), section)])
break
return doc_sections |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_rest_docs(prnt_doc=None, child_doc=None):
""" See custom_inherit.style_store.reST for details. """ |
prnt_sections = parse_rest_doc(prnt_doc)
child_sections = parse_rest_doc(child_doc)
header = prnt_sections['']
prnt_sections.update(child_sections)
if not child_sections[''].body:
prnt_sections[''] = header
if not header.body:
prnt_sections.popitem(last=False)
return "\n\n".join(("\n".join((x.header, x.body)) for x in prnt_sections.values())).lstrip() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_strings(content="", filename=None):
"""Parse an apple .strings file and create a stringset with all entries in the file. See http://developer.apple.com/library/mac/#documentation/MacOSX/Conceptual/BPInternational/Articles/StringsFiles.html for details. """ |
if filename is not None:
content = _get_content(filename=filename)
stringset = []
f = content
if f.startswith(u'\ufeff'):
f = f.lstrip(u'\ufeff')
#regex for finding all comments in a file
cp = r'(?:/\*(?P<comment>(?:[^*]|(?:\*+[^*/]))*\**)\*/)'
p = re.compile(r'(?:%s[ \t]*[\n]|[\r\n]|[\r]){0,1}(?P<line>(("(?P<key>[^"\\]*(?:\\.[^"\\]*)*)")|(?P<property>\w+))\s*=\s*"(?P<value>[^"\\]*(?:\\.[^"\\]*)*)"\s*;)'%cp, re.DOTALL|re.U)
#c = re.compile(r'\s*/\*(.|\s)*?\*/\s*', re.U)
c = re.compile(r'//[^\n]*\n|/\*(?:.|[\r\n])*?\*/', re.U)
ws = re.compile(r'\s+', re.U)
end=0
start = 0
for i in p.finditer(f):
start = i.start('line')
end_ = i.end()
key = i.group('key')
comment = i.group('comment') or ''
if not key:
key = i.group('property')
value = i.group('value')
while end < start:
m = c.match(f, end, start) or ws.match(f, end, start)
if not m or m.start() != end:
print("Invalid syntax: %s" %\
f[end:start])
end = m.end()
end = end_
key = _unescape_key(key)
stringset.append({'key': key, 'value': _unescape(value), 'comment': comment})
return stringset |
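A usage sketch on a tiny .strings payload (assumes the module-level helpers _unescape, _unescape_key and _get_content referenced above are available):

content = u'/* Greeting */\n"hello" = "Hello, world";\n'
entries = parse_strings(content=content)
# entries[0]['key'] == 'hello' and entries[0]['value'] == 'Hello, world';
# the surrounding /* ... */ text is attached as the entry's 'comment'.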
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def identify(file_elements):
""" Outputs an ordered sequence of instances of TopLevel types. Elements start with an optional TableElement, followed by zero or more pairs of (TableHeaderElement, TableElement). """ |
if not file_elements:
return
_validate_file_elements(file_elements)
# An iterator over enumerate(the non-metadata) elements
iterator = PeekableIterator((element_i, element) for (element_i, element) in enumerate(file_elements)
if element.type != elements.TYPE_METADATA)
try:
_, first_element = iterator.peek()
if isinstance(first_element, TableElement):
next(iterator)
yield AnonymousTable(first_element)
except KeyError:
pass
except StopIteration:
return
for element_i, element in iterator:
if not isinstance(element, TableHeaderElement):
continue
# If TableHeader of a regular table, return Table following it
if not element.is_array_of_tables:
table_element_i, table_element = next(iterator)
yield Table(names=element.names, table_element=table_element)
# If TableHeader of an array of tables, do your thing
else:
table_element_i, table_element = next(iterator)
yield ArrayOfTables(names=element.names, table_element=table_element) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_character(tile_map, gamescreen):
"""Create an instance of the main character and return it.""" |
tile_obj = thc.TileHeroCharacter(tile_map, gamescreen)
tile_obj.load_texture("..//Textures//character.png")
tile_obj.origin = r.Vector2(0, 0)
tile_obj.hazard_touched_method = hazard_touched_method
tile_obj.special_touched_method = special_touched_method
return tile_obj |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def auth_uri(self):
"""The authorzation URL that should be provided to the user""" |
return self.oauth.auth_uri(redirect_uri=self.redirect_uri, scope=self.scope) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def browse_dailydeviations(self):
"""Retrieves Daily Deviations""" |
response = self._req('/browse/dailydeviations')
deviations = []
for item in response['results']:
d = Deviation()
d.from_dict(item)
deviations.append(d)
return deviations |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def browse_userjournals(self, username, featured=False, offset=0, limit=10):
"""Fetch user journals from user :param username: name of user to retrieve journals from :param featured: fetch only featured or not :param offset: the pagination offset :param limit: the pagination limit """ |
response = self._req('/browse/user/journals', {
"username":username,
"featured":featured,
"offset":offset,
"limit":limit
})
deviations = []
for item in response['results']:
d = Deviation()
d.from_dict(item)
deviations.append(d)
return {
"results" : deviations,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def browse_morelikethis_preview(self, seed):
"""Fetch More Like This preview result for a seed deviation :param seed: The deviationid to fetch more like """ |
response = self._req('/browse/morelikethis/preview', {
"seed":seed
})
returned_seed = response['seed']
author = User()
author.from_dict(response['author'])
more_from_artist = []
for item in response['more_from_artist']:
d = Deviation()
d.from_dict(item)
more_from_artist.append(d)
more_from_da = []
for item in response['more_from_da']:
d = Deviation()
d.from_dict(item)
more_from_da.append(d)
return {
"seed" : returned_seed,
"author" : author,
"more_from_artist" : more_from_artist,
"more_from_da" : more_from_da
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def browse(self, endpoint="hot", category_path="", seed="", q="", timerange="24hr", tag="", offset=0, limit=10):
"""Fetch deviations from public endpoints :param endpoint: The endpoint from which the deviations will be fetched (hot/morelikethis/newest/undiscovered/popular/tags) :param category_path: category path to fetch from :param q: Search query term :param timerange: The timerange :param tag: The tag to browse :param offset: the pagination offset :param limit: the pagination limit """ |
if endpoint == "hot":
response = self._req('/browse/hot', {
"category_path":category_path,
"offset":offset,
"limit":limit
})
elif endpoint == "morelikethis":
if seed:
response = self._req('/browse/morelikethis', {
"seed":seed,
"category_path":category_path,
"offset":offset,
"limit":limit
})
else:
raise DeviantartError("No seed defined.")
elif endpoint == "newest":
response = self._req('/browse/newest', {
"category_path":category_path,
"q":q,
"offset":offset,
"limit":limit
})
elif endpoint == "undiscovered":
response = self._req('/browse/undiscovered', {
"category_path":category_path,
"offset":offset,
"limit":limit
})
elif endpoint == "popular":
response = self._req('/browse/popular', {
"category_path":category_path,
"q":q,
"timerange":timerange,
"offset":offset,
"limit":limit
})
elif endpoint == "tags":
if tag:
response = self._req('/browse/tags', {
"tag":tag,
"offset":offset,
"limit":limit
})
else:
raise DeviantartError("No tag defined.")
else:
raise DeviantartError("Unknown endpoint.")
deviations = []
for item in response['results']:
d = Deviation()
d.from_dict(item)
deviations.append(d)
return {
"results" : deviations,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_categories(self, catpath="/"):
"""Fetch the categorytree :param catpath: The category to list children of """ |
response = self._req('/browse/categorytree', {
"catpath":catpath
})
categories = response['categories']
return categories |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search_tags(self, tag_name):
"""Searches for tags :param tag_name: Partial tag name to get autocomplete suggestions for """ |
response = self._req('/browse/tags/search', {
"tag_name":tag_name
})
tags = list()
for item in response['results']:
tags.append(item['tag_name'])
return tags |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_deviation(self, deviationid):
"""Fetch a single deviation :param deviationid: The deviationid you want to fetch """ |
response = self._req('/deviation/{}'.format(deviationid))
d = Deviation()
d.from_dict(response)
return d |
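Typical usage of these endpoint wrappers (hypothetical: da stands for an already-authenticated instance of the client class these methods belong to, and the UUID is a placeholder):

deviationid = "12345678-1234-1234-1234-123456789012"
deviation = da.get_deviation(deviationid)  # -> a Deviation built via from_dict()
faves = da.whofaved_deviation(deviationid, limit=5)
print(faves['has_more'], len(faves['results']))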
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def whofaved_deviation(self, deviationid, offset=0, limit=10):
"""Fetch a list of users who faved the deviation :param deviationid: The deviationid you want to fetch :param offset: the pagination offset :param limit: the pagination limit """ |
response = self._req('/deviation/whofaved', get_data={
'deviationid' : deviationid,
'offset' : offset,
'limit' : limit
})
users = []
for item in response['results']:
u = {}
u['user'] = User()
u['user'].from_dict(item['user'])
u['time'] = item['time']
users.append(u)
return {
"results" : users,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_deviation_metadata(self, deviationids, ext_submission=False, ext_camera=False, ext_stats=False, ext_collection=False):
"""Fetch deviation metadata for a set of deviations :param deviationid: The deviationid you want to fetch :param ext_submission: Return extended information - submission information :param ext_camera: Return extended information - EXIF information (if available) :param ext_stats: Return extended information - deviation statistics :param ext_collection: Return extended information - favourited folder information """ |
response = self._req('/deviation/metadata', {
'ext_submission' : ext_submission,
'ext_camera' : ext_camera,
'ext_stats' : ext_stats,
'ext_collection' : ext_collection
},
post_data={
'deviationids[]' : deviationids
})
metadata = []
for item in response['metadata']:
m = {}
m['deviationid'] = item['deviationid']
m['printid'] = item['printid']
m['author'] = User()
m['author'].from_dict(item['author'])
m['is_watching'] = item['is_watching']
m['title'] = item['title']
m['description'] = item['description']
m['license'] = item['license']
m['allows_comments'] = item['allows_comments']
m['tags'] = item['tags']
m['is_favourited'] = item['is_favourited']
m['is_mature'] = item['is_mature']
if "submission" in item:
m['submission'] = item['submission']
if "camera" in item:
m['camera'] = item['camera']
if "collections" in item:
m['collections'] = item['collections']
metadata.append(m)
return metadata |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_deviation_embeddedcontent(self, deviationid, offset_deviationid="", offset=0, limit=10):
"""Fetch content embedded in a deviation :param deviationid: The deviationid of container deviation :param offset_deviationid: UUID of embedded deviation to use as an offset :param offset: the pagination offset :param limit: the pagination limit """ |
response = self._req('/deviation/embeddedcontent', {
'deviationid' : deviationid,
'offset_deviationid' : offset_deviationid,
'offset' : offset,
'limit' : limit
})
deviations = []
for item in response['results']:
d = Deviation()
d.from_dict(item)
deviations.append(d)
return {
"results" : deviations,
"has_less" : response['has_less'],
"has_more" : response['has_more'],
"prev_offset" : response['prev_offset'],
"next_offset" : response['next_offset']
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_deviation_content(self, deviationid):
"""Fetch full data that is not included in the main devaition object The endpoint works with journals and literatures. Deviation objects returned from API contain only excerpt of a journal, use this endpoint to load full content. Any custom CSS rules and fonts applied to journal are also returned. :param deviationid: UUID of the deviation to fetch full data for """ |
response = self._req('/deviation/content', {
'deviationid':deviationid
})
content = {}
if "html" in response:
content['html'] = response['html']
if "css" in response:
content['css'] = response['css']
if "css_fonts" in response:
content['css_fonts'] = response['css_fonts']
return content |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_collections(self, username="", calculate_size=False, ext_preload=False, offset=0, limit=10):
"""Fetch collection folders :param username: The user to list folders for, if omitted the authenticated user is used :param calculate_size: The option to include the content count per each collection folder :param ext_preload: Include first 5 deviations from the folder :param offset: the pagination offset :param limit: the pagination limit """ |
if not username and self.standard_grant_type == "authorization_code":
response = self._req('/collections/folders', {
"calculate_size":calculate_size,
"ext_preload":ext_preload,
"offset":offset,
"limit":limit
})
else:
if not username:
raise DeviantartError("No username defined.")
else:
response = self._req('/collections/folders', {
"username":username,
"calculate_size":calculate_size,
"ext_preload":ext_preload,
"offset":offset,
"limit":limit
})
folders = []
for item in response['results']:
f = {}
f['folderid'] = item['folderid']
f['name'] = item['name']
if "size" in item:
f['size'] = item['size']
if "deviations" in item:
f['deviations'] = []
for deviation_item in item['deviations']:
d = Deviation()
d.from_dict(deviation_item)
f['deviations'].append(d)
folders.append(f)
return {
"results" : folders,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_collection(self, folderid, username="", offset=0, limit=10):
"""Fetch collection folder contents :param folderid: UUID of the folder to list :param username: The user to list folders for, if omitted the authenticated user is used :param offset: the pagination offset :param limit: the pagination limit """ |
if not username and self.standard_grant_type == "authorization_code":
response = self._req('/collections/{}'.format(folderid), {
"offset":offset,
"limit":limit
})
else:
if not username:
raise DeviantartError("No username defined.")
else:
response = self._req('/collections/{}'.format(folderid), {
"username":username,
"offset":offset,
"limit":limit
})
deviations = []
for item in response['results']:
d = Deviation()
d.from_dict(item)
deviations.append(d)
if "name" in response:
name = response['name']
else:
name = None
return {
"results" : deviations,
"name" : name,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fave(self, deviationid, folderid=""):
"""Add deviation to favourites :param deviationid: Id of the Deviation to favourite :param folderid: Optional UUID of the Collection folder to add the favourite into """ |
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
post_data = {}
post_data['deviationid'] = deviationid
if folderid:
post_data['folderid'] = folderid
response = self._req('/collections/fave', post_data = post_data)
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_gallery_all(self, username='', offset=0, limit=10):
""" Get all of a user's deviations :param username: The user to query, defaults to current user :param offset: the pagination offset :param limit: the pagination limit """ |
if not username:
raise DeviantartError('No username defined.')
response = self._req('/gallery/all', {'username': username,
'offset': offset,
'limit': limit})
deviations = []
for item in response['results']:
d = Deviation()
d.from_dict(item)
deviations.append(d)
if "name" in response:
name = response['name']
else:
name = None
return {
"results": deviations,
"name": name,
"has_more": response['has_more'],
"next_offset": response['next_offset']
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_user(self, username="", ext_collections=False, ext_galleries=False):
"""Get user profile information :param username: username to lookup profile of :param ext_collections: Include collection folder info :param ext_galleries: Include gallery folder info """ |
if not username and self.standard_grant_type == "authorization_code":
response = self._req('/user/whoami')
u = User()
u.from_dict(response)
else:
if not username:
raise DeviantartError("No username defined.")
else:
response = self._req('/user/profile/{}'.format(username), {
'ext_collections' : ext_collections,
'ext_galleries' : ext_galleries
})
u = User()
u.from_dict(response['user'])
return u |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_users(self, usernames):
"""Fetch user info for given usernames :param username: The usernames you want metadata for (max. 50) """ |
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/user/whois', post_data={
"usernames":usernames
})
users = []
for item in response['results']:
u = User()
u.from_dict(item)
users.append(u)
return users |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def watch( self, username, watch={ "friend":True, "deviations":True, "journals":True, "forum_threads":True, "critiques":True, "scraps":True, "activity":True, "collections":True } ):
"""Watch a user :param username: The username you want to watch """ |
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/user/friends/watch/{}'.format(username), post_data={
"watch[friend]": watch['friend'],
"watch[deviations]": watch['deviations'],
"watch[journals]": watch['journals'],
"watch[forum_threads]": watch['forum_threads'],
"watch[critiques]": watch['critiques'],
"watch[scraps]": watch['scraps'],
"watch[activity]": watch['activity'],
"watch[collections]": watch['collections'],
})
return response['success'] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unwatch(self, username):
"""Unwatch a user :param username: The username you want to unwatch """ |
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/user/friends/unwatch/{}'.format(username))
return response['success'] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_watching(self, username):
"""Check if user is being watched by the given user :param username: Check if username is watching you """ |
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/user/friends/watching/{}'.format(username))
return response['watching'] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_user(self, user_is_artist="", artist_level="", artist_specialty="", real_name="", tagline="", countryid="", website="", bio=""):
"""Update the users profile information :param user_is_artist: Is the user an artist? :param artist_level: If the user is an artist, what level are they :param artist_specialty: If the user is an artist, what is their specialty :param real_name: The users real name :param tagline: The users tagline :param countryid: The users location :param website: The users personal website :param bio: The users bio """ |
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
post_data = {}
if user_is_artist:
post_data["user_is_artist"] = user_is_artist
if artist_level:
post_data["artist_level"] = artist_level
if artist_specialty:
post_data["artist_specialty"] = artist_specialty
if real_name:
post_data["real_name"] = real_name
if tagline:
post_data["tagline"] = tagline
if countryid:
post_data["countryid"] = countryid
if website:
post_data["website"] = website
if bio:
post_data["bio"] = bio
response = self._req('/user/profile/update', post_data=post_data)
return response['success'] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_watchers(self, username, offset=0, limit=10):
"""Get the user's list of watchers :param username: The username you want to get a list of watchers of :param offset: the pagination offset :param limit: the pagination limit """ |
response = self._req('/user/watchers/{}'.format(username), {
'offset' : offset,
'limit' : limit
})
watchers = []
for item in response['results']:
w = {}
w['user'] = User()
w['user'].from_dict(item['user'])
w['is_watching'] = item['is_watching']
w['lastvisit'] = item['lastvisit']
w['watch'] = {
"friend" : item['watch']['friend'],
"deviations" : item['watch']['deviations'],
"journals" : item['watch']['journals'],
"forum_threads" : item['watch']['forum_threads'],
"critiques" : item['watch']['critiques'],
"scraps" : item['watch']['scraps'],
"activity" : item['watch']['activity'],
"collections" : item['watch']['collections']
}
watchers.append(w)
return {
"results" : watchers,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
} |
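A sketch of paging through all watchers with the has_more/next_offset values returned above; `da` is the same hypothetical client as before.

offset = 0
while True:
    page = da.get_watchers("some_username", offset=offset, limit=50)
    for w in page["results"]:
        print(w["user"], w["is_watching"], w["lastvisit"])
    if not page["has_more"]:
        break
    offset = page["next_offset"]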
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_friends(self, username, offset=0, limit=10):
"""Get the users list of friends :param username: The username you want to get a list of friends of :param offset: the pagination offset :param limit: the pagination limit """ |
response = self._req('/user/friends/{}'.format(username), {
'offset' : offset,
'limit' : limit
})
friends = []
for item in response['results']:
f = {}
f['user'] = User()
f['user'].from_dict(item['user'])
f['is_watching'] = item['is_watching']
f['lastvisit'] = item['lastvisit']
f['watch'] = {
"friend" : item['watch']['friend'],
"deviations" : item['watch']['deviations'],
"journals" : item['watch']['journals'],
"forum_threads" : item['watch']['forum_threads'],
"critiques" : item['watch']['critiques'],
"scraps" : item['watch']['scraps'],
"activity" : item['watch']['activity'],
"collections" : item['watch']['collections']
}
friends.append(f)
return {
"results" : friends,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_statuses(self, username, offset=0, limit=10):
"""Fetch status updates of a user :param username: The username you want to get a list of status updates from :param offset: the pagination offset :param limit: the pagination limit """ |
response = self._req('/user/statuses/', {
"username" : username,
'offset' : offset,
'limit' : limit
})
statuses = []
for item in response['results']:
s = Status()
s.from_dict(item)
statuses.append(s)
return {
"results" : statuses,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_status(self, statusid):
"""Fetch the status :param statusid: Status uuid """ |
response = self._req('/user/statuses/{}'.format(statusid))
s = Status()
s.from_dict(response)
return s |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def post_status(self, body="", id="", parentid="", stashid=""):
"""Post a status :param username: The body of the status :param id: The id of the object you wish to share :param parentid: The parentid of the object you wish to share :param stashid: The stashid of the object you wish to add to the status """ |
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/user/statuses/post', post_data={
"body":body,
"id":id,
"parentid":parentid,
"stashid":stashid
})
return response['statusid'] |
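A sketch of posting statuses with the hypothetical `da` client. The plain-text case only needs body; the second call shares an existing object, where the UUID value is a placeholder and the exact semantics of id/parentid are defined by the DeviantArt API rather than by this wrapper.

# Plain text status.
status_id = da.post_status(body="Hello from the API!")
print(status_id)

# Sharing an existing object (placeholder UUID).
da.post_status(body="Look at this!", id="00000000-0000-0000-0000-000000000000")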
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_data(self, endpoint="privacy"):
"""Returns policies of DeviantArt""" |
if endpoint == "privacy":
response = self._req('/data/privacy')
elif endpoint == "submission":
response = self._req('/data/submission')
elif endpoint == "tos":
response = self._req('/data/tos')
else:
raise DeviantartError("Unknown endpoint.")
return response['text'] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_messages(self, folderid="", stack=1, cursor=""):
"""Feed of all messages :param folderid: The folder to fetch messages from, defaults to inbox :param stack: True to use stacked mode, false to use flat mode """ |
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/messages/feed', {
'folderid' : folderid,
'stack' : stack,
'cursor' : cursor
})
messages = []
for item in response['results']:
m = Message()
m.from_dict(item)
messages.append(m)
return {
"results" : messages,
"has_more" : response['has_more'],
"cursor" : response['cursor']
} |
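A sketch of draining the message feed with the cursor returned above; `da` is the hypothetical authorized client.

cursor = ""
while True:
    feed = da.get_messages(stack=0, cursor=cursor)
    for message in feed["results"]:
        print(message)
    if not feed["has_more"]:
        break
    cursor = feed["cursor"]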
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_message(self, messageid="", folderid="", stackid=""):
"""Delete a message or a message stack :param folderid: The folder to delete the message from, defaults to inbox :param messageid: The message to delete :param stackid: The stack to delete """ |
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/messages/delete', post_data={
'folderid' : folderid,
'messageid' : messageid,
'stackid' : stackid
})
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_feedback(self, feedbacktype="comments", folderid="", stack=1, offset=0, limit=10):
"""Fetch feedback messages :param feedbacktype: Type of feedback messages to fetch (comments/replies/activity) :param folderid: The folder to fetch messages from, defaults to inbox :param stack: True to use stacked mode, false to use flat mode :param offset: the pagination offset :param limit: the pagination limit """ |
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/messages/feedback', {
'type' : feedbacktype,
'folderid' : folderid,
'stack' : stack,
'offset' : offset,
'limit' : limit
})
messages = []
for item in response['results']:
m = Message()
m.from_dict(item)
messages.append(m)
return {
"results" : messages,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_feedback_in_stack(self, stackid, offset=0, limit=10):
"""Fetch feedback messages in a stack :param stackid: Id of the stack :param offset: the pagination offset :param limit: the pagination limit """ |
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/messages/feedback/{}'.format(stackid), {
'offset' : offset,
'limit' : limit
})
messages = []
for item in response['results']:
m = Message()
m.from_dict(item)
messages.append(m)
return {
"results" : messages,
"has_more" : response['has_more'],
"next_offset" : response['next_offset']
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_note(self, noteid):
"""Fetch a single note :param folderid: The UUID of the note """ |
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/{}'.format(noteid))
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send_note(self, to, subject="", body="", noteid=""):
"""Send a note :param to: The username(s) that this note is to :param subject: The subject of the note :param body: The body of the note :param noteid: The UUID of the note that is being responded to """ |
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/send', post_data={
'to[]' : to,
'subject' : subject,
'body' : body,
'noteid' : noteid
})
sent_notes = []
for item in response['results']:
n = {}
n['success'] = item['success']
n['user'] = User()
n['user'].from_dict(item['user'])
sent_notes.append(n)
return sent_notes |
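A sketch of sending one note to two recipients and inspecting the per-recipient results returned above; the usernames are placeholders and `da` is the hypothetical authorized client.

results = da.send_note(
    to=["first_user", "second_user"],
    subject="Hi there",
    body="Just testing the notes endpoint.",
)
for entry in results:
    print(entry["user"], entry["success"])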
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def move_notes(self, noteids, folderid):
"""Move notes to a folder :param noteids: The noteids to move :param folderid: The folderid to move notes to """ |
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/move', post_data={
'noteids[]' : noteids,
'folderid' : folderid
})
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_notes(self, noteids):
"""Delete a note or notes :param noteids: The noteids to delete """ |
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/delete', post_data={
'noteids[]' : noteids
})
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rename_notes_folder(self, title, folderid):
"""Rename a folder :param title: New title of the folder :param folderid: The UUID of the folder to rename """ |
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/folders/rename/{}'.format(folderid), post_data={
'title' : title
})
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_notes_folder(self, folderid):
"""Delete note folder :param folderid: The UUID of the folder to delete """ |
if self.standard_grant_type != "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/folders/remove/{}'.format(folderid))
return response |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _req(self, endpoint, get_data=dict(), post_data=dict()):
"""Helper method to make API calls :param endpoint: The endpoint to make the API call to :param get_data: data send through GET :param post_data: data send through POST """ |
if get_data:
request_parameter = "{}?{}".format(endpoint, urlencode(get_data))
else:
request_parameter = endpoint
try:
encdata = urlencode(post_data, True).encode('utf-8')
response = self.oauth.request(request_parameter, data=encdata)
self._checkResponseForErrors(response)
except HTTPError as e:
raise DeviantartError(e)
return response |
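A standalone illustration of how _req assembles its request: GET data becomes a query string via urlencode, and POST data is urlencoded with doseq=True and byte-encoded, which is what makes list-valued keys such as 'noteids[]' or 'to[]' work. This snippet uses only the standard library and does not touch the network.

from urllib.parse import urlencode  # the wrapper imports this in a 2/3-compatible way

endpoint = '/user/watchers/example'
get_data = {'offset': 0, 'limit': 10}
print("{}?{}".format(endpoint, urlencode(get_data)))   # /user/watchers/example?offset=0&limit=10

post_data = {'noteids[]': ['uuid-1', 'uuid-2'], 'folderid': 'inbox'}
print(urlencode(post_data, True).encode('utf-8'))      # list values expand into repeated keys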
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def revoke_tokens(self):
""" Revoke the authorization token and all tokens that were generated using it. """ |
self.is_active = False
self.save()
self.refresh_token.revoke_tokens() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def revoke_tokens(self):
""" Revokes the refresh token and all access tokens that were generated using it. """ |
self.is_active = False
self.save()
for access_token in self.access_tokens.all():
access_token.revoke() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(self, outfile, encoding):
"""Method override to create self-closing elements. https://docs.djangoproject.com/en/2.0/ref/utils/#django.utils.feedgenerator.SyndicationFeed.write https://github.com/django/django/blob/2.0/django/utils/feedgenerator.py#L216 """ |
try:
handler = EscapeFriendlyXMLGenerator(outfile, encoding, short_empty_elements=True)
except TypeError: # Python 2
handler = EscapeFriendlyXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement('rss', self.rss_attributes())
handler.startElement('channel', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement('rss') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_request(self, request):
""" Try to authenticate the user based on any given tokens that have been provided to the request object. This will try to detect the authentication type and assign the detected User object to the `request.user` variable, similar to the standard Django authentication. """ |
request.auth_type = None
http_authorization = request.META.get("HTTP_AUTHORIZATION", None)
if not http_authorization:
return
auth = http_authorization.split()
self.auth_type = auth[0].lower()
self.auth_value = " ".join(auth[1:]).strip()
request.auth_type = self.auth_type
self.validate_auth_type()
if not self.handler_name:
raise Exception("There is no handler defined for this authentication type.")
self.load_handler()
response = self.handler.validate(self.auth_value, request)
if response is not None:
return response
request.access_token = self.handler.access_token(self.auth_value, request)
request.user = self.handler.authenticate(self.auth_value, request) |
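A standalone illustration of the header parsing performed above: the scheme token is lower-cased to pick a handler, and everything after it is treated as the credential value.

http_authorization = "Bearer abc.def.ghi"
auth = http_authorization.split()
auth_type = auth[0].lower()
auth_value = " ".join(auth[1:]).strip()
print(auth_type, auth_value)  # bearer abc.def.ghi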
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_handler(self):
""" Load the detected handler. """ |
handler_path = self.handler_name.split(".")
handler_module = __import__(".".join(handler_path[:-1]), {}, {}, str(handler_path[-1]))
self.handler = getattr(handler_module, handler_path[-1])() |
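A standalone sketch of the dynamic-import trick used in load_handler, applied to a standard-library class path so it can actually run; the real OAuth handler paths live in the HANDLERS setting and are not shown in this file.

handler_name = "json.decoder.JSONDecoder"
handler_path = handler_name.split(".")
handler_module = __import__(".".join(handler_path[:-1]), {}, {}, str(handler_path[-1]))
handler = getattr(handler_module, handler_path[-1])()
print(type(handler))  # <class 'json.decoder.JSONDecoder'>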
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_auth_type(self):
""" Validate the detected authorization type against the list of handlers. This will return the full module path to the detected handler. """ |
for handler in HANDLERS:
handler_type = handler.split(".")[-2]
if handler_type == self.auth_type:
self.handler_name = handler
return
self.handler_name = None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def show_url(context, **kwargs):
"""Return the show feed URL with different protocol.""" |
if len(kwargs) != 2:
raise TemplateSyntaxError(_('"show_url" tag takes exactly two keyword arguments.'))
request = context['request']
current_site = get_current_site(request)
url = add_domain(current_site.domain, kwargs['url'])
return re.sub(r'https?:\/\/', '%s://' % kwargs['protocol'], url) |
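A standalone illustration of the protocol swap performed by the tag; add_domain and the current-site lookup are omitted because they need a Django request object.

import re

url = "http://example.com/feeds/show/42/"
print(re.sub(r'https?:\/\/', '%s://' % 'https', url))  # https://example.com/feeds/show/42/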
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loads(text):
""" Parses TOML text into a dict-like object and returns it. """ |
from prettytoml.parser import parse_tokens
from prettytoml.lexer import tokenize as lexer
from .file import TOMLFile
tokens = tuple(lexer(text, is_top_level=True))
elements = parse_tokens(tokens)
return TOMLFile(elements) |
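A round-trip sketch for loads, assuming the prettytoml/contoml packages this module wraps are importable; dict-style access on the returned TOMLFile object is also an assumption, not confirmed by this file.

toml_source = '\n'.join([
    '[owner]',
    'name = "Tom"',
    '',
])

toml_file = loads(toml_source)
print(toml_file['owner']['name'])  # Tom (assumes dict-style access)
print(dumps(toml_file))            # serializes back to TOML source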
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dumps(value):
""" Dumps a data structure to TOML source code. The given value must be either a dict of dict values, a dict, or a TOML file constructed by this module. """ |
from contoml.file.file import TOMLFile
if not isinstance(value, TOMLFile):
raise RuntimeError("Can only dump a TOMLFile instance loaded by load() or loads()")
return value.dumps() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump(obj, file_path, prettify=False):
""" Dumps a data structure to the filesystem as TOML. The given value must be either a dict of dict values, a dict, or a TOML file constructed by this module. """ |
with open(file_path, 'w') as fp:
fp.write(dumps(obj)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rate(base, target, error_log=None):
"""Get current exchange rate. :param base: A base currency :param target: Convert to the target currency :param error_log: A callable function to track the exception It parses current exchange rate from these services: 1) Yahoo finance 2) fixer.io 3) European Central Bank It will fallback to the next service when previous not available. The exchane rate is a decimal number. If `None` is returned, it means the parsing goes wrong:: Decimal('6.2045') """ |
if base == target:
return decimal.Decimal(1.00)
services = [yahoo, fixer, ecb]
if error_log is None:
error_log = _error_log
for fn in services:
try:
return fn(base, target)
except Exception as e:
error_log(e)
return None |
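A standalone sketch of the fallback pattern rate() uses: try each provider in order, report failures through the error log, and return None only if every provider fails. The provider functions here are stand-ins, not the real yahoo/fixer/ecb parsers.

import decimal

def provider_a(base, target):
    raise RuntimeError("service unavailable")

def provider_b(base, target):
    return decimal.Decimal("6.2045")

def get_rate(base, target, providers=(provider_a, provider_b)):
    for fn in providers:
        try:
            return fn(base, target)
        except Exception as exc:
            print("provider failed:", exc)
    return None

print(get_rate("USD", "CNY"))  # Decimal('6.2045') from the second provider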
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def yahoo(base, target):
"""Parse data from Yahoo.""" |
api_url = 'http://download.finance.yahoo.com/d/quotes.csv'
resp = requests.get(
api_url,
params={
'e': '.csv',
'f': 'sl1d1t1',
's': '{0}{1}=X'.format(base, target)
},
timeout=1,
)
value = resp.text.split(',', 2)[1]
return decimal.Decimal(value) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fixer(base, target):
"""Parse data from fixer.io.""" |
api_url = 'http://api.fixer.io/latest'
resp = requests.get(
api_url,
params={
'base': base,
'symbols': target,
},
timeout=1,
)
data = resp.json()
return decimal.Decimal(data['rates'][target]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ecb(base, target):
"""Parse data from European Central Bank.""" |
api_url = 'http://www.ecb.europa.eu/stats/eurofxref/eurofxref-daily.xml'
resp = requests.get(api_url, timeout=1)
text = resp.text
def _find_rate(symbol):
if symbol == 'EUR':
return decimal.Decimal(1.00)
m = re.findall(r"currency='%s' rate='([0-9\.]+)'" % symbol, text)
return decimal.Decimal(m[0])
return _find_rate(target) / _find_rate(base) |
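A standalone illustration of the EUR cross-rate arithmetic in ecb(): the ECB feed quotes every currency against EUR, so a base-to-target rate is rate(target) / rate(base). The numbers below are sample values, not live data.

import decimal

eur_rates = {
    'EUR': decimal.Decimal('1.0'),
    'USD': decimal.Decimal('1.10'),
    'GBP': decimal.Decimal('0.85'),
}

def cross_rate(base, target):
    return eur_rates[target] / eur_rates[base]

print(cross_rate('USD', 'GBP'))  # about 0.77 GBP per USD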
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dot(vec1, vec2):
"""Returns the dot product of two Vectors""" |
if isinstance(vec1, Vector3) and isinstance(vec2, Vector3):
return (vec1.x * vec2.x) + (vec1.y * vec2.y) + (vec1.z * vec2.z)
elif isinstance(vec1, Vector4) and isinstance(vec2, Vector4):
return (vec1.x * vec2.x) + (vec1.y * vec2.y) + (vec1.z * vec2.z) + (vec1.w * vec2.w)
else:
raise TypeError("vec1 and vec2 must a Vector type") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def angle(vec1, vec2):
"""Returns the angle between two vectors""" |
dot_vec = dot(vec1, vec2)
mag1 = vec1.length()
mag2 = vec2.length()
result = dot_vec / (mag1 * mag2)
return math.acos(result) |
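A standalone numeric check of the formula above, using plain tuples instead of the Vector3 class: the angle between perpendicular unit vectors should come out as pi/2.

import math

v1, v2 = (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)
dot_vec = sum(a * b for a, b in zip(v1, v2))
mag1 = math.sqrt(sum(a * a for a in v1))
mag2 = math.sqrt(sum(b * b for b in v2))
print(math.acos(dot_vec / (mag1 * mag2)))  # 1.5707963... == pi / 2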
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def project(vec1, vec2):
"""Project vector1 onto vector2.""" |
if isinstance(vec1, Vector3) and isinstance(vec2, Vector3) \
or isinstance(vec1, Vector4) and isinstance(vec2, Vector4):
return dot(vec1, vec2) / vec2.length() * vec2.normalize_copy()
else:
raise ValueError("vec1 and vec2 must be Vector3 or Vector4 objects.") |