<SYSTEM_TASK:>
Remove unnecessary whitespace while keeping logical structure.
<END_TASK>
<USER_TASK:>
Description:
def remove_whitespace(text):
"""Remove unnecessary whitespace while keeping logical structure.
Keyword arguments:
text -- text to remove whitespace from (list)
Retain paragraph structure but remove other whitespace,
such as between words on a line and at the start and end of the text.
""" |
clean_text = []
curr_line = ''
# Collapse runs of blank lines between paragraphs
# Also remove whitespace at the start and end of the text
while text:
if not curr_line:
# Find the first line that is not whitespace and add it
curr_line = text.pop(0)
while not curr_line.strip() and text:
curr_line = text.pop(0)
if curr_line.strip():
clean_text.append(curr_line)
else:
# Filter the rest of the lines
curr_line = text.pop(0)
if not text:
# Add the final line if it is not whitespace
if curr_line.strip():
clean_text.append(curr_line)
continue
if curr_line.strip():
clean_text.append(curr_line)
else:
# If the current line is whitespace then make sure there is
# no more than one consecutive line of whitespace following
if not text[0].strip():
if len(text) > 1 and text[1].strip():
clean_text.append(curr_line)
else:
clean_text.append(curr_line)
# Now filter each individual line for extraneous whitespace
cleaner_text = []
for line in clean_text:
clean_line = ' '.join(line.split())
if not clean_line.strip():
clean_line += '\n'
cleaner_text.append(clean_line)
return cleaner_text |
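A minimal usage sketch of remove_whitespace with hypothetical input; note that it takes a list of lines, not a single string:
lines = ['\n', '  Hello   world \n', '\n', 'Second    paragraph\n', '\n']
print(remove_whitespace(lines))
# -> ['Hello world', '\n', 'Second paragraph']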
<SYSTEM_TASK:>
Filter text using XPath, regex keywords, and tag attributes.
<END_TASK>
<USER_TASK:>
Description:
def parse_text(infile, xpath=None, filter_words=None, attributes=None):
"""Filter text using XPath, regex keywords, and tag attributes.
Keyword arguments:
infile -- HTML or text content to parse (list)
xpath -- an XPath expression (str)
filter_words -- regex keywords (list)
attributes -- HTML tag attributes (list)
Return a list of strings of text.
""" |
infiles = []
text = []
if xpath is not None:
infile = parse_html(infile, xpath)
if isinstance(infile, list):
if isinstance(infile[0], lh.HtmlElement):
infiles = list(infile)
else:
text = [line + '\n' for line in infile]
elif isinstance(infile, lh.HtmlElement):
infiles = [infile]
else:
text = [infile]
else:
infiles = [infile]
if attributes is not None:
attributes = [clean_attr(x) for x in attributes]
attributes = [x for x in attributes if x]
else:
attributes = ['text()']
if not text:
text_xpath = '//*[not(self::script) and not(self::style)]'
for attr in attributes:
for infile in infiles:
if isinstance(infile, lh.HtmlElement):
new_text = infile.xpath('{0}/{1}'.format(text_xpath, attr))
else:
# re.split with a capture group keeps the '\n' delimiters in the list
new_text = [x for x in re.split(r'(\n)', infile) if x]
text += new_text
if filter_words is not None:
text = re_filter(text, filter_words)
return [''.join(x for x in line if x in string.printable)
for line in remove_whitespace(text) if line] |
<SYSTEM_TASK:>
Parse and return text content of infiles.
<END_TASK>
<USER_TASK:>
Description:
def get_parsed_text(args, infilename):
"""Parse and return text content of infiles.
Keyword arguments:
args -- program arguments (dict)
infilename -- name of user-inputted and/or downloaded file (str)
Return a list of strings of text.
""" |
parsed_text = []
if infilename.endswith('.html'):
# Convert HTML to lxml object for content parsing
html = lh.fromstring(read_files(infilename))
text = None
else:
html = None
text = read_files(infilename)
if html is not None:
parsed_text = parse_text(html, args['xpath'], args['filter'],
args['attributes'])
elif text is not None:
parsed_text = parse_text(text, args['xpath'], args['filter'])
else:
if not args['quiet']:
sys.stderr.write('Failed to parse text from {0}.\n'
.format(infilename))
return parsed_text |
<SYSTEM_TASK:>
Add base netloc and path to internal URLs and remove www, fragments.
<END_TASK>
<USER_TASK:>
Description:
def clean_url(url, base_url=None):
"""Add base netloc and path to internal URLs and remove www, fragments.""" |
parsed_url = urlparse(url)
fragment = parsed_url.fragment
if fragment:
# Split on '#' so only the fragment tail is removed
url = url.split('#')[0]
# Identify internal URLs and fix their format
netloc = parsed_url.netloc
if base_url is not None and not netloc:
parsed_base = urlparse(base_url)
split_base = '{url.scheme}://{url.netloc}{url.path}/'.format(url=parsed_base)
url = urljoin(split_base, url)
netloc = urlparse(url).netloc
if 'www.' in netloc:
url = url.replace(netloc, netloc.replace('www.', ''))
return url.rstrip(string.punctuation) |
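A brief sketch of clean_url with hypothetical URLs, showing fragment removal, base resolution for internal links, and 'www.' stripping:
print(clean_url('/about#team', base_url='http://www.example.com'))
# -> 'http://example.com/about'
print(clean_url('http://www.example.com/docs/'))
# -> 'http://example.com/docs'  (trailing punctuation stripped)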
<SYSTEM_TASK:>
Construct the output filename from domain and end of path.
<END_TASK>
<USER_TASK:>
Description:
def get_outfilename(url, domain=None):
"""Construct the output filename from domain and end of path.""" |
if domain is None:
domain = get_domain(url)
path = '{url.path}'.format(url=urlparse(url))
if '.' in path:
tail_url = path.split('.')[-2]
else:
tail_url = path
if tail_url:
if '/' in tail_url:
tail_pieces = [x for x in tail_url.split('/') if x]
tail_url = tail_pieces[-1]
# Keep length of return string below or equal to max_len
max_len = 24
if domain:
max_len -= (len(domain) + 1)
if len(tail_url) > max_len:
if '-' in tail_url:
tail_pieces = [x for x in tail_url.split('-') if x]
tail_url = tail_pieces.pop(0)
if len(tail_url) > max_len:
tail_url = tail_url[:max_len]
else:
# Add as many tail pieces as will fit
tail_len = 0
for piece in tail_pieces:
tail_len += len(piece)
if tail_len <= max_len:
tail_url += '-' + piece
else:
break
else:
tail_url = tail_url[:max_len]
if domain:
return '{0}-{1}'.format(domain, tail_url).lower()
return tail_url
return domain.lower() |
<SYSTEM_TASK:>
Use first possible entry in query as filename.
<END_TASK>
<USER_TASK:>
Description:
def get_single_outfilename(args):
"""Use first possible entry in query as filename.""" |
for arg in args['query']:
if arg in args['files']:
return ('.'.join(arg.split('.')[:-1])).lower()
for url in args['urls']:
if arg.strip('/') in url:
domain = get_domain(url)
return get_outfilename(url, domain)
sys.stderr.write('Failed to construct a single output filename.\n')
return '' |
<SYSTEM_TASK:>
Modify filename to have a unique numerical identifier.
<END_TASK>
<USER_TASK:>
Description:
def modify_filename_id(filename):
"""Modify filename to have a unique numerical identifier.""" |
split_filename = os.path.splitext(filename)
id_num_re = re.compile(r'(\(\d+\))')
id_num = re.findall(id_num_re, split_filename[-2])
if id_num:
new_id_num = int(id_num[-1].lstrip('(').rstrip(')')) + 1
# Reconstruct filename with incremented id and its extension
filename = ''.join((re.sub(id_num_re, '({0})'.format(new_id_num),
split_filename[-2]), split_filename[-1]))
else:
split_filename = os.path.splitext(filename)
# Reconstruct filename with new id and its extension
filename = ''.join(('{0} (2)'.format(split_filename[-2]),
split_filename[-1]))
return filename |
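A short example of modify_filename_id with hypothetical filenames; repeated calls keep incrementing the identifier:
print(modify_filename_id('notes.txt'))      # -> 'notes (2).txt'
print(modify_filename_id('notes (2).txt'))  # -> 'notes (3).txt'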
<SYSTEM_TASK:>
If filename exists, overwrite or modify it to be unique.
<END_TASK>
<USER_TASK:>
Description:
def overwrite_file_check(args, filename):
"""If filename exists, overwrite or modify it to be unique.""" |
if not args['overwrite'] and os.path.exists(filename):
# Confirm overwriting of the file, or modify filename
if args['no_overwrite']:
overwrite = False
else:
try:
overwrite = confirm_input(input('Overwrite {0}? (yes/no): '
.format(filename)))
except (KeyboardInterrupt, EOFError):
sys.exit()
if not overwrite:
new_filename = modify_filename_id(filename)
while os.path.exists(new_filename):
new_filename = modify_filename_id(new_filename)
return new_filename
return filename |
<SYSTEM_TASK:>
Print text content of infiles to stdout.
<END_TASK>
<USER_TASK:>
Description:
def print_text(args, infilenames, outfilename=None):
"""Print text content of infiles to stdout.
Keyword arguments:
args -- program arguments (dict)
infilenames -- names of user-inputted and/or downloaded files (list)
outfilename -- only used for interface purposes (None)
""" |
for infilename in infilenames:
parsed_text = get_parsed_text(args, infilename)
if parsed_text:
for line in parsed_text:
print(line)
print('') |
<SYSTEM_TASK:>
Write a single file to disk.
<END_TASK>
<USER_TASK:>
Description:
def write_file(data, outfilename):
"""Write a single file to disk.""" |
if not data:
return False
try:
with open(outfilename, 'w') as outfile:
for line in data:
if line:
outfile.write(line)
return True
except (OSError, IOError) as err:
sys.stderr.write('An error occurred while writing {0}:\n{1}'
.format(outfilename, str(err)))
return False |
<SYSTEM_TASK:>
Get the number of PART.html files currently saved to disk.
<END_TASK>
<USER_TASK:>
Description:
def get_num_part_files():
"""Get the number of PART.html files currently saved to disk.""" |
num_parts = 0
for filename in os.listdir(os.getcwd()):
if filename.startswith('PART') and filename.endswith('.html'):
num_parts += 1
return num_parts |
<SYSTEM_TASK:>
Check user input for yes, no, or an exit signal.
<END_TASK>
<USER_TASK:>
Description:
def confirm_input(user_input):
"""Check user input for yes, no, or an exit signal.""" |
if isinstance(user_input, list):
user_input = ''.join(user_input)
try:
u_inp = user_input.lower().strip()
except AttributeError:
u_inp = user_input
# Check for exit signal
if u_inp in ('q', 'quit', 'exit'):
sys.exit()
if u_inp in ('y', 'yes'):
return True
return False |
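A few illustrative calls to confirm_input (hypothetical inputs); any input other than a yes variant returns False, and quit-like inputs exit:
assert confirm_input('YES ') is True       # case and whitespace are normalized
assert confirm_input('n') is False         # anything but y/yes is False
assert confirm_input(['y', 'es']) is True  # list input is joined first
# confirm_input('q') would call sys.exit()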
<SYSTEM_TASK:>
Converts between two inputted chemical formats.
<END_TASK>
<USER_TASK:>
Description:
def convert(data, in_format, out_format, name=None, pretty=False):
"""Converts between two inputted chemical formats.
Args:
data: A string representing the chemical file to be converted. If the
`in_format` is "json", this can also be a Python object
in_format: The format of the `data` string. Can be "json" or any format
recognized by Open Babel
out_format: The format to convert to. Can be "json" or any format
recognized by Open Babel
name: (Optional) If `out_format` is "json", will save the specified
value in a "name" property
pretty: (Optional) If True and `out_format` is "json", will pretty-
print the output for human readability
Returns:
A string representing the inputted `data` in the specified `out_format`
""" |
# Decide on a json formatter depending on desired prettiness
dumps = json.dumps if pretty else json.compress
# Shortcut for avoiding pybel dependency
if not has_ob and in_format == 'json' and out_format == 'json':
return dumps(json.loads(data) if is_string(data) else data)
elif not has_ob:
raise ImportError("Chemical file format conversion requires pybel.")
# These use the open babel library to interconvert, with additions for json
if in_format == 'json':
mol = json_to_pybel(json.loads(data) if is_string(data) else data)
elif in_format == 'pybel':
mol = data
else:
mol = pybel.readstring(in_format, data)
# Infer structure in cases where the input format has no specification
if not mol.OBMol.HasNonZeroCoords():
mol.make3D()
# Make P1 if that's a thing, recalculating bonds in process
if in_format == 'mmcif' and hasattr(mol, 'unitcell'):
mol.unitcell.FillUnitCell(mol.OBMol)
mol.OBMol.ConnectTheDots()
mol.OBMol.PerceiveBondOrders()
mol.OBMol.Center()
if out_format == 'pybel':
return mol
elif out_format == 'object':
return pybel_to_json(mol, name)
elif out_format == 'json':
return dumps(pybel_to_json(mol, name))
else:
return mol.write(out_format) |
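A hedged usage sketch of convert, assuming the Open Babel python bindings (pybel) are installed; the SMILES string below is a hypothetical input:
smiles = 'CCO'  # ethanol
json_str = convert(smiles, in_format='smi', out_format='json', pretty=True)
print(json_str)  # JSON text with 'atoms', 'bonds', 'formula', ... keys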
<SYSTEM_TASK:>
Converts python data structure to pybel.Molecule.
<END_TASK>
<USER_TASK:>
Description:
def json_to_pybel(data, infer_bonds=False):
"""Converts python data structure to pybel.Molecule.
This will infer bond data if not specified.
Args:
data: The loaded json data of a molecule, as a Python object
infer_bonds (Optional): If no bonds specified in input, infer them
Returns:
An instance of `pybel.Molecule`
""" |
obmol = ob.OBMol()
obmol.BeginModify()
for atom in data['atoms']:
obatom = obmol.NewAtom()
obatom.SetAtomicNum(table.GetAtomicNum(str(atom['element'])))
obatom.SetVector(*atom['location'])
if 'label' in atom:
pd = ob.OBPairData()
pd.SetAttribute('_atom_site_label')
pd.SetValue(atom['label'])
obatom.CloneData(pd)
# If there is no bond data, try to infer bonds
if 'bonds' not in data or not data['bonds']:
if infer_bonds:
obmol.ConnectTheDots()
obmol.PerceiveBondOrders()
# Otherwise, use the bonds in the data set
else:
for bond in data['bonds']:
if 'atoms' not in bond:
continue
obmol.AddBond(bond['atoms'][0] + 1, bond['atoms'][1] + 1,
bond['order'])
# Check for unit cell data
if 'unitcell' in data:
uc = ob.OBUnitCell()
uc.SetData(*(ob.vector3(*v) for v in data['unitcell']))
uc.SetSpaceGroup('P1')
obmol.CloneData(uc)
obmol.EndModify()
mol = pybel.Molecule(obmol)
# Add partial charges
if 'charge' in data['atoms'][0]:
mol.OBMol.SetPartialChargesPerceived()
for atom, pyatom in zip(data['atoms'], mol.atoms):
pyatom.OBAtom.SetPartialCharge(atom['charge'])
return mol |
<SYSTEM_TASK:>
Converts a pybel molecule to json.
<END_TASK>
<USER_TASK:>
Description:
def pybel_to_json(molecule, name=None):
"""Converts a pybel molecule to json.
Args:
molecule: An instance of `pybel.Molecule`
name: (Optional) If specified, will save a "name" property
Returns:
A Python dictionary containing atom and bond data
""" |
# Save atom element type and 3D location.
atoms = [{'element': table.GetSymbol(atom.atomicnum),
'location': list(atom.coords)}
for atom in molecule.atoms]
# Recover auxiliary data, if it exists
for json_atom, pybel_atom in zip(atoms, molecule.atoms):
if pybel_atom.partialcharge != 0:
json_atom['charge'] = pybel_atom.partialcharge
if pybel_atom.OBAtom.HasData('_atom_site_label'):
obatom = pybel_atom.OBAtom
json_atom['label'] = obatom.GetData('_atom_site_label').GetValue()
if pybel_atom.OBAtom.HasData('color'):
obatom = pybel_atom.OBAtom
json_atom['color'] = obatom.GetData('color').GetValue()
# Save number of bonds and indices of endpoint atoms
bonds = [{'atoms': [b.GetBeginAtom().GetIndex(),
b.GetEndAtom().GetIndex()],
'order': b.GetBondOrder()}
for b in ob.OBMolBondIter(molecule.OBMol)]
output = {'atoms': atoms, 'bonds': bonds, 'units': {}}
# If there's unit cell data, save it to the json output
if hasattr(molecule, 'unitcell'):
uc = molecule.unitcell
output['unitcell'] = [[v.GetX(), v.GetY(), v.GetZ()]
for v in uc.GetCellVectors()]
density = (sum(atom.atomicmass for atom in molecule.atoms) /
(uc.GetCellVolume() * 0.6022))
output['density'] = density
output['units']['density'] = 'kg / L'
# Save the formula to json. Use Hill notation, just to have a standard.
element_count = Counter(table.GetSymbol(a.atomicnum) for a in molecule)
hill_count = []
for element in ['C', 'H']:
if element in element_count:
hill_count += [(element, element_count[element])]
del element_count[element]
hill_count += sorted(element_count.items())
# If it's a crystal, then reduce the Hill formula
div = (reduce(gcd, (c[1] for c in hill_count))
if hasattr(molecule, 'unitcell') else 1)
output['formula'] = ''.join(n if c // div == 1 else '%s%d' % (n, c // div)
for n, c in hill_count)
output['molecular_weight'] = molecule.molwt / div
output['units']['molecular_weight'] = 'g / mol'
# If the input has been given a name, add that
if name:
output['name'] = name
return output |
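For orientation, a hypothetical sketch of the dictionary shape pybel_to_json returns for a small molecule (exact coordinates and weights depend on the Open Babel build):
# {'atoms': [{'element': 'C', 'location': [x, y, z]}, ...],
#  'bonds': [{'atoms': [0, 1], 'order': 1}, ...],
#  'formula': 'C2H6O',
#  'molecular_weight': 46.07,
#  'units': {'molecular_weight': 'g / mol'}}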
<SYSTEM_TASK:>
Fired when an unserializable object is hit.
<END_TASK>
<USER_TASK:>
Description:
def default(self, obj):
"""Fired when an unserializable object is hit.""" |
if hasattr(obj, '__dict__'):
return obj.__dict__.copy()
elif HAS_NUMPY and isinstance(obj, np.ndarray):
return obj.copy().tolist()
else:
raise TypeError(("Object of type {0} with value of {1} is not "
"JSON serializable").format(type(obj), repr(obj))) |
<SYSTEM_TASK:>
Converts input chemical formats to json and optimizes structure.
<END_TASK>
<USER_TASK:>
Description:
def generate(data, format="auto"):
"""Converts input chemical formats to json and optimizes structure.
Args:
data: A string or file representing a chemical
format: The format of the `data` variable (default is 'auto')
The `format` can be any value specified by Open Babel
(http://openbabel.org/docs/2.3.1/FileFormats/Overview.html). The 'auto'
option uses the extension for files (i.e. my_file.mol -> mol) and defaults
to SMILES (smi) for strings.
""" |
# Support both files and strings and attempt to infer file type
try:
with open(data) as in_file:
if format == 'auto':
format = data.split('.')[-1]
data = in_file.read()
except (IOError, OSError, TypeError):
# Not a readable file path; treat data as a raw chemical string
if format == 'auto':
format = 'smi'
return format_converter.convert(data, format, 'json') |
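A hedged sketch of generate; the filename is hypothetical, and a raw string falls back to SMILES:
generate('benzene.mol')   # file path: format inferred from the .mol extension
generate('c1ccccc1')      # raw string: parsed as SMILES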
<SYSTEM_TASK:>
Starts up the imolecule server, complete with argparse handling.
<END_TASK>
<USER_TASK:>
Description:
def start_server():
"""Starts up the imolecule server, complete with argparse handling.""" |
parser = argparse.ArgumentParser(description="Opens a browser-based "
"client that interfaces with the "
"chemical format converter.")
parser.add_argument('--debug', action="store_true", help="Prints all "
"transmitted data streams.")
parser.add_argument('--port', type=int, default=8000, help="The port "
"on which to serve the website.")
parser.add_argument('--timeout', type=int, default=5, help="The maximum "
"time, in seconds, allowed for a process to run "
"before returning an error.")
parser.add_argument('--workers', type=int, default=2, help="The number of "
"worker processes to use with the server.")
parser.add_argument('--no-browser', action="store_true", help="Disables "
"opening a browser window on startup.")
global args
args = parser.parse_args()
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
handlers = [(r'/', IndexHandler), (r'/websocket', WebSocket),
(r'/static/(.*)', tornado.web.StaticFileHandler,
{'path': os.path.normpath(os.path.dirname(__file__))})]
application = tornado.web.Application(handlers)
application.listen(args.port)
if not args.no_browser:
webbrowser.open('http://localhost:%d/' % args.port, new=2)
try:
tornado.ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
sys.stderr.write("Received keyboard interrupt. Stopping server.\n")
tornado.ioloop.IOLoop.instance().stop()
sys.exit(1) |
<SYSTEM_TASK:>
Parse an SCI command response into ElementTree XML
<END_TASK>
<USER_TASK:>
Description:
def _parse_command_response(response):
"""Parse an SCI command response into ElementTree XML
This is a helper method that takes a Requests Response object
of an SCI command response and will parse it into an ElementTree Element
representing the root of the XML response.
:param response: The requests response object
:return: An ElementTree Element that is the root of the response XML
:raises ResponseParseError: If the response XML is not well formed
""" |
try:
root = ET.fromstring(response.text)
except ET.ParseError:
raise ResponseParseError(
"Unexpected response format, could not parse XML. Response: {}".format(response.text))
return root |
<SYSTEM_TASK:>
Parse an error ElementTree Node to create an ErrorInfo object
<END_TASK>
<USER_TASK:>
Description:
def _parse_error_tree(error):
"""Parse an error ElementTree Node to create an ErrorInfo object
:param error: The ElementTree error node
:return: An ErrorInfo object containing the error ID and the message.
""" |
errinf = ErrorInfo(error.get('id'), None)
if error.text is not None:
errinf.message = error.text
else:
desc = error.find('./desc')
if desc is not None:
errinf.message = desc.text
return errinf |
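Two illustrative error payloads this helper understands (hypothetical ids and messages):
err = ET.fromstring('<error id="1">No such file</error>')
info = _parse_error_tree(err)  # message taken from the node text
err = ET.fromstring('<error id="2"><desc>Permission denied</desc></error>')
info = _parse_error_tree(err)  # message taken from the <desc> child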
<SYSTEM_TASK:>
Get the contents of this file
<END_TASK>
<USER_TASK:>
Description:
def get_data(self):
"""Get the contents of this file
:return: The contents of this file
:rtype: six.binary_type
""" |
target = DeviceTarget(self.device_id)
return self._fssapi.get_file(target, self.path)[self.device_id] |
<SYSTEM_TASK:>
Delete this file from the device
<END_TASK>
<USER_TASK:>
Description:
def delete(self):
"""Delete this file from the device
.. note::
After deleting the file, this object will no longer contain valid information
and further calls to delete or get_data will return :class:`~.ErrorInfo` objects
""" |
target = DeviceTarget(self.device_id)
return self._fssapi.delete_file(target, self.path)[self.device_id] |
<SYSTEM_TASK:>
List the contents of this directory
<END_TASK>
<USER_TASK:>
Description:
def list_contents(self):
"""List the contents of this directory
:return: A LsInfo object that contains directories and files
:rtype: :class:`~.LsInfo` or :class:`~.ErrorInfo`
Here is an example usage::
# let dirinfo be a DirectoryInfo object
ldata = dirinfo.list_contents()
if isinstance(ldata, ErrorInfo):
# Do some error handling
logger.warn("Error listing file info: (%s) %s", ldata.errno, ldata.message)
# It's of type LsInfo
else:
# Look at all the files
for finfo in ldata.files:
logger.info("Found file %s of size %s", finfo.path, finfo.size)
# Look at all the directories
for dinfo in ldata.directories:
logger.info("Found directory %s of last modified %s", dinfo.path, dinfo.last_modified)
""" |
target = DeviceTarget(self.device_id)
return self._fssapi.list_files(target, self.path)[self.device_id] |
<SYSTEM_TASK:>
Parse the server response for this ls command
<END_TASK>
<USER_TASK:>
Description:
def parse_response(cls, response, device_id=None, fssapi=None, **kwargs):
"""Parse the server response for this ls command
This will parse xml of the following form::
<ls hash="hash_type">
<file path="file_path" last_modified=last_modified_time ... />
...
<dir path="dir_path" last_modified=last_modified_time />
...
</ls>
or with an error::
<ls>
<error ... />
</ls>
:param response: The XML root of the response for an ls command
:type response: :class:`xml.etree.ElementTree.Element`
:param device_id: The device id of the device this ls response came from
:param fssapi: A reference to a :class:`~FileSystemServiceAPI` for use with the
:class:`~FileInfo` and :class:`~DirectoryInfo` objects for future commands
:return: An :class:`~LsInfo` object containing the list of directories and files on
the device or an :class:`~ErrorInfo` if the xml contained an error
""" |
if response.tag != cls.command_name:
raise ResponseParseError(
"Received response of type {}, LsCommand can only parse responses of type {}".format(response.tag,
cls.command_name))
if fssapi is None:
raise FileSystemServiceException("fssapi is required to parse an LsCommand response")
if device_id is None:
raise FileSystemServiceException("device_id is required to parse an LsCommand response")
error = response.find('./error')
if error is not None:
return _parse_error_tree(error)
hash_type = response.get('hash')
dirs = []
files = []
# Get each file listed in this response
for myfile in response.findall('./file'):
fi = FileInfo(fssapi,
device_id,
myfile.get('path'),
int(myfile.get('last_modified')),
int(myfile.get('size')),
myfile.get('hash'),
hash_type)
files.append(fi)
# Get each directory listed for this device
for mydir in response.findall('./dir'):
di = DirectoryInfo(fssapi,
device_id,
mydir.get('path'),
int(mydir.get('last_modified')))
dirs.append(di)
return LsInfo(directories=dirs, files=files) |
<SYSTEM_TASK:>
Parse the server response for this get file command
<END_TASK>
<USER_TASK:>
Description:
def parse_response(cls, response, **kwargs):
"""Parse the server response for this get file command
This will parse xml of the following form::
<get_file>
<data>
asdfasdfasdfasdfasf
</data>
</get_file>
or with an error::
<get_file>
<error ... />
</get_file>
:param response: The XML root of the response for a get file command
:type response: :class:`xml.etree.ElementTree.Element`
:return: a six.binary_type string of the data of a file or an :class:`~ErrorInfo` if the xml contained an error
""" |
if response.tag != cls.command_name:
raise ResponseParseError(
"Received response of type {}, GetCommand can only parse responses of type {}".format(response.tag,
cls.command_name))
error = response.find('./error')
if error is not None:
return _parse_error_tree(error)
text = response.find('./data').text
if text:
return base64.b64decode(six.b(text))
else:
return six.b('') |
<SYSTEM_TASK:>
Parse the server response for this put file command
<END_TASK>
<USER_TASK:>
Description:
def parse_response(cls, response, **kwargs):
"""Parse the server response for this put file command
This will parse xml of the following form::
<put_file />
or with an error::
<put_file>
<error ... />
</put_file>
:param response: The XML root of the response for a put file command
:type response: :class:`xml.etree.ElementTree.Element`
:return: None if everything was ok or an :class:`~ErrorInfo` if the xml contained an error
""" |
if response.tag != cls.command_name:
raise ResponseParseError(
"Received response of type {}, PutCommand can only parse responses of type {}".format(response.tag,
cls.command_name))
error = response.find('./error')
if error is not None:
return _parse_error_tree(error)
return None |
<SYSTEM_TASK:>
Send an arbitrary file system command block
<END_TASK>
<USER_TASK:>
Description:
def send_command_block(self, target, command_block):
"""Send an arbitrary file system command block
The primary use for this method is to send multiple file system commands with a single
web service request. This can help to avoid throttling.
:param target: The device(s) to be targeted with this request
:type target: :class:`devicecloud.sci.TargetABC` or list of :class:`devicecloud.sci.TargetABC` instances
:param command_block: The block of commands to execute on the target
:type command_block: :class:`~FileSystemServiceCommandBlock`
:return: The response will be a dictionary where the keys are device_ids and the values are
the parsed responses of each command sent in the order listed in the command response for
that device. In practice it seems to be the same order as the commands were sent in, however,
Device Cloud documentation does not explicitly state anywhere that is the case so I cannot
guarantee it. This does mean that if you send different types of commands the response list
will be different types. Please see the commands parse_response functions for what those types
will be. (:meth:`LsCommand.parse_response`, :class:`GetCommand.parse_response`,
:class:`PutCommand.parse_response`, :class:`DeleteCommand.parse_response`)
""" |
root = _parse_command_response(
self._sci_api.send_sci("file_system", target, command_block.get_command_string()))
out_dict = {}
for device in root.findall('./file_system/device'):
device_id = device.get('id')
results = []
for command in device.find('./commands'):
for command_class in FILE_SYSTEM_COMMANDS:
if command_class.command_name == command.tag.lower():
results.append(command_class.parse_response(command, fssapi=self, device_id=device_id))
out_dict[device_id] = results
return out_dict |
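A hedged sketch of batching commands, assuming fssapi is a FileSystemServiceAPI instance and the command classes from this module are importable; the device id is hypothetical:
target = DeviceTarget('00000000-00000000-00409DFF-FF123456')
block = FileSystemServiceCommandBlock()
block.add_command(LsCommand('/root', hash='any'))
block.add_command(GetCommand('/root/file.txt', None, None))
for device_id, results in fssapi.send_command_block(target, block).items():
    print(device_id, results)  # e.g. [LsInfo(...), b'...file bytes...']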
<SYSTEM_TASK:>
List all files and directories in the path on the target
<END_TASK>
<USER_TASK:>
Description:
def list_files(self, target, path, hash='any'):
"""List all files and directories in the path on the target
:param target: The device(s) to be targeted with this request
:type target: :class:`devicecloud.sci.TargetABC` or list of :class:`devicecloud.sci.TargetABC` instances
:param path: The path on the target to list files and directories from
:param hash: an optional attribute which indicates a hash over the file contents should be retrieved. Values
include none, any, md5, and crc32. any is used to indicate the device should choose its best available hash.
:return: A dictionary with keys of device ids and values of :class:`~.LsInfo` objects containing the files and
directories or an :class:`~.ErrorInfo` object if there was an error response
:raises: :class:`~.ResponseParseError` If the SCI response has unrecognized formatting
Here is an example usage::
# dc is a DeviceCloud instance
fssapi = dc.get_fss_api()
target = AllTarget()
ls_dir = '/root/home/user/important_files/'
ls_data = fssapi.list_files(target, ls_dir)
# Loop over all device results
for device_id, device_data in ls_data.iteritems():
# Check if it succeeded or was an error
if isinstance(device_data, ErrorInfo):
# Do some error handling
logger.warn("Error listing file info on device %s. errno: %s message:%s",
device_id, device_data.errno, device_data.message)
# It's of type LsInfo
else:
# Look at all the files
for finfo in device_data.files:
logger.info("Found file %s of size %s on device %s",
finfo.path, finfo.size, device_id)
# Look at all the directories
for dinfo in device_data.directories:
logger.info("Found directory %s of last modified %s on device %s",
dinfo.path, dinfo.last_modified, device_id)
""" |
command_block = FileSystemServiceCommandBlock()
command_block.add_command(LsCommand(path, hash=hash))
root = _parse_command_response(
self._sci_api.send_sci("file_system", target, command_block.get_command_string()))
out_dict = {}
# At this point the XML we have is of the form
# <sci_reply>
# <file_system>
# <device id="device_id">
# <commands>
# <ls hash="hash_type">
# <file path="file_path" last_modified=last_modified_time ... />
# ...
# <dir path="dir_path" last_modified=last_modified_time />
# ...
# </ls>
# </commands>
# </device>
# <device id="device_id">
# <commands>
# <ls hash="hash_type">
# <file path="file_path" last_modified=last_modified_time ... />
# ...
# <dir path="dir_path" last_modified=last_modified_time />
# ...
# </ls>
# </commands>
# </device>
# ...
# </file_system>
# </sci_reply>
# Here we will get each of the XML trees rooted at the device nodes
for device in root.findall('./file_system/device'):
device_id = device.get('id')
error = device.find('./error')
if error is not None:
out_dict[device_id] = _parse_error_tree(error)
else:
linfo = LsCommand.parse_response(device.find('./commands/ls'), device_id=device_id, fssapi=self)
out_dict[device_id] = linfo
return out_dict |
<SYSTEM_TASK:>
Get the contents of a file on the device
<END_TASK>
<USER_TASK:>
Description:
def get_file(self, target, path, offset=None, length=None):
"""Get the contents of a file on the device
:param target: The device(s) to be targeted with this request
:type target: :class:`devicecloud.sci.TargetABC` or list of :class:`devicecloud.sci.TargetABC` instances
:param path: The path on the target to the file to retrieve
:param offset: Start retrieving data from this byte position in the file, if None start from the beginning
:param length: How many bytes to retrieve, if None retrieve until the end of the file
:return: A dictionary with keys of device ids and values of the bytes of the file (or partial file if offset
and/or length are specified) or an :class:`~.ErrorInfo` object if there was an error response
:raises: :class:`~.ResponseParseError` If the SCI response has unrecognized formatting
""" |
command_block = FileSystemServiceCommandBlock()
command_block.add_command(GetCommand(path, offset, length))
root = _parse_command_response(
self._sci_api.send_sci("file_system", target, command_block.get_command_string()))
out_dict = {}
for device in root.findall('./file_system/device'):
device_id = device.get('id')
error = device.find('./error')
if error is not None:
out_dict[device_id] = _parse_error_tree(error)
else:
data = GetCommand.parse_response(device.find('./commands/get_file'))
out_dict[device_id] = data
return out_dict |
<SYSTEM_TASK:>
Put data into a file on the device
<END_TASK>
<USER_TASK:>
Description:
def put_file(self, target, path, file_data=None, server_file=None, offset=None, truncate=False):
"""Put data into a file on the device
:param target: The device(s) to be targeted with this request
:type target: :class:`devicecloud.sci.TargetABC` or list of :class:`devicecloud.sci.TargetABC` instances
:param path: The path on the target to the file to write to. If the file already exists it will be overwritten.
:param file_data: A `six.binary_type` containing the data to put into the file
:param server_file: The path to a file on the devicecloud server containing the data to put into the file on the
device
:param offset: Start writing bytes to the file at this position, if None start at the beginning
:param truncate: Boolean, if True end the file there after the bytes are written, even if previous data
exists beyond it. If False, leave any existing data in place.
:return: A dictionary with keys being device ids and value being None if successful or an :class:`~.ErrorInfo`
if the operation failed on that device
:raises: :class:`~.FileSystemServiceException` if either both file_data and server_file are specified or
neither are specified
:raises: :class:`~.ResponseParseError` If the SCI response has unrecognized formatting
""" |
command_block = FileSystemServiceCommandBlock()
command_block.add_command(PutCommand(path, file_data, server_file, offset, truncate))
root = _parse_command_response(self._sci_api.send_sci("file_system", target, command_block.get_command_string()))
out_dict = {}
for device in root.findall('./file_system/device'):
device_id = device.get('id')
error = device.find('./error')
if error is not None:
out_dict[device_id] = _parse_error_tree(error)
else:
out_dict[device_id] = PutCommand.parse_response(device.find('./commands/put_file'))
return out_dict |
<SYSTEM_TASK:>
Delete a file from a device
<END_TASK>
<USER_TASK:>
Description:
def delete_file(self, target, path):
"""Delete a file from a device
:param target: The device(s) to be targeted with this request
:type target: :class:`devicecloud.sci.TargetABC` or list of :class:`devicecloud.sci.TargetABC` instances
:param path: The path on the target to the file to delete.
:return: A dictionary with keys being device ids and value being None if successful or an :class:`~.ErrorInfo`
if the operation failed on that device
:raises: :class:`~.ResponseParseError` If the SCI response has unrecognized formatting
""" |
command_block = FileSystemServiceCommandBlock()
command_block.add_command(DeleteCommand(path))
root = _parse_command_response(self._sci_api.send_sci("file_system", target, command_block.get_command_string()))
out_dict = {}
for device in root.findall('./file_system/device'):
device_id = device.get('id')
error = device.find('./error')
if error is not None:
out_dict[device_id] = _parse_error_tree(error)
else:
out_dict[device_id] = DeleteCommand.parse_response(device.find('./commands/rm'))
return out_dict |
<SYSTEM_TASK:>
Get all files and directories from a path on the device modified since a given time
<END_TASK>
<USER_TASK:>
Description:
def get_modified_items(self, target, path, last_modified_cutoff):
"""Get all files and directories from a path on the device modified since a given time
:param target: The device(s) to be targeted with this request
:type target: :class:`devicecloud.sci.TargetABC` or list of :class:`devicecloud.sci.TargetABC` instances
:param path: The path on the target to the directory to check for modified files.
:param last_modified_cutoff: The time (as Unix epoch time) to get files modified since
:type last_modified_cutoff: int
:return: A dictionary where the key is a device id and the value is either an :class:`~.ErrorInfo` if there
was a problem with the operation or a :class:`~.LsInfo` with the items modified since the
specified date
""" |
file_list = self.list_files(target, path)
out_dict = {}
for device_id, device_data in six.iteritems(file_list):
if isinstance(device_data, ErrorInfo):
out_dict[device_id] = device_data
else:
files = []
dirs = []
for cur_file in device_data.files:
if cur_file.last_modified > last_modified_cutoff:
files.append(cur_file)
for cur_dir in device_data.directories:
if cur_dir.last_modified > last_modified_cutoff:
dirs.append(cur_dir)
out_dict[device_id] = LsInfo(directories=dirs, files=files)
return out_dict |
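A hedged usage sketch of get_modified_items; target and path are hypothetical, and the cutoff is Unix epoch time:
import time
cutoff = int(time.time()) - 3600  # items modified in the last hour
for device_id, info in fssapi.get_modified_items(target, '/logs', cutoff).items():
    if not isinstance(info, ErrorInfo):
        print(device_id, [f.path for f in info.files])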
<SYSTEM_TASK:>
Check if path refers to an existing path on the device
<END_TASK>
<USER_TASK:>
Description:
def exists(self, target, path, path_sep="/"):
"""Check if path refers to an existing path on the device
:param target: The device(s) to be targeted with this request
:type target: :class:`devicecloud.sci.TargetABC` or list of :class:`devicecloud.sci.TargetABC` instances
:param path: The path on the target to check for existence.
:param path_sep: The path separator of the device
:return: A dictionary where the key is a device id and the value is either an :class:`~.ErrorInfo` if there
was a problem with the operation or a boolean with the existence status of the path on that device
""" |
if path.endswith(path_sep):
path = path[:-len(path_sep)]
par_dir, filename = path.rsplit(path_sep, 1)
file_list = self.list_files(target, par_dir)
out_dict = {}
for device_id, device_data in six.iteritems(file_list):
if isinstance(device_data, ErrorInfo):
out_dict[device_id] = device_data
else:
out_dict[device_id] = False
for cur_file in device_data.files:
if cur_file.path == path:
out_dict[device_id] = True
for cur_dir in device_data.directories:
if cur_dir.path == path:
out_dict[device_id] = True
return out_dict |
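A hedged sketch of exists with a hypothetical path; each device reports a boolean or an ErrorInfo:
for device_id, present in fssapi.exists(target, '/etc/config.ini').items():
    if not isinstance(present, ErrorInfo):
        print(device_id, 'exists' if present else 'missing')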
<SYSTEM_TASK:>
r"""Return the root group for this accounts' group tree
<END_TASK>
<USER_TASK:>
Description:
def get_group_tree_root(self, page_size=1000):
r"""Return the root group for this accounts' group tree
This will return the root group for this tree but with all links
between nodes (i.e. children starting from root) populated.
Examples::
# print the group hierarchy to stdout
dc.devicecore.get_group_tree_root().print_subtree()
# gather statistics about devices in each group including
# the count from its subgroups (recursively)
#
# This also shows how you can go from a group reference to devices
# for that particular group.
stats = {} # group -> devices count including children
def count_nodes(group):
count_for_this_node = \
len(list(dc.devicecore.get_devices(group_path == group.get_path())))
subnode_count = 0
for child in group.get_children():
subnode_count += count_nodes(child)
total = count_for_this_node + subnode_count
stats[group] = total
return total
count_nodes(dc.devicecore.get_group_tree_root())
:param int page_size: The number of results to fetch in a
single page. In general, the default will suffice.
:returns: The root group for this Device Cloud account's group
hierarchy.
""" |
# first pass, build mapping
group_map = {} # map id -> group
page_size = validate_type(page_size, *six.integer_types)
for group in self.get_groups(page_size=page_size):
group_map[group.get_id()] = group
# second pass, find root and populate list of children for each node
root = None
for group_id, group in group_map.items():
if group.is_root():
root = group
else:
parent = group_map[group.get_parent_id()]
parent.add_child(group)
return root |
<SYSTEM_TASK:>
Return an iterator over all groups in this device cloud account
<END_TASK>
<USER_TASK:>
Description:
def get_groups(self, condition=None, page_size=1000):
"""Return an iterator over all groups in this device cloud account
Optionally, a condition can be specified to limit the number of
groups returned.
Examples::
# Get all groups and print information about them
for group in dc.devicecore.get_groups():
print(group)
# Iterate over all devices which are in a group with a specific
# ID.
group = list(dc.devicecore.get_groups(group_id == 123))[0]
for device in dc.devicecore.get_devices(group_path == group.get_path()):
print(device.get_mac())
:param condition: A condition to use when filtering the results set. If
unspecified, all groups will be returned.
:param int page_size: The number of results to fetch in a
single page. In general, the default will suffice.
:returns: Generator over the groups in this device cloud account. No
guarantees about the order of results are provided and child links
between nodes will not be populated.
""" |
query_kwargs = {}
if condition is not None:
query_kwargs["condition"] = condition.compile()
for group_data in self._conn.iter_json_pages("/ws/Group", page_size=page_size, **query_kwargs):
yield Group.from_json(group_data) |
<SYSTEM_TASK:>
Provision multiple devices with a single API call
<END_TASK>
<USER_TASK:>
Description:
def provision_devices(self, devices):
"""Provision multiple devices with a single API call
This method takes an iterable of dictionaries where the values in the dictionary are
expected to match the arguments of a call to :meth:`provision_device`. The
contents of each dictionary will be validated.
:param list devices: An iterable of dictionaries each containing information about
a device to be provisioned. The form of the dictionary should match the keyword
arguments taken by :meth:`provision_device`.
:raises DeviceCloudHttpException: If there is an unexpected error reported by Device Cloud.
:raises ValueError: If any input fields are known to have a bad form.
:return: A list of dictionaries in the form described for :meth:`provision_device` in the
order matching the requested device list. Note that it is possible for there to
be mixed success and error when provisioning multiple devices.
""" |
# Validate all the input for each device provided
sio = six.StringIO()
def write_tag(tag, val):
sio.write("<{tag}>{val}</{tag}>".format(tag=tag, val=val))
def maybe_write_element(tag, val):
if val is not None:
write_tag(tag, val)
return True
return False
sio.write("<list>")
for d in devices:
sio.write("<DeviceCore>")
mac_address = d.get("mac_address")
device_id = d.get("device_id")
imei = d.get("imei")
if mac_address is not None:
write_tag("devMac", mac_address)
elif device_id is not None:
write_tag("devConnectwareId", device_id)
elif imei is not None:
write_tag("devCellularModemId", imei)
else:
raise ValueError("mac_address, device_id, or imei must be provided for device %r" % d)
# Write optional elements if present.
maybe_write_element("grpPath", d.get("group_path"))
maybe_write_element("dpUserMetaData", d.get("metadata"))
maybe_write_element("dpTags", d.get("tags"))
maybe_write_element("dpMapLong", d.get("map_long"))
maybe_write_element("dpMapLat", d.get("map_lat"))
maybe_write_element("dpContact", d.get("contact"))
maybe_write_element("dpDescription", d.get("description"))
sio.write("</DeviceCore>")
sio.write("</list>")
# Send the request, set the Accept XML as a nicety
results = []
response = self._conn.post("/ws/DeviceCore", sio.getvalue(), headers={'Accept': 'application/xml'})
root = ET.fromstring(response.content) # <result> tag is root of <list> response
for child in root:
if child.tag.lower() == "location":
results.append({
"error": False,
"error_msg": None,
"location": child.text
})
else: # we expect "error" but handle generically
results.append({
"error": True,
"location": None,
"error_msg": child.text
})
return results |
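A hedged sketch of provisioning two devices in one request; dc is assumed to be a DeviceCloud instance and the MAC addresses are hypothetical:
results = dc.devicecore.provision_devices([
    {'mac_address': '00:40:9D:01:02:03', 'group_path': 'test/devices'},
    {'mac_address': '00:40:9D:04:05:06', 'tags': 'lab,prototype'},
])
for result in results:
    if result['error']:
        print('failed:', result['error_msg'])
    else:
        print('created:', result['location'])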
<SYSTEM_TASK:>
Print this group node and the subtree rooted at it
<END_TASK>
<USER_TASK:>
Description:
def print_subtree(self, fobj=sys.stdout, level=0):
"""Print this group node and the subtree rooted at it""" |
fobj.write("{}{!r}\n".format(" " * (level * 2), self))
for child in self.get_children():
child.print_subtree(fobj, level + 1) |
<SYSTEM_TASK:>
Get the JSON metadata for this device as a python data structure
<END_TASK>
<USER_TASK:>
Description:
def get_device_json(self, use_cached=True):
"""Get the JSON metadata for this device as a python data structure
If ``use_cached`` is not True, then a web services request will be made
synchronously in order to get the latest device metadata. This will
update the cached data for this device.
""" |
if not use_cached:
devicecore_data = self._conn.get_json(
"/ws/DeviceCore/{}".format(self.get_device_id()))
self._device_json = devicecore_data["items"][0] # should only be 1
return self._device_json |
<SYSTEM_TASK:>
Get the list of tags for this device
<END_TASK>
<USER_TASK:>
Description:
def get_tags(self, use_cached=True):
"""Get the list of tags for this device""" |
device_json = self.get_device_json(use_cached)
potential_tags = device_json.get("dpTags")
if potential_tags:
return list(filter(None, potential_tags.split(",")))
else:
return [] |
<SYSTEM_TASK:>
Return True if the device is currently connected and False if not
<END_TASK>
<USER_TASK:>
Description:
def is_connected(self, use_cached=True):
"""Return True if the device is currrently connect and False if not""" |
device_json = self.get_device_json(use_cached)
return int(device_json.get("dpConnectionStatus")) > 0 |
<SYSTEM_TASK:>
Get this device's device id
<END_TASK>
<USER_TASK:>
Description:
def get_device_id(self, use_cached=True):
"""Get this device's device id""" |
device_json = self.get_device_json(use_cached)
return device_json["id"].get("devId") |
<SYSTEM_TASK:>
Get the last known IP of this device
<END_TASK>
<USER_TASK:>
Description:
def get_ip(self, use_cached=True):
"""Get the last known IP of this device""" |
device_json = self.get_device_json(use_cached)
return device_json.get("dpLastKnownIp") |
<SYSTEM_TASK:>
Get the MAC address of this device
<END_TASK>
<USER_TASK:>
Description:
def get_mac(self, use_cached=True):
"""Get the MAC address of this device""" |
device_json = self.get_device_json(use_cached)
return device_json.get("devMac") |
<SYSTEM_TASK:>
Get the datetime of when this device was added to Device Cloud
<END_TASK>
<USER_TASK:>
Description:
def get_registration_dt(self, use_cached=True):
"""Get the datetime of when this device was added to Device Cloud""" |
device_json = self.get_device_json(use_cached)
start_date_iso8601 = device_json.get("devRecordStartDate")
if start_date_iso8601:
return iso8601_to_dt(start_date_iso8601)
else:
return None |
<SYSTEM_TASK:>
Get a tuple with device latitude and longitude... these may be None
<END_TASK>
<USER_TASK:>
Description:
def get_latlon(self, use_cached=True):
"""Get a tuple with device latitude and longitude... these may be None""" |
device_json = self.get_device_json(use_cached)
lat = device_json.get("dpMapLat")
lon = device_json.get("dpMapLong")
return (float(lat) if lat else None,
float(lon) if lon else None, ) |
<SYSTEM_TASK:>
Add a device to a group; if the group doesn't exist it is created
<END_TASK>
<USER_TASK:>
Description:
def add_to_group(self, group_path):
"""Add a device to a group, if the group doesn't exist it is created
:param group_path: Path or "name" of the group
""" |
if self.get_group_path() != group_path:
post_data = ADD_GROUP_TEMPLATE.format(connectware_id=self.get_connectware_id(),
group_path=group_path)
self._conn.put('/ws/DeviceCore', post_data)
# Invalidate cache
self._device_json = None |
<SYSTEM_TASK:>
Add a tag to existing device tags. This method will not add a duplicate if the tag is already in the list.
<END_TASK>
<USER_TASK:>
Description:
def add_tag(self, new_tags):
"""Add a tag to existing device tags. This method will not add a duplicate, if already in the list.
:param new_tags: the tag(s) to be added. new_tags can be a comma-separated string or list
""" |
tags = self.get_tags()
orig_tag_cnt = len(tags)
if isinstance(new_tags, six.string_types):
new_tags = new_tags.split(',')
for tag in new_tags:
# Strip before the membership test so ' foo' and 'foo' don't both get added
tag = tag.strip()
if tag not in tags:
tags.append(tag)
if len(tags) > orig_tag_cnt:
xml_tags = escape(",".join(tags))
post_data = TAGS_TEMPLATE.format(connectware_id=self.get_connectware_id(),
tags=xml_tags)
self._conn.put('/ws/DeviceCore', post_data)
# Invalidate cache
self._device_json = None |
<SYSTEM_TASK:>
Remove tag from existing device tags
<END_TASK>
<USER_TASK:>
Description:
def remove_tag(self, tag):
"""Remove tag from existing device tags
:param tag: the tag to be removed from the list
:raises ValueError: If tag does not exist in list
""" |
tags = self.get_tags()
tags.remove(tag)
post_data = TAGS_TEMPLATE.format(connectware_id=self.get_connectware_id(),
tags=escape(",".join(tags)))
self._conn.put('/ws/DeviceCore', post_data)
# Invalidate cache
self._device_json = None |
<SYSTEM_TASK:>
Get the hostname that this connection is associated with
<END_TASK>
<USER_TASK:>
Description:
def hostname(self):
"""Get the hostname that this connection is associated with""" |
from six.moves.urllib.parse import urlparse
return urlparse(self._base_url).netloc.split(':', 1)[0] |
<SYSTEM_TASK:>
Return an iterator over JSON items from a paginated resource
<END_TASK>
<USER_TASK:>
Description:
def iter_json_pages(self, path, page_size=1000, **params):
"""Return an iterator over JSON items from a paginated resource
Legacy resources (prior to V1) implemented a common paging interfaces for
several different resources. This method handles the details of iterating
over the paged result set, yielding only the JSON data for each item
within the aggregate resource.
:param str path: The base path to the resource being requested (e.g. /ws/Group)
:param int page_size: The number of items that should be requested for each page. A larger
page_size may mean fewer HTTP requests but could also increase the time to get a first
result back from Device Cloud.
:param params: These are additional query parameters that should be sent with each
request to Device Cloud.
""" |
path = validate_type(path, *six.string_types)
page_size = validate_type(page_size, *six.integer_types)
offset = 0
remaining_size = 1 # just needs to be non-zero
while remaining_size > 0:
reqparams = {"start": offset, "size": page_size}
reqparams.update(params)
response = self.get_json(path, params=reqparams)
offset += page_size
remaining_size = int(response.get("remainingSize", "0"))
for item_json in response.get("items", []):
yield item_json |
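A hedged sketch of paging through a legacy resource; conn is assumed to be an instance of this connection class, and 'grpName' is a hypothetical field name:
for group_json in conn.iter_json_pages('/ws/Group', page_size=100):
    print(group_json.get('grpName'))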
<SYSTEM_TASK:>
Perform an HTTP GET request of the specified path in Device Cloud
<END_TASK>
<USER_TASK:>
Description:
def get(self, path, **kwargs):
"""Perform an HTTP GET request of the specified path in Device Cloud
Make an HTTP GET request against Device Cloud with this accounts
credentials and base url. This method uses the
`requests <http://docs.python-requests.org/en/latest/>`_ library
`request method <http://docs.python-requests.org/en/latest/api/#requests.request>`_
and all keyword arguments will be passed on to that method.
:param str path: Device Cloud path to GET
:param int retries: The number of times the request should be retried if an
unsuccessful response is received. Most likely, you should leave this at 0.
:raises DeviceCloudHttpException: if a non-success response to the request is received
from Device Cloud
:returns: A requests ``Response`` object
""" |
url = self._make_url(path)
return self._make_request("GET", url, **kwargs) |
<SYSTEM_TASK:>
Perform an HTTP GET request with JSON headers of the specified path against Device Cloud
<END_TASK>
<USER_TASK:>
Description:
def get_json(self, path, **kwargs):
"""Perform an HTTP GET request with JSON headers of the specified path against Device Cloud
Make an HTTP GET request against Device Cloud with this accounts
credentials and base url. This method uses the
`requests <http://docs.python-requests.org/en/latest/>`_ library
`request method <http://docs.python-requests.org/en/latest/api/#requests.request>`_
and all keyword arguments will be passed on to that method.
This method will automatically add the ``Accept: application/json`` and parse the
JSON response from Device Cloud.
:param str path: Device Cloud path to GET
:param int retries: The number of times the request should be retried if an
unsuccessful response is received. Most likely, you should leave this at 0.
:raises DeviceCloudHttpException: if a non-success response to the request is received
from Device Cloud
:returns: A python data structure containing the results of calling ``json.loads`` on the
body of the response from Device Cloud.
""" |
url = self._make_url(path)
headers = kwargs.setdefault('headers', {})
headers.update({'Accept': 'application/json'})
response = self._make_request("GET", url, **kwargs)
return json.loads(response.text) |
<SYSTEM_TASK:>
Perform an HTTP POST request of the specified path in Device Cloud
<END_TASK>
<USER_TASK:>
Description:
def post(self, path, data, **kwargs):
"""Perform an HTTP POST request of the specified path in Device Cloud
Make an HTTP POST request against Device Cloud with this accounts
credentials and base url. This method uses the
`requests <http://docs.python-requests.org/en/latest/>`_ library
`request method <http://docs.python-requests.org/en/latest/api/#requests.request>`_
and all keyword arguments will be passed on to that method.
:param str path: Device Cloud path to POST
:param int retries: The number of times the request should be retried if an
unsuccessful response is received. Most likely, you should leave this at 0.
:param data: The data to be posted in the body of the POST request (see docs for
``requests.post``)
:raises DeviceCloudHttpException: if a non-success response to the request is received
from Device Cloud
:returns: A requests ``Response`` object
""" |
url = self._make_url(path)
return self._make_request("POST", url, data=data, **kwargs) |
<SYSTEM_TASK:>
Perform an HTTP PUT request of the specified path in Device Cloud
<END_TASK>
<USER_TASK:>
Description:
def put(self, path, data, **kwargs):
"""Perform an HTTP PUT request of the specified path in Device Cloud
Make an HTTP PUT request against Device Cloud with this accounts
credentials and base url. This method uses the
`requests <http://docs.python-requests.org/en/latest/>`_ library
`request method <http://docs.python-requests.org/en/latest/api/#requests.request>`_
and all keyword arguments will be passed on to that method.
:param str path: Device Cloud path to PUT
:param int retries: The number of times the request should be retried if an
unsuccessful response is received. Most likely, you should leave this at 0.
:param data: The data to be sent in the body of the PUT request (see docs for
``requests.put``)
:raises DeviceCloudHttpException: if a non-success response to the request is received
from Device Cloud
:returns: A requests ``Response`` object
""" |
url = self._make_url(path)
return self._make_request("PUT", url, data=data, **kwargs) |
<SYSTEM_TASK:>
Perform an HTTP DELETE request of the specified path in Device Cloud
<END_TASK>
<USER_TASK:>
Description:
def delete(self, path, retries=DEFAULT_THROTTLE_RETRIES, **kwargs):
"""Perform an HTTP DELETE request of the specified path in Device Cloud
Make an HTTP DELETE request against Device Cloud with this accounts
credentials and base url. This method uses the
`requests <http://docs.python-requests.org/en/latest/>`_ library
`request method <http://docs.python-requests.org/en/latest/api/#requests.request>`_
and all keyword arguments will be passed on to that method.
:param str path: Device Cloud path to DELETE
:param int retries: The number of times the request should be retried if an
unsuccessful response is received. Most likely, you should leave this at 0.
:raises DeviceCloudHttpException: if a non-success response to the request is received
from Device Cloud
:returns: A requests ``Response`` object
""" |
url = self._make_url(path)
return self._make_request("DELETE", url, **kwargs) |
<SYSTEM_TASK:>
Query an asynchronous SCI job by ID
<END_TASK>
<USER_TASK:>
Description:
def get_async_job(self, job_id):
"""Query an asynchronous SCI job by ID
This is useful if the job was not created with send_sci_async().
:param int job_id: The job ID to query
:returns: The SCI response from GETting the job information
""" |
uri = "/ws/sci/{0}".format(job_id)
# TODO: do parsing here?
return self._conn.get(uri) |
<SYSTEM_TASK:>
Send an asynchronous SCI request, and wraps the job in an object
<END_TASK>
<USER_TASK:>
Description:
def send_sci_async(self, operation, target, payload, **sci_options):
"""Send an asynchronous SCI request, and wraps the job in an object
to manage it
:param str operation: The operation is one of {send_message, update_firmware, disconnect, query_firmware_targets,
file_system, data_service, and reboot}
:param target: The device(s) to be targeted with this request
:type target: :class:`~.TargetABC` or list of :class:`~.TargetABC` instances
TODO: document other params
""" |
sci_options['synchronous'] = False
resp = self.send_sci(operation, target, payload, **sci_options)
dom = ET.fromstring(resp.content)
job_element = dom.find('.//jobId')
if job_element is None:
return
job_id = int(job_element.text)
return AsyncRequestProxy(job_id, self._conn) |
<SYSTEM_TASK:>
Send SCI request to 1 or more targets
<END_TASK>
<USER_TASK:>
Description:
def send_sci(self, operation, target, payload, reply=None, synchronous=None, sync_timeout=None,
cache=None, allow_offline=None, wait_for_reconnect=None):
"""Send SCI request to 1 or more targets
:param str operation: The operation is one of {send_message, update_firmware, disconnect, query_firmware_targets,
file_system, data_service, and reboot}
:param target: The device(s) to be targeted with this request
:type target: :class:`~.TargetABC` or list of :class:`~.TargetABC` instances
TODO: document other params
""" |
if not isinstance(payload, six.string_types) and not isinstance(payload, six.binary_type):
raise TypeError("payload is required to be a string or bytes")
# validate targets and build the targets XML section
try:
iter(target)
targets = target
except TypeError:
targets = [target, ]
if not all(isinstance(t, TargetABC) for t in targets):
raise TypeError("Target(s) must each be instances of TargetABC")
targets_xml = "".join(t.to_xml() for t in targets)
# reply argument
if not isinstance(reply, (type(None), six.string_types)):
raise TypeError("reply must be either None or a string")
if reply is not None:
reply_xml = ' reply="{}"'.format(reply)
else:
reply_xml = ''
# synchronous argument
if not isinstance(synchronous, (type(None), bool)):
raise TypeError("synchronous expected to be either None or a boolean")
if synchronous is not None:
synchronous_xml = ' synchronous="{}"'.format('true' if synchronous else 'false')
else:
synchronous_xml = ''
# sync_timeout argument
# TODO: What units is syncTimeout in? seconds?
if sync_timeout is not None and not isinstance(sync_timeout, six.integer_types):
raise TypeError("sync_timeout expected to either be None or a number")
if sync_timeout is not None:
sync_timeout_xml = ' syncTimeout="{}"'.format(sync_timeout)
else:
sync_timeout_xml = ''
# cache argument
if not isinstance(cache, (type(None), bool)):
raise TypeError("cache expected to either be None or a boolean")
if cache is not None:
cache_xml = ' cache="{}"'.format('true' if cache else 'false')
else:
cache_xml = ''
# allow_offline argument
if not isinstance(allow_offline, (type(None), bool)):
raise TypeError("allow_offline is expected to be either None or a boolean")
if allow_offline is not None:
allow_offline_xml = ' allowOffline="{}"'.format('true' if allow_offline else 'false')
else:
allow_offline_xml = ''
# wait_for_reconnect argument
if not isinstance(wait_for_reconnect, (type(None), bool)):
raise TypeError("wait_for_reconnect expected to be either None or a boolean")
if wait_for_reconnect is not None:
wait_for_reconnect_xml = ' waitForReconnect="{}"'.format('true' if wait_for_reconnect else 'false')
else:
wait_for_reconnect_xml = ''
full_request = SCI_TEMPLATE.format(
operation=operation,
targets=targets_xml,
reply=reply_xml,
synchronous=synchronous_xml,
sync_timeout=sync_timeout_xml,
cache=cache_xml,
allow_offline=allow_offline_xml,
wait_for_reconnect=wait_for_reconnect_xml,
payload=payload
)
# TODO: do parsing here?
return self._conn.post("/ws/sci", full_request) |
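A synchronous variant of the same request, as a sketch (the parameter values shown are assumptions; the payload XML is operation-specific):

    resp = sci.send_sci(
        operation='send_message',
        target=target,
        payload='<my_payload/>',   # illustrative operation-specific XML
        synchronous=True,
        sync_timeout=30)           # presumed to be seconds; see the TODO above
    print(resp.status_code, resp.content)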
<SYSTEM_TASK:>
Write to stream using fmt and value if value is not None
<END_TASK>
<USER_TASK:>
Description:
def conditional_write(strm, fmt, value, *args, **kwargs):
"""Write to stream using fmt and value if value is not None""" |
if value is not None:
strm.write(fmt.format(value, *args, **kwargs)) |
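Because `conditional_write` underpins the XML builders below, a quick self-contained example:

    import io
    out = io.StringIO()
    conditional_write(out, "<units>{}</units>", None)    # value is None: nothing written
    conditional_write(out, "<units>{}</units>", "degC")  # value present: tag written
    assert out.getvalue() == "<units>degC</units>"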
<SYSTEM_TASK:>
Given an ISO8601 string as returned by Device Cloud, convert to a datetime object
<END_TASK>
<USER_TASK:>
Description:
def iso8601_to_dt(iso8601):
"""Given an ISO8601 string as returned by Device Cloud, convert to a datetime object""" |
# We could just use arrow.get() but that is more permissive than we actually want.
# Internal (but still public) to arrow is the actual parser where we can be
# a bit more specific
parser = DateTimeParser()
try:
arrow_dt = arrow.Arrow.fromdatetime(parser.parse_iso(iso8601))
return arrow_dt.to('utc').datetime
except ParserError as pe:
raise ValueError("Provided was not a valid ISO8601 string: %r" % pe) |
<SYSTEM_TASK:>
Convert ``input`` to either None or a datetime object
<END_TASK>
<USER_TASK:>
Description:
def to_none_or_dt(input):
"""Convert ``input`` to either None or a datetime object
If the input is None, None will be returned.
If the input is a datetime object, it will be converted to a datetime
object with UTC timezone info. If the datetime object is naive, then
this method will assume the object is specified according to UTC and
not local or some other timezone.
If the input to the function is a string, this method will attempt to
parse the input as an ISO-8601 formatted string.
:param input: Input data (expected to be either str, None, or datetime object)
:return: datetime object from input or None if already None
:rtype: datetime or None
""" |
if input is None:
return input
elif isinstance(input, datetime.datetime):
arrow_dt = arrow.Arrow.fromdatetime(input, input.tzinfo or 'utc')
return arrow_dt.to('utc').datetime
elif isinstance(input, six.string_types):
# try to convert from ISO8601
return iso8601_to_dt(input)
else:
raise TypeError("Not a string, NoneType, or datetime object") |
<SYSTEM_TASK:>
Return an ISO-8601 formatted string from the provided datetime object
<END_TASK>
<USER_TASK:>
Description:
def isoformat(dt):
"""Return an ISO-8601 formatted string from the provided datetime object""" |
if not isinstance(dt, datetime.datetime):
raise TypeError("Must provide datetime.datetime object to isoformat")
if dt.tzinfo is None:
raise ValueError("naive datetime objects are not allowed beyond the library boundaries")
return dt.isoformat().replace("+00:00", "Z") |
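Round-tripping with the parser above (Python 3 shown; `datetime.timezone.utc` is the stdlib UTC tzinfo):

    import datetime
    dt = datetime.datetime(2015, 3, 4, 19, 30, tzinfo=datetime.timezone.utc)
    isoformat(dt)   # -> '2015-03-04T19:30:00Z'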
<SYSTEM_TASK:>
Return a generator over all results matching the provided condition
<END_TASK>
<USER_TASK:>
Description:
def get_filedata(self, condition=None, page_size=1000):
"""Return a generator over all results matching the provided condition
:param condition: An :class:`.Expression` which defines the condition
which must be matched on the filedata that will be retrieved from
file data store. If a condition is unspecified, the following condition
will be used ``fd_path == '~/'``. This condition will match all file
data in this account's "home" directory (a sensible root).
:type condition: :class:`.Expression` or None
:param int page_size: The number of results to fetch in a single page. Regardless
of the size specified, :meth:`.get_filedata` will continue to fetch pages
and yield results until all items have been fetched.
:return: Generator yielding :class:`.FileDataObject` instances matching the
provided conditions.
""" |
condition = validate_type(condition, type(None), Expression, *six.string_types)
page_size = validate_type(page_size, *six.integer_types)
if condition is None:
condition = (fd_path == "~/") # home directory
params = {"embed": "true", "condition": condition.compile()}
for fd_json in self._conn.iter_json_pages("/ws/FileData", page_size=page_size, **params):
yield FileDataObject.from_json(self, fd_json) |
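A usage sketch (assumes `fd_path` is importable from `devicecloud.filedata` and that `dc.get_filedata_api()` returns this API object):

    from devicecloud.filedata import fd_path
    fdapi = dc.get_filedata_api()
    for fd in fdapi.get_filedata(fd_path == '/db/mydir/'):
        print(fd.get_full_path())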
<SYSTEM_TASK:>
Write a file to the file data store at the given path
<END_TASK>
<USER_TASK:>
Description:
def write_file(self, path, name, data, content_type=None, archive=False,
raw=False):
"""Write a file to the file data store at the given path
:param str path: The path (directory) into which the file should be written.
:param str name: The name of the file to be written.
:param data: The binary data that should be written into the file.
:type data: str (Python2) or bytes (Python3)
:param content_type: The content type for the data being written to the file. May
be left unspecified.
:type content_type: str or None
:param bool archive: If true, history will be retained for various revisions of this
file. If this is not required, leave as false.
:param bool raw: If true, skip the FileData XML headers (necessary for binary files)
""" |
path = validate_type(path, *six.string_types)
name = validate_type(name, *six.string_types)
data = validate_type(data, six.binary_type)
content_type = validate_type(content_type, type(None), *six.string_types)
archive_str = "true" if validate_type(archive, bool) else "false"
if not path.startswith("/"):
path = "/" + path
if not path.endswith("/"):
path += "/"
name = name.lstrip("/")
sio = six.moves.StringIO()
if not raw:
if six.PY3:
base64_encoded_data = base64.encodebytes(data).decode('utf-8')
else:
base64_encoded_data = base64.encodestring(data)
sio.write("<FileData>")
if content_type is not None:
sio.write("<fdContentType>{}</fdContentType>".format(content_type))
sio.write("<fdType>file</fdType>")
sio.write("<fdData>{}</fdData>".format(base64_encoded_data))
sio.write("<fdArchive>{}</fdArchive>".format(archive_str))
sio.write("</FileData>")
body = sio.getvalue()
else:
# send raw bytes unmodified; writing bytes into a StringIO raises TypeError on Python 3
body = data
params = {
"type": "file",
"archive": archive_str
}
self._conn.put(
"/ws/FileData{path}{name}".format(path=path, name=name),
body,
params=params) |
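Continuing the filedata sketch above (note that `data` must be bytes, hence `six.b`):

    fdapi.write_file("/test/", "hello.txt", six.b("Hello, world!"),
                     content_type="text/plain", archive=False)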
<SYSTEM_TASK:>
Delete a file or directory from the filedata store
<END_TASK>
<USER_TASK:>
Description:
def delete_file(self, path):
"""Delete a file or directory from the filedata store
This method removes a file or directory (recursively) from
the filedata store.
:param path: The path of the file or directory to remove
from the file data store.
""" |
path = validate_type(path, *six.string_types)
if not path.startswith("/"):
path = "/" + path
self._conn.delete("/ws/FileData{path}".format(path=path)) |
<SYSTEM_TASK:>
Emulation of os.walk behavior against Device Cloud filedata store
<END_TASK>
<USER_TASK:>
Description:
def walk(self, root="~/"):
"""Emulation of os.walk behavior against Device Cloud filedata store
This method will yield tuples in the form ``(dirpath, FileDataDirectory's, FileData's)``
recursively in pre-order (depth first from top down).
:param str root: The root path from which the search should commence. By default, this
is the root directory for this device cloud account (~).
:return: Generator yielding 3-tuples of dirpath, directories, and files
:rtype: 3-tuple in form (dirpath, list of :class:`FileDataDirectory`, list of :class:`FileDataFile`)
""" |
root = validate_type(root, *six.string_types)
directories = []
files = []
# fd_path is really picky: the query path must carry a trailing slash
query_fd_path = root
if not query_fd_path.endswith("/"):
query_fd_path += "/"
for fd_object in self.get_filedata(fd_path == query_fd_path):
if fd_object.get_type() == "directory":
directories.append(fd_object)
else:
files.append(fd_object)
# Yield the walk results for this level of the tree
yield (root, directories, files)
# recurse on each directory and yield results up the chain
for directory in directories:
for dirpath, subdirs, subfiles in self.walk(directory.get_full_path()):
yield (dirpath, subdirs, subfiles) |
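A sketch mirroring `os.walk` usage (`get_full_path` on file objects is assumed to behave as it does for the directory case above):

    for dirpath, dirs, files in fdapi.walk("~/"):
        print(dirpath)
        for f in files:
            print("  ", f.get_full_path())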
<SYSTEM_TASK:>
Get the data associated with this filedata object
<END_TASK>
<USER_TASK:>
Description:
def get_data(self):
"""Get the data associated with this filedata object
:returns: Data associated with this object or None if none exists
:rtype: str (Python2)/bytes (Python3) or None
""" |
# NOTE: we assume that the "embed" option is used
base64_data = self._json_data.get("fdData")
if base64_data is None:
return None
else:
# need to convert to bytes() with python 3
return base64.decodestring(six.b(base64_data)) |
<SYSTEM_TASK:>
Write a file into this directory
<END_TASK>
<USER_TASK:>
Description:
def write_file(self, *args, **kwargs):
"""Write a file into this directory
This method takes the same arguments as :meth:`.FileDataAPI.write_file`
with the exception of the ``path`` argument which is not needed here.
""" |
return self._fdapi.write_file(self.get_path(), *args, **kwargs) |
<SYSTEM_TASK:>
Return an iterator over all monitors matching the provided condition
<END_TASK>
<USER_TASK:>
Description:
def get_monitors(self, condition=None, page_size=1000):
"""Return an iterator over all monitors matching the provided condition
Get all inactive monitors and print id::
for mon in dc.monitor.get_monitors(MON_STATUS_ATTR == "DISABLED"):
print(mon.get_id())
Get all the HTTP monitors and print id::
for mon in dc.monitor.get_monitors(MON_TRANSPORT_TYPE_ATTR == "http"):
print(mon.get_id())
Many other possibilities exist. See the :mod:`devicecloud.condition` documentation
for additional details on building compound expressions.
:param condition: An :class:`.Expression` which defines the condition
which must be matched on the monitor that will be retrieved from
Device Cloud. If a condition is unspecified, an iterator over
all monitors for this account will be returned.
:type condition: :class:`.Expression` or None
:param int page_size: The number of results to fetch in a single page.
:return: Generator yielding :class:`.DeviceCloudMonitor` instances matching the
provided conditions.
""" |
req_kwargs = {}
if condition:
req_kwargs['condition'] = condition.compile()
for monitor_data in self._conn.iter_json_pages("/ws/Monitor", **req_kwargs):
yield DeviceCloudMonitor.from_json(self._conn, monitor_data, self._tcp_client_manager) |
<SYSTEM_TASK:>
Attempts to find a Monitor in device cloud that matches the provided topics
<END_TASK>
<USER_TASK:>
Description:
def get_monitor(self, topics):
"""Attempts to find a Monitor in device cloud that matches the provided topics
:param topics: a string list of topics (e.g. ``['DeviceCore[U]', 'FileDataCore']``)
Returns a :class:`DeviceCloudMonitor` if found, otherwise None.
""" |
for monitor in self.get_monitors(MON_TOPIC_ATTR == ",".join(topics)):
return monitor # return the first one, even if there are multiple
return None |
<SYSTEM_TASK:>
A function to get the python type to device cloud type converter function.
<END_TASK>
<USER_TASK:>
Description:
def _get_encoder_method(stream_type):
"""A function to get the python type to device cloud type converter function.
:param stream_type: The streams data type
:return: A function that when called with the python object will return the serializable
type for sending to the cloud. If there is no function for the given type, or the `stream_type`
is `None`, the returned function will simply return the object unchanged.
""" |
if stream_type is not None:
return DSTREAM_TYPE_MAP.get(stream_type.upper(), (lambda x: x, lambda x: x))[1]
else:
return lambda x: x |
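Illustration of the lookup (the exact converter per type depends on `DSTREAM_TYPE_MAP`; a string encoder for INTEGER is an assumption):

    encode = _get_encoder_method("integer")   # case-insensitive via .upper()
    encode(42)                                # -> "42" (serializable form, assumed)
    identity = _get_encoder_method(None)
    identity(42)                              # -> 42, passed through unchanged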
<SYSTEM_TASK:>
A function to get Device Cloud type to python type converter function.
<END_TASK>
<USER_TASK:>
Description:
def _get_decoder_method(stream_type):
""" A function to get Device Cloud type to python type converter function.
:param stream_type: The streams data type
:return: A function that when called with Device Cloud object will return the python
native type. If there is no function for the given type, or the `stream_type` is `None`,
the returned function will simply return the object unchanged.
""" |
if stream_type is not None:
return DSTREAM_TYPE_MAP.get(stream_type.upper(), (lambda x: x, lambda x: x))[0]
else:
return lambda x: x |
<SYSTEM_TASK:>
Create a new data stream on Device Cloud
<END_TASK>
<USER_TASK:>
Description:
def create_stream(self, stream_id, data_type, description=None, data_ttl=None,
rollup_ttl=None, units=None):
"""Create a new data stream on Device Cloud
This method will attempt to create a new data stream on Device Cloud.
This method will only succeed if the stream does not already exist.
:param str stream_id: The path/id of the stream being created on Device Cloud.
:param str data_type: The type of this stream. This must be in the set
`{ INTEGER, LONG, FLOAT, DOUBLE, STRING, BINARY, UNKNOWN }`. These values are
available in constants like :attr:`~STREAM_TYPE_INTEGER`.
:param str description: An optional description of this stream. See :meth:`~DataStream.get_description`.
:param int data_ttl: The TTL for data points in this stream. See :meth:`~DataStream.get_data_ttl`.
:param int rollup_ttl: The TTL for performing rollups on data. See :meth:`~DataStream.get_rollup_ttl`.
:param str units: Units for data in this stream. See :meth:`~DataStream.get_units`.
""" |
stream_id = validate_type(stream_id, *six.string_types)
data_type = validate_type(data_type, type(None), *six.string_types)
if isinstance(data_type, six.string_types):
data_type = str(data_type).upper()
if data_type not in ({None} | set(DSTREAM_TYPE_MAP)):
raise ValueError("data_type %r is not valid" % data_type)
description = validate_type(description, type(None), *six.string_types)
data_ttl = validate_type(data_ttl, type(None), *six.integer_types)
rollup_ttl = validate_type(rollup_ttl, type(None), *six.integer_types)
units = validate_type(units, type(None), *six.string_types)
sio = StringIO()
sio.write("<DataStream>")
conditional_write(sio, "<streamId>{}</streamId>", stream_id)
conditional_write(sio, "<dataType>{}</dataType>", data_type)
conditional_write(sio, "<description>{}</description>", description)
conditional_write(sio, "<dataTtl>{}</dataTtl>", data_ttl)
conditional_write(sio, "<rollupTtl>{}</rollupTtl>", rollup_ttl)
conditional_write(sio, "<units>{}</units>", units)
sio.write("</DataStream>")
self._conn.post("/ws/DataStream", sio.getvalue())
logger.info("Data stream (%s) created successfully", stream_id)
stream = DataStream(self._conn, stream_id)
return stream |
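A creation sketch (constants such as `STREAM_TYPE_FLOAT` are assumed importable from `devicecloud.streams`, and `dc.get_streams_api()` is assumed to return this API object):

    streams = dc.get_streams_api()
    stream = streams.create_stream(
        stream_id="environment/temperature",
        data_type=STREAM_TYPE_FLOAT,
        description="Ambient temperature",
        units="degC")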
<SYSTEM_TASK:>
Return a reference to a stream with the given ``stream_id`` if it exists
<END_TASK>
<USER_TASK:>
Description:
def get_stream_if_exists(self, stream_id):
"""Return a reference to a stream with the given ``stream_id`` if it exists
This works similar to :py:meth:`get_stream` but will return None if the
stream is not already created.
:param stream_id: The path of the stream on Device Cloud
:raises TypeError: if the stream_id provided is the wrong type
:raises ValueError: if the stream_id is not properly formed
:return: :class:`.DataStream` instance with the provided stream_id
:rtype: :class:`~DataStream`
""" |
stream = self.get_stream(stream_id)
try:
stream.get_data_type(use_cached=True)
except NoSuchStreamException:
return None
else:
return stream |
<SYSTEM_TASK:>
Create a new DataPoint object from device cloud JSON data
<END_TASK>
<USER_TASK:>
Description:
def from_json(cls, stream, json_data):
"""Create a new DataPoint object from device cloud JSON data
:param DataStream stream: The :class:`~DataStream` out of which this data is coming
:param dict json_data: Deserialized JSON data from Device Cloud about this device
:raises ValueError: if the data is malformed
:return: (:class:`~DataPoint`) newly created :class:`~DataPoint`
""" |
type_converter = _get_decoder_method(stream.get_data_type())
data = type_converter(json_data.get("data"))
return cls(
# these are actually properties of the stream, not the data point
stream_id=stream.get_stream_id(),
data_type=stream.get_data_type(),
units=stream.get_units(),
# and these are part of the data point itself
data=data,
description=json_data.get("description"),
timestamp=json_data.get("timestampISO"),
server_timestamp=json_data.get("serverTimestampISO"),
quality=json_data.get("quality"),
location=json_data.get("location"),
dp_id=json_data.get("id"),
) |
<SYSTEM_TASK:>
Rollup json data from the server looks slightly different
<END_TASK>
<USER_TASK:>
Description:
def from_rollup_json(cls, stream, json_data):
"""Rollup json data from the server looks slightly different
:param DataStream stream: The :class:`~DataStream` out of which this data is coming
:param dict json_data: Deserialized JSON data from Device Cloud about this device
:raises ValueError: if the data is malformed
:return: (:class:`~DataPoint`) newly created :class:`~DataPoint`
""" |
dp = cls.from_json(stream, json_data)
# Special handling for timestamp
timestamp = isoformat(dc_utc_timestamp_to_dt(int(json_data.get("timestamp"))))
# Special handling for data, all rollup data is float type
type_converter = _get_decoder_method(stream.get_data_type())
data = type_converter(float(json_data.get("data")))
# Update the special fields
dp.set_timestamp(timestamp)
dp.set_data(data)
return dp |
<SYSTEM_TASK:>
Set the stream id associated with this data point
<END_TASK>
<USER_TASK:>
Description:
def set_stream_id(self, stream_id):
"""Set the stream id associated with this data point""" |
stream_id = validate_type(stream_id, type(None), *six.string_types)
if stream_id is not None:
stream_id = stream_id.lstrip('/')
self._stream_id = stream_id |
<SYSTEM_TASK:>
Set the description for this data point
<END_TASK>
<USER_TASK:>
Description:
def set_description(self, description):
"""Set the description for this data point""" |
self._description = validate_type(description, type(None), *six.string_types) |
<SYSTEM_TASK:>
Set the quality for this sample
<END_TASK>
<USER_TASK:>
Description:
def set_quality(self, quality):
"""Set the quality for this sample
Quality is stored on Device Cloud as a 32-bit integer, so the input
to this function should be either None, an integer, or a string that can
be converted to an integer.
""" |
if isinstance(quality, six.string_types):
quality = int(quality)
elif isinstance(quality, float):
quality = int(quality)
self._quality = validate_type(quality, type(None), *six.integer_types) |
<SYSTEM_TASK:>
Set the location for this data point
<END_TASK>
<USER_TASK:>
Description:
def set_location(self, location):
"""Set the location for this data point
The location must be either None (if no location data is known) or a
3-tuple of floating point values in the form
(latitude-degrees, longitude-degrees, altitude-meters).
""" |
if location is None:
self._location = location
elif isinstance(location, six.string_types): # from device cloud, convert from csv
parts = str(location).split(",")
if len(parts) == 3:
self._location = tuple(map(float, parts))
return
else:
raise ValueError("Location string %r has unexpected format" % location)
# TODO: could maybe try to allow any iterable but this covers the most common cases
elif (isinstance(location, (tuple, list))
and len(location) == 3
and all([isinstance(x, (float, six.integer_types)) for x in location])):
self._location = tuple(map(float, location)) # coerce ints to float
else:
raise TypeError("Location must be None or 3-tuple of floats") |
<SYSTEM_TASK:>
Set the data type for this data point
<END_TASK>
<USER_TASK:>
Description:
def set_data_type(self, data_type):
"""Set the data type for ths data point
The data type is actually associated with the stream itself and should
not (generally) vary on a point-per-point basis. That being said, if
creating a new stream by writing a datapoint, it may be beneficial to
include this information.
The data type provided should be in the set of available data types of
{ INTEGER, LONG, FLOAT, DOUBLE, STRING, BINARY, UNKNOWN }.
""" |
validate_type(data_type, type(None), *six.string_types)
if isinstance(data_type, *six.string_types):
data_type = str(data_type).upper()
if not data_type in ({None} | set(DSTREAM_TYPE_MAP.keys())):
raise ValueError("Provided data type not in available set of types")
self._data_type = data_type |
<SYSTEM_TASK:>
Set the unit for this data point
<END_TASK>
<USER_TASK:>
Description:
def set_units(self, unit):
"""Set the unit for this data point
Unit, as with data_type, are actually associated with the stream and not
the individual data point. As such, changing this within a stream is
not encouraged. Setting the unit on the data point is useful when the
stream might be created with the write of a data point.
""" |
self._units = validate_type(unit, type(None), *six.string_types) |
<SYSTEM_TASK:>
Convert this datapoint into a form suitable for pushing to device cloud
<END_TASK>
<USER_TASK:>
Description:
def to_xml(self):
"""Convert this datapoint into a form suitable for pushing to device cloud
An XML string will be returned that will contain all pieces of information
set on this datapoint. Values not set (e.g. quality) will be omitted.
""" |
type_converter = _get_encoder_method(self._data_type)
# Convert from python native to device cloud
encoded_data = type_converter(self._data)
out = StringIO()
out.write("<DataPoint>")
out.write("<streamId>{}</streamId>".format(self.get_stream_id()))
out.write("<data>{}</data>".format(encoded_data))
conditional_write(out, "<description>{}</description>", self.get_description())
if self.get_timestamp() is not None:
out.write("<timestamp>{}</timestamp>".format(isoformat(self.get_timestamp())))
conditional_write(out, "<quality>{}</quality>", self.get_quality())
if self.get_location() is not None:
out.write("<location>%s</location>" % ",".join(map(str, self.get_location())))
conditional_write(out, "<streamType>{}</streamType>", self.get_data_type())
conditional_write(out, "<streamUnits>{}</streamUnits>", self.get_units())
out.write("</DataPoint>")
return out.getvalue() |
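A sketch of the resulting serialization (the `DataPoint` constructor keywords shown are taken from the `from_json` call earlier; the exact encoded output is an assumption):

    dp = DataPoint(data=21.5, stream_id="environment/temperature",
                   data_type="FLOAT", units="degC")
    dp.to_xml()
    # -> '<DataPoint><streamId>environment/temperature</streamId><data>21.5</data>'
    #    '<streamType>FLOAT</streamType><streamUnits>degC</streamUnits></DataPoint>'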
<SYSTEM_TASK:>
Get the data type of this stream if it exists
<END_TASK>
<USER_TASK:>
Description:
def get_data_type(self, use_cached=True):
"""Get the data type of this stream if it exists
The data type is the type of data stored in this data stream. Valid types include:
* INTEGER - data can be represented with a network (= big-endian) 32-bit two's-complement integer. Data
with this type maps to a python int.
* LONG - data can be represented with a network (= big-endian) 64-bit two's complement integer. Data
with this type maps to a python int.
* FLOAT - data can be represented with a network (= big-endian) 32-bit IEEE754 floating point. Data
with this type maps to a python float.
* DOUBLE - data can be represented with a network (= big-endian) 64-bit IEEE754 floating point. Data
with this type maps to a python float.
* STRING - UTF-8. Data with this type maps to a python string.
* BINARY - Data with this type maps to a python string.
* UNKNOWN - Data with this type maps to a python string.
:param bool use_cached: If False, the function will always request the latest from Device Cloud.
If True, the device will not make a request if it already has cached data.
:return: The data type of this stream as a string
:rtype: str
""" |
dtype = self._get_stream_metadata(use_cached).get("dataType")
if dtype is not None:
dtype = dtype.upper()
return dtype |
<SYSTEM_TASK:>
Retrieve the dataTTL for this stream
<END_TASK>
<USER_TASK:>
Description:
def get_data_ttl(self, use_cached=True):
"""Retrieve the dataTTL for this stream
The dataTtl is the time to live (TTL) in seconds for data points stored in the data stream.
A data point expires after the configured amount of time and is automatically deleted.
:param bool use_cached: If False, the function will always request the latest from Device Cloud.
If True, the device will not make a request if it already has cached data.
:raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error
:raises devicecloud.streams.NoSuchStreamException: if this stream has not yet been created
:return: The dataTtl associated with this stream in seconds
:rtype: int or None
""" |
data_ttl_text = self._get_stream_metadata(use_cached).get("dataTtl")
return None if data_ttl_text is None else int(data_ttl_text) |
<SYSTEM_TASK:>
Retrieve the rollupTtl for this stream
<END_TASK>
<USER_TASK:>
Description:
def get_rollup_ttl(self, use_cached=True):
"""Retrieve the rollupTtl for this stream
The rollupTtl is the time to live (TTL) in seconds for the aggregate roll-ups of data points
stored in the stream. A roll-up expires after the configured amount of time and is
automatically deleted.
:param bool use_cached: If False, the function will always request the latest from Device Cloud.
If True, the device will not make a request if it already has cached data.
:raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error
:raises devicecloud.streams.NoSuchStreamException: if this stream has not yet been created
:return: The rollupTtl associated with this stream in seconds
:rtype: int or None
""" |
rollup_ttl_text = self._get_stream_metadata(use_cached).get("rollupTtl")
return None if rollup_ttl_text is None else int(rollup_ttl_text) |
<SYSTEM_TASK:>
Return the most recent DataPoint value written to a stream
<END_TASK>
<USER_TASK:>
Description:
def get_current_value(self, use_cached=False):
"""Return the most recent DataPoint value written to a stream
The current value is the last recorded data point for this stream.
:param bool use_cached: If False, the function will always request the latest from Device Cloud.
If True, the device will not make a request if it already has cached data.
:raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error
:raises devicecloud.streams.NoSuchStreamException: if this stream has not yet been created
:return: The most recent value written to this stream (or None if nothing has been written)
:rtype: :class:`~DataPoint` or None
""" |
current_value = self._get_stream_metadata(use_cached).get("currentValue")
if current_value:
return DataPoint.from_json(self, current_value)
else:
return None |
<SYSTEM_TASK:>
Delete this stream from Device Cloud along with its history
<END_TASK>
<USER_TASK:>
Description:
def delete(self):
"""Delete this stream from Device Cloud along with its history
This call will return None on success and raise an exception in the event of an error
performing the deletion.
:raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error
:raises devicecloud.streams.NoSuchStreamException: if this stream has already been deleted
""" |
try:
self._conn.delete("/ws/DataStream/{}".format(self.get_stream_id()))
except DeviceCloudHttpException as http_exception:
if http_exception.response.status_code == 404:
raise NoSuchStreamException() # this branch is present, but the DC appears to just return 200 again
else:
raise http_exception |
<SYSTEM_TASK:>
Delete the provided datapoint from this stream
<END_TASK>
<USER_TASK:>
Description:
def delete_datapoint(self, datapoint):
"""Delete the provided datapoint from this stream
:raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error
""" |
datapoint = validate_type(datapoint, DataPoint)
self._conn.delete("/ws/DataPoint/{stream_id}/{datapoint_id}".format(
stream_id=self.get_stream_id(),
datapoint_id=datapoint.get_id(),
)) |
<SYSTEM_TASK:>
Delete datapoints from this stream between the provided start and end times
<END_TASK>
<USER_TASK:>
Description:
def delete_datapoints_in_time_range(self, start_dt=None, end_dt=None):
"""Delete datapoints from this stream between the provided start and end times
If neither a start or end time is specified, all data points in the stream
will be deleted.
:param start_dt: The datetime after which data points should be deleted or None
if all data points from the beginning of time should be deleted.
:param end_dt: The datetime before which data points should be deleted or None
if all data points until the current time should be deleted.
:raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error
""" |
start_dt = to_none_or_dt(validate_type(start_dt, datetime.datetime, type(None)))
end_dt = to_none_or_dt(validate_type(end_dt, datetime.datetime, type(None)))
params = {}
if start_dt is not None:
params['startTime'] = isoformat(start_dt)
if end_dt is not None:
params['endTime'] = isoformat(end_dt)
self._conn.delete("/ws/DataPoint/{stream_id}{querystring}".format(
stream_id=self.get_stream_id(),
querystring="?" + urllib.parse.urlencode(params) if params else "",
)) |
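For example, pruning everything older than 30 days from a `DataStream` instance `stream` (timezone-aware datetimes are required, per `isoformat` above; Python 3 shown):

    import datetime
    cutoff = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=30)
    stream.delete_datapoints_in_time_range(end_dt=cutoff)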
<SYSTEM_TASK:>
Write some raw data to a stream using the DataPoint API
<END_TASK>
<USER_TASK:>
Description:
def write(self, datapoint):
"""Write some raw data to a stream using the DataPoint API
This method will mutate the datapoint provided to populate it with information
available from the stream as it is available (but without making any new HTTP
requests). For instance, we will add in information about the stream data
type if it is available so that proper type conversion happens.
Values already set on the datapoint will not be overridden (except for path)
:param DataPoint datapoint: The :class:`.DataPoint` that should be written to Device Cloud
""" |
if not isinstance(datapoint, DataPoint):
raise TypeError("First argument must be a DataPoint object")
datapoint._stream_id = self.get_stream_id()
if self._cached_data is not None and datapoint.get_data_type() is None:
datapoint._data_type = self.get_data_type()
self._conn.post("/ws/DataPoint/{}".format(self.get_stream_id()), datapoint.to_xml()) |
<SYSTEM_TASK:>
Read one or more DataPoints from a stream
<END_TASK>
<USER_TASK:>
Description:
def read(self, start_time=None, end_time=None, use_client_timeline=True, newest_first=True,
rollup_interval=None, rollup_method=None, timezone=None, page_size=1000):
"""Read one or more DataPoints from a stream
.. warning::
The data points from Device Cloud is a paged data set. When iterating over the
result set there could be delays when we hit the end of a page. If this is undesirable,
the caller should collect all results into a data structure first before iterating over
the result set.
:param start_time: The start time for the window of data points to read. None means
that we should start with the oldest data available.
:type start_time: :class:`datetime.datetime` or None
:param end_time: The end time for the window of data points to read. None means
that we should include all points received until this point in time.
:type end_time: :class:`datetime.datetime` or None
:param bool use_client_timeline: If True, the times used will be those provided by
clients writing data points into the cloud (which also default to server time
if a timestamp was not included by the client). This is usually what you
want. If False, the server timestamp will be used which records when the data
point was received.
:param bool newest_first: If True, results will be ordered from newest to oldest (descending order).
If False, results will be returned oldest to newest.
:param rollup_interval: the roll-up interval that should be used if one is desired at all. Rollups
will not be performed if None is specified for the interval. Valid roll-up interval values
are None, "half", "hourly", "day", "week", and "month". See `DataPoints documentation
<http://ftp1.digi.com/support/documentation/html/90002008/90002008_P/Default.htm#ProgrammingTopics/DataStreams.htm#DataPoints>`_
for additional details on these values.
:type rollup_interval: str or None
:param rollup_method: The aggregation applied to values in the points within the specified
rollup_interval. Available methods are None, "sum", "average", "min", "max", "count", and
"standarddev". See `DataPoint documentation
<http://ftp1.digi.com/support/documentation/html/90002008/90002008_P/Default.htm#ProgrammingTopics/DataStreams.htm#DataPoints>`_
for additional details on these values.
:type rollup_method: str or None
:param timezone: timezone for calculating roll-ups. This determines roll-up interval
boundaries and only applies to roll-ups of a day or larger (for example, day,
week, or month). Note that it does not apply to the startTime and endTime parameters.
See the `Timestamps <http://ftp1.digi.com/support/documentation/html/90002008/90002008_P/Default.htm#ProgrammingTopics/DataStreams.htm#timestamp>`_
and `Supported Time Zones <http://ftp1.digi.com/support/documentation/html/90002008/90002008_P/Default.htm#ProgrammingTopics/DataStreams.htm#TimeZones>`_
sections for more information.
:type timezone: str or None
:param int page_size: The number of results that we should attempt to retrieve from the
device cloud in each page. Generally, this can be left at its default value unless
you have a good reason to change the parameter for performance reasons.
:returns: A generator object which one can iterate over the DataPoints read.
""" |
is_rollup = False
if (rollup_interval is not None) or (rollup_method is not None):
is_rollup = True
numeric_types = [
STREAM_TYPE_INTEGER,
STREAM_TYPE_LONG,
STREAM_TYPE_FLOAT,
STREAM_TYPE_DOUBLE,
STREAM_TYPE_STRING,
STREAM_TYPE_BINARY,
STREAM_TYPE_UNKNOWN,
]
if self.get_data_type(use_cached=True) not in numeric_types:
raise InvalidRollupDatatype('Rollups only support numerical DataPoints')
# Validate function inputs
start_time = to_none_or_dt(validate_type(start_time, datetime.datetime, type(None)))
end_time = to_none_or_dt(validate_type(end_time, datetime.datetime, type(None)))
use_client_timeline = validate_type(use_client_timeline, bool)
newest_first = validate_type(newest_first, bool)
rollup_interval = validate_type(rollup_interval, type(None), *six.string_types)
if rollup_interval not in {None,
ROLLUP_INTERVAL_HALF,
ROLLUP_INTERVAL_HOUR,
ROLLUP_INTERVAL_DAY,
ROLLUP_INTERVAL_WEEK,
ROLLUP_INTERVAL_MONTH, }:
raise ValueError("Invalid rollup_interval %r provided" % (rollup_interval, ))
rollup_method = validate_type(rollup_method, type(None), *six.string_types)
if rollup_method not in {None,
ROLLUP_METHOD_SUM,
ROLLUP_METHOD_AVERAGE,
ROLLUP_METHOD_MIN,
ROLLUP_METHOD_MAX,
ROLLUP_METHOD_COUNT,
ROLLUP_METHOD_STDDEV}:
raise ValueError("Invalid rollup_method %r provided" % (rollup_method, ))
timezone = validate_type(timezone, type(None), *six.string_types)
page_size = validate_type(page_size, *six.integer_types)
# Remember that there could be multiple pages of data and we want to provide
# an iterator over the result set. To start the process out, we need to make
# an initial request without a page cursor. We should get one in response to
# our first request which we will use to page through the result set
query_parameters = {
'timeline': 'client' if use_client_timeline else 'server',
'order': 'descending' if newest_first else 'ascending',
'size': page_size
}
if start_time is not None:
query_parameters["startTime"] = isoformat(start_time)
if end_time is not None:
query_parameters["endTime"] = isoformat(end_time)
if rollup_interval is not None:
query_parameters["rollupInterval"] = rollup_interval
if rollup_method is not None:
query_parameters["rollupMethod"] = rollup_method
if timezone is not None:
query_parameters["timezone"] = timezone
result_size = page_size
while result_size == page_size:
# request the next page of data or first if pageCursor is not set as query param
try:
result = self._conn.get_json("/ws/DataPoint/{stream_id}?{query_params}".format(
stream_id=self.get_stream_id(),
query_params=urllib.parse.urlencode(query_parameters)
))
except DeviceCloudHttpException as http_exception:
if http_exception.response.status_code == 404:
raise NoSuchStreamException()
raise http_exception
result_size = int(result["resultSize"]) # how many are actually included here?
query_parameters["pageCursor"] = result.get("pageCursor") # will not be present if result set is empty
for item_info in result.get("items", []):
if is_rollup:
data_point = DataPoint.from_rollup_json(self, item_info)
else:
data_point = DataPoint.from_json(self, item_info)
yield data_point |
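A read sketch (accessors such as `get_timestamp`/`get_data` are assumed to mirror the setters defined earlier):

    for dp in stream.read(rollup_interval="hourly",
                          rollup_method="average",
                          newest_first=False):
        print(dp.get_timestamp(), dp.get_data())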
<SYSTEM_TASK:>
Perform a read on input socket to consume headers and then return
<END_TASK>
<USER_TASK:>
Description:
def _read_msg_header(session):
"""
Perform a read on input socket to consume the 6-byte header and record
the message length on the session.
:param session: Push Session to read data for.
Returns the response type (e.g. PUBLISH_MESSAGE) if the header was
completely read, NO_DATA if the socket returned no data (likely closed),
or INCOMPLETE if the header has not been completely read yet.
""" |
try:
data = session.socket.recv(6 - len(session.data))
if len(data) == 0: # No Data on Socket. Likely closed.
return NO_DATA
session.data += data
# Data still not completely read.
if len(session.data) < 6:
return INCOMPLETE
except ssl.SSLError:
# This can happen when select gets triggered
# for an SSL socket and data has not yet been
# read.
return INCOMPLETE
session.message_length = struct.unpack('!i', session.data[2:6])[0]
response_type = struct.unpack('!H', session.data[0:2])[0]
# Clear out session data as header is consumed.
session.data = six.b("")
return response_type |
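The 6-byte header layout can be sanity-checked in isolation (the message type 0x03 here is purely illustrative):

    import struct
    header = struct.pack('!Hi', 0x03, 42)               # type (2 bytes) + length (4 bytes)
    response_type = struct.unpack('!H', header[0:2])[0]
    message_length = struct.unpack('!i', header[2:6])[0]
    assert (response_type, message_length) == (0x03, 42)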
<SYSTEM_TASK:>
Perform a read on input socket to consume the message body
<END_TASK>
<USER_TASK:>
Description:
def _read_msg(session):
"""
Perform a read on input socket to consume the message body and report
whether the complete message has been read into the session buffer.
:param session: Push Session to read data for.
""" |
if len(session.data) == session.message_length:
# Data Already completely read. Return
return True
try:
data = session.socket.recv(session.message_length - len(session.data))
if len(data) == 0:
raise PushException("No Data on Socket!")
session.data += data
except ssl.SSLError:
# This can happen when select gets triggered
# for an SSL socket and data has not yet been
# read. Wait for it to get triggered again.
return False
# Whether or not all data was read.
return len(session.data) == session.message_length |
<SYSTEM_TASK:>
Sends a ConnectionRequest to the iDigi server using the credentials
<END_TASK>
<USER_TASK:>
Description:
def send_connection_request(self):
"""
Sends a ConnectionRequest to the iDigi server using the credentials
established with the id of the monitor as defined in the monitor
member.
""" |
try:
self.log.info("Sending ConnectionRequest for Monitor %s."
% self.monitor_id)
# Send connection request and perform a receive to ensure
# request is authenticated.
# Protocol Version = 1.
payload = struct.pack('!H', 0x01)
# Username Length.
payload += struct.pack('!H', len(self.client.username))
# Username.
payload += six.b(self.client.username)
# Password Length.
payload += struct.pack('!H', len(self.client.password))
# Password.
payload += six.b(self.client.password)
# Monitor ID.
payload += struct.pack('!L', int(self.monitor_id))
# Header 6 Bytes : Type [2 bytes] & Length [4 Bytes]
# ConnectionRequest is Type 0x01.
data = struct.pack("!HL", CONNECTION_REQUEST, len(payload))
# The full payload.
data += payload
# Send Connection Request.
self.socket.send(data)
# Set a 60 second blocking on recv, if we don't get any data
# within 60 seconds, timeout which will throw an exception.
self.socket.settimeout(60)
# Should receive 10 bytes with ConnectionResponse.
response = self.socket.recv(10)
# Return the socket to non-blocking mode (a timeout of 0 is non-blocking).
self.socket.settimeout(0)
if len(response) != 10:
raise PushException("Length of Connection Request Response "
"(%d) is not 10." % len(response))
# Type
response_type = int(struct.unpack("!H", response[0:2])[0])
if response_type != CONNECTION_RESPONSE:
raise PushException(
"Connection Response Type (%d) is not "
"ConnectionResponse Type (%d)." % (response_type, CONNECTION_RESPONSE))
status_code = struct.unpack("!H", response[6:8])[0]
self.log.info("Got ConnectionResponse for Monitor %s. Status %s."
% (self.monitor_id, status_code))
if status_code != STATUS_OK:
raise PushException("Connection Response Status Code (%d) is "
"not STATUS_OK (%d)." % (status_code, STATUS_OK))
except Exception as exception:
# TODO(posborne): This is bad! It isn't necessarily a socket exception!
# Likely a socket exception, close it and raise an exception.
self.socket.close()
self.socket = None
raise exception |
<SYSTEM_TASK:>
Creates a TCP connection to Device Cloud and sends a ConnectionRequest message
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""Creates a TCP connection to Device Cloud and sends a ConnectionRequest message""" |
self.log.info("Starting Insecure Session for Monitor %s" % self.monitor_id)
if self.socket is not None:
raise Exception("Socket already established for %s." % self)
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((self.client.hostname, PUSH_OPEN_PORT))
self.socket.setblocking(0)
except socket.error as exception:
self.socket.close()
self.socket = None
raise
self.send_connection_request() |
<SYSTEM_TASK:>
Creates a SSL connection to the iDigi Server and sends a
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""
Creates a SSL connection to the iDigi Server and sends a
ConnectionRequest message.
""" |
self.log.info("Starting SSL Session for Monitor %s."
% self.monitor_id)
if self.socket is not None:
raise Exception("Socket already established for %s." % self)
try:
# Create socket, wrap in SSL and connect.
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Validate that the certificate the server uses matches what we expect.
if self.ca_certs is not None:
self.socket = ssl.wrap_socket(self.socket,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=self.ca_certs)
else:
self.socket = ssl.wrap_socket(self.socket)
self.socket.connect((self.client.hostname, PUSH_SECURE_PORT))
self.socket.setblocking(0)
except Exception as exception:
self.socket.close()
self.socket = None
raise exception
self.send_connection_request() |
<SYSTEM_TASK:>
Continually blocks until data is on the internal queue, then calls
<END_TASK>
<USER_TASK:>
Description:
def _consume_queue(self):
"""
Continually blocks until data is on the internal queue, then calls
the session's registered callback and sends a PublishMessageReceived
if callback returned True.
""" |
while True:
session, block_id, raw_data = self._queue.get()
data = json.loads(raw_data.decode('utf-8')) # decode as JSON
try:
result = session.callback(data)
if result is None:
self.log.warn("Callback %r returned None, expected boolean. Messages "
"are not marked as received unless True is returned", session.callback)
elif result:
# Send a Successful PublishMessageReceived with the
# block id sent in request
if self._write_queue is not None:
response_message = struct.pack('!HHH',
PUBLISH_MESSAGE_RECEIVED,
block_id, 200)
self._write_queue.put((session.socket, response_message))
except Exception as exception:
self.log.exception(exception)
self._queue.task_done() |
<SYSTEM_TASK:>
Queues up a callback event to occur for a session with the given
<END_TASK>
<USER_TASK:>
Description:
def queue_callback(self, session, block_id, data):
"""
Queues up a callback event to occur for a session with the given
payload data. Will block if the queue is full.
:param session: the session with a defined callback function to call.
:param block_id: the block_id of the message received.
:param data: the data payload of the message received.
""" |
self._queue.put((session, block_id, data)) |
<SYSTEM_TASK:>
Restarts and re-establishes session
<END_TASK>
<USER_TASK:>
Description:
def _restart_session(self, session):
"""Restarts and re-establishes session
:param session: The session to restart
""" |
# remove old session key, if socket is None, that means the
# session was closed by user and there is no need to restart.
if session.socket is not None:
self.log.info("Attempting restart session for Monitor Id %s."
% session.monitor_id)
del self.sessions[session.socket.fileno()]
session.stop()
session.start()
self.sessions[session.socket.fileno()] = session |