code | docs
---|---|
def run(self, call, num_alts):
for key, value in call.data.items():
self._check_count(call, key, value, num_alts) | Check ``FORMAT`` of a record.Call.
Currently, only checks for consistent counts are implemented |
def _read_next_line(self):
prev_line = self._line
self._line = self.stream.readline()
return prev_line | Read the next line, store it in ``self._line``, and return the old one |
def parse_header(self, parsed_samples=None):
# parse header lines
sub_parser = HeaderParser()
header_lines = []
while self._line and self._line.startswith("##"):
header_lines.append(sub_parser.parse_line(self._line))
self._read_next_line()
# parse sample info line
self.samples = self._handle_sample_line(parsed_samples)
# construct Header object
self.header = header.Header(header_lines, self.samples)
# check header for consistency
self._header_checker.run(self.header)
# construct record parser
self._record_parser = RecordParser(self.header, self.samples, self.record_checks)
# read next line, must not be header
self._read_next_line()
if self._line and self._line.startswith("#"):
raise exceptions.IncorrectVCFFormat(
'Expecting non-header line or EOF after "#CHROM" line'
)
return self.header | Read and parse :py:class:`vcfpy.header.Header` from file, set
into ``self.header`` and return it
:param list parsed_samples: ``list`` of ``str`` for subsetting the
samples to parse
:returns: ``vcfpy.header.Header``
:raises: ``vcfpy.exceptions.InvalidHeaderException`` in the case of
problems reading the header |
def _handle_sample_line(self, parsed_samples=None):
if not self._line or not self._line.startswith("#CHROM"):
raise exceptions.IncorrectVCFFormat('Missing line starting with "#CHROM"')
# check for space before INFO
line = self._line.rstrip()
pos = line.find("FORMAT") if ("FORMAT" in line) else line.find("INFO")
if pos == -1:
raise exceptions.IncorrectVCFFormat('Ill-formatted line starting with "#CHROM"')
if " " in line[:pos]:
warnings.warn(
(
"Found space in #CHROM line, splitting at whitespace "
"instead of tab; this VCF file is ill-formatted"
),
SpaceInChromLine,
)
arr = self._line.rstrip().split()
else:
arr = self._line.rstrip().split("\t")
self._check_samples_line(arr)
return header.SamplesInfos(arr[len(REQUIRE_SAMPLE_HEADER) :], parsed_samples) | Check and interpret the "#CHROM" line and return samples |
def _check_samples_line(klass, arr):
if len(arr) <= len(REQUIRE_NO_SAMPLE_HEADER):
if tuple(arr) != REQUIRE_NO_SAMPLE_HEADER:
raise exceptions.IncorrectVCFFormat(
"Sample header line indicates no sample but does not "
"equal required prefix {}".format("\t".join(REQUIRE_NO_SAMPLE_HEADER))
)
elif tuple(arr[: len(REQUIRE_SAMPLE_HEADER)]) != REQUIRE_SAMPLE_HEADER:
raise exceptions.IncorrectVCFFormat(
'Sample header line (starting with "#CHROM") does not '
"start with required prefix {}".format("\t".join(REQUIRE_SAMPLE_HEADER))
) | Perform additional checks on the samples line |
def numpy():
    '''Lazily import the numpy module'''
    if LazyImport.numpy_module is None:
        try:
            LazyImport.numpy_module = __import__('numpypy')
        except ImportError:
            try:
                LazyImport.numpy_module = __import__('numpy')
            except ImportError:
                raise ImportError('The numpy module is required')
    return LazyImport.numpy_module | Lazily import the numpy module |
def rpy2():
    '''Lazily import the rpy2 module'''
    if LazyImport.rpy2_module is None:
        try:
            rpy2 = __import__('rpy2.robjects')
        except ImportError:
            raise ImportError('The rpy2 module is required')
        LazyImport.rpy2_module = rpy2
        try:
            rpy2.forecast = rpy2.robjects.packages.importr('forecast')
        except Exception:
            raise ImportError('R and the "forecast" package are required')
        rpy2.ts = rpy2.robjects.r['ts']
        __import__('rpy2.robjects.numpy2ri')
        rpy2.robjects.numpy2ri.activate()
    return LazyImport.rpy2_module | Lazily import the rpy2 module |
def map_position(pos):
    position_dict = dict(zip(range(1, 17), [i for i in range(30, 62) if i % 2]))
    return position_dict[pos] | Map a natural position to a machine-code position |
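For reference, a minimal sketch of the mapping this builds: natural positions 1 through 16 map onto the odd machine-code positions 31 through 61.

```python
# Sketch: the same mapping map_position() builds, shown explicitly.
position_dict = dict(zip(range(1, 17), [i for i in range(30, 62) if i % 2]))
assert position_dict[1] == 31   # first preset -> first odd code
assert position_dict[16] == 61  # last preset -> last odd code
```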
def snap(self, path=None):
if path is None:
path = "/tmp"
else:
path = path.rstrip("/")
day_dir = datetime.datetime.now().strftime("%d%m%Y")
hour_dir = datetime.datetime.now().strftime("%H%M")
ensure_snapshot_dir(path+"/"+self.cam_id+"/"+day_dir+"/"+hour_dir)
f_path = "{0}/{1}/{2}/{3}/{4}.jpg".format(
path,
self.cam_id,
day_dir,
hour_dir,
datetime.datetime.now().strftime("%S"),
)
urllib.urlretrieve(
'http://{0}/snapshot.cgi?user={1}&pwd={2}'.format(
self.address,
self.user,
self.pswd,
),
f_path,
) | Get a snapshot and save it to disk. |
def move(self, pos):
try:
payload = {"address":self.address, "user": self.user, "pwd": self.pswd, "pos": map_position(pos)}
resp = requests.get(
"http://{address}/decoder_control.cgi?command={pos}&user={user}&pwd={pwd}".format(**payload)
)
except KeyError:
raise CamException("Position must be within 1 to 16.")
if resp.status_code != 200:
raise CamException("Unauthorized. Wrong user or password.")
return "ok" | Move cam to given preset position.
pos - must be within 1 to 16.
Returns: CamException in case of errors, "ok" otherwise. |
def status(self):
resp = requests.get("http://{0}/get_status.cgi".format(self.address))
data = resp.text.replace(";", "")
data = data.replace("var", "")
data_s = data.split("\n")
# Last is an empty line
data_s.pop()
data_array = [s.split("=") for s in data_s]
return dict(data_array) | Retrieve some configuration params.
Note: info is returned even without a password |
def findOODWords(isleDict, wordList):
    '''
    Returns all of the out-of-dictionary words found in a list of utterances
    '''
    oodList = []
    for word in wordList:
        try:
            isleDict.lookup(word)
        except WordNotInISLE:
            oodList.append(word)
    oodList = list(set(oodList))
    oodList.sort()
    return oodList | Returns all of the out-of-dictionary words found in a list of utterances |
def autopair(isleDict, wordList):
    '''
    Tests whether adjacent words are OOD or not
    It returns complete wordLists with the matching words replaced.
    Each match yields one sentence.
    e.g.
    red ball chaser
    would return
    [[red_ball chaser], [red ball_chaser]], [0, 1]
    if 'red_ball' and 'ball_chaser' were both in the dictionary
    '''
    newWordList = [("%s_%s" % (wordList[i], wordList[i + 1]), i)
                   for i in range(0, len(wordList) - 1)]
    sentenceList = []
    indexList = []
    for word, i in newWordList:
        if word in isleDict.data:
            sentenceList.append(wordList[:i] + [word, ] + wordList[i + 1:])
            indexList.append(i)
    return sentenceList, indexList | Tests whether adjacent words are OOD or not
It returns complete wordLists with the matching words replaced.
Each match yields one sentence.
e.g.
red ball chaser
would return
[[red_ball chaser], [red ball_chaser]], [0, 1]
if 'red_ball' and 'ball_chaser' were both in the dictionary |
def _buildDict(self):
    '''
    Builds the isle textfile into a dictionary for fast searching
    '''
    lexDict = {}
    with io.open(self.islePath, "r", encoding='utf-8') as fd:
        wordList = [line.rstrip('\n') for line in fd]
    for row in wordList:
        word, pronunciation = row.split(" ", 1)
        word, extraInfo = word.split("(", 1)
        extraInfo = extraInfo.replace(")", "")
        extraInfoList = [segment for segment in extraInfo.split(",")
                         if ("_" not in segment and "+" not in segment and
                             ':' not in segment and segment != '')]
        lexDict.setdefault(word, [])
        lexDict[word].append((pronunciation, extraInfoList))
    return lexDict | Builds the isle textfile into a dictionary for fast searching |
def search(self, matchStr, numSyllables=None, wordInitial='ok',
           wordFinal='ok', spanSyllable='ok', stressedSyllable='ok',
           multiword='ok', pos=None):
    '''
    for help on isletool.LexicalTool.search(), see isletool.search()
    '''
    return search(self.data.items(), matchStr, numSyllables=numSyllables,
                  wordInitial=wordInitial, wordFinal=wordFinal,
                  spanSyllable=spanSyllable,
                  stressedSyllable=stressedSyllable,
                  multiword=multiword, pos=pos) | for help on isletool.LexicalTool.search(), see isletool.search() |
def timestamps(self):
    '''Get all timestamps from all series in the group.'''
    timestamps = set()
    for series in self.groups.itervalues():
        timestamps |= set(series.timestamps)
    return sorted(list(timestamps)) | Get all timestamps from all series in the group. |
def trend(self, **kwargs):
    '''Calculate a trend for all series in the group. See the
    `TimeSeries.trend()` method for more information.'''
    return DataFrame({ name: series.trend(**kwargs)
                       for name, series in self.groups.iteritems() }) | Calculate a trend for all series in the group. See the
`TimeSeries.trend()` method for more information. |
def forecast(self, horizon, **kwargs):
    '''Forecast all time series in the group. See the
    `TimeSeries.forecast()` method for more information.'''
    return DataFrame({ name: series.forecast(horizon, **kwargs)
                       for name, series in self.groups.iteritems() }) | Forecast all time series in the group. See the
`TimeSeries.forecast()` method for more information. |
def plot(self, overlay=True, **labels):  # pragma: no cover
    '''Plot all time series in the group.'''
    pylab = LazyImport.pylab()
    colours = list('rgbymc')
    colours_len = len(colours)
    colours_pos = 0
    plots = len(self.groups)
    for name, series in self.groups.iteritems():
        colour = colours[colours_pos % colours_len]
        colours_pos += 1
        if not overlay:
            pylab.subplot(plots, 1, colours_pos)
        kwargs = {}
        if name in labels:
            name = labels[name]
        if name is not None:
            kwargs['label'] = name
        pylab.plot(series.dates, series.values, '%s-' % colour, **kwargs)
        if name is not None:
            pylab.legend()
    pylab.show() | Plot all time series in the group. |
def rename(self, **kwargs):
    '''Rename series in the group.'''
    for old, new in kwargs.iteritems():
        if old in self.groups:
            self.groups[new] = self.groups[old]
            del self.groups[old] | Rename series in the group. |
def from_stream(
klass, stream, path=None, tabix_path=None, record_checks=None, parsed_samples=None
):
record_checks = record_checks or []
if tabix_path and not path:
raise ValueError("Must give path if tabix_path is given")
return Reader(
stream=stream,
path=path,
tabix_path=tabix_path,
record_checks=record_checks,
parsed_samples=parsed_samples,
) | Create new :py:class:`Reader` from file
.. note::
If you use the ``parsed_samples`` feature and you write out
records then you must not change the ``FORMAT`` of the record.
:param stream: ``file``-like object to read from
:param path: optional string with path to store (for display only)
:param list record_checks: record checks to perform, can contain
'INFO' and 'FORMAT'
:param list parsed_samples: ``list`` of ``str`` values with names of
samples to parse call information for (for speedup); leave to
``None`` for ignoring |
def from_path(klass, path, tabix_path=None, record_checks=None, parsed_samples=None):
record_checks = record_checks or []
path = str(path)
if path.endswith(".gz"):
f = gzip.open(path, "rt")
if not tabix_path:
tabix_path = path + ".tbi"
if not os.path.exists(tabix_path):
tabix_path = None # guessing path failed
else:
f = open(path, "rt")
return klass.from_stream(
stream=f,
path=path,
tabix_path=tabix_path,
record_checks=record_checks,
parsed_samples=parsed_samples,
) | Create new :py:class:`Reader` from path
.. note::
If you use the ``parsed_samples`` feature and you write out
records then you must not change the ``FORMAT`` of the record.
:param path: the path to load from (converted to ``str`` for
compatibility with ``path.py``)
:param tabix_path: optional string with path to TBI index,
automatic inferral from ``path`` will be tried on the fly
if not given
:param list record_checks: record checks to perform, can contain
'INFO' and 'FORMAT' |
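A hedged usage sketch for the reader above, assuming the vcfpy package layout these snippets come from; the file name is a placeholder.

```python
# Minimal sketch, assuming the vcfpy public API; 'input.vcf.gz' is hypothetical.
import vcfpy

reader = vcfpy.Reader.from_path('input.vcf.gz')
for record in reader:
    print(record.CHROM, record.POS, record.REF)
```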
def fetch(self, chrom_or_region, begin=None, end=None):
if begin is not None and end is None:
raise ValueError("begin and end must both be None or neither")
# close tabix file if any and is open
if self.tabix_file and not self.tabix_file.closed:
self.tabix_file.close()
# open tabix file if not yet open
if not self.tabix_file or self.tabix_file.closed:
self.tabix_file = pysam.TabixFile(filename=self.path, index=self.tabix_path)
# jump to the next position
if begin is None:
self.tabix_iter = self.tabix_file.fetch(region=chrom_or_region)
else:
self.tabix_iter = self.tabix_file.fetch(reference=chrom_or_region, start=begin, end=end)
return self | Jump to the start position of the given chromosomal position
and limit iteration to the end position
:param str chrom_or_region: name of the chromosome to jump to if
begin and end are given and a samtools region string otherwise
(e.g. "chr1:123,456-123,900").
:param int begin: 0-based begin position (inclusive)
:param int end: 0-based end position (exclusive) |
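A region-query sketch building on the signature above; paths are placeholders and a tabix index is assumed to exist.

```python
# Sketch: region queries via the tabix index (paths hypothetical).
reader = vcfpy.Reader.from_path('input.vcf.gz', tabix_path='input.vcf.gz.tbi')
for record in reader.fetch('chr1', 100000, 200000):  # 0-based begin, exclusive end
    print(record)
# Equivalent samtools-style region string form:
# reader.fetch('chr1:100,001-200,000')
```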
def close(self):
if self.tabix_file and not self.tabix_file.closed:
self.tabix_file.close()
if self.stream:
self.stream.close() | Close underlying stream |
def serialize_for_header(key, value):
if key in QUOTE_FIELDS:
return json.dumps(value)
elif isinstance(value, str):
if " " in value or "\t" in value:
return json.dumps(value)
else:
return value
elif isinstance(value, list):
return "[{}]".format(", ".join(value))
else:
return str(value) | Serialize value for the given mapping key for a VCF header line |
def header_without_lines(header, remove):
remove = set(remove)
# Copy over lines that are not removed
lines = []
for line in header.lines:
if hasattr(line, "mapping"):
if (line.key, line.mapping.get("ID", None)) in remove:
continue # filter out
else:
if (line.key, line.value) in remove:
continue # filter out
lines.append(line)
return Header(lines, header.samples) | Return :py:class:`Header` without lines given in ``remove``
``remove`` is an iterable of pairs ``key``/``ID`` with the VCF header key
and ``ID`` of entry to remove. In the case that a line does not have
a ``mapping`` entry, you can give the full value to remove.
.. code-block:: python
# header is a vcfpy.Header, e.g., as read earlier from file
new_header = vcfpy.header_without_lines(
header, [('assembly', None), ('FILTER', 'PASS')])
# now, the header lines starting with "##assembly=" and the "PASS"
# filter line will be missing from new_header |
def mapping_to_str(mapping):
result = ["<"]
for i, (key, value) in enumerate(mapping.items()):
if i > 0:
result.append(",")
result += [key, "=", serialize_for_header(key, value)]
result += [">"]
return "".join(result) | Convert mapping to string |
def _build_indices(self):
result = {key: OrderedDict() for key in LINES_WITH_ID}
for line in self.lines:
if line.key in LINES_WITH_ID:
result.setdefault(line.key, OrderedDict())
if line.mapping["ID"] in result[line.key]:
warnings.warn(
("Seen {} header more than once: {}, using first" "occurence").format(
line.key, line.mapping["ID"]
),
DuplicateHeaderLineWarning,
)
else:
result[line.key][line.mapping["ID"]] = line
else:
result.setdefault(line.key, [])
result[line.key].append(line)
return result | Build indices for the different field types |
def copy(self):
return Header([line.copy() for line in self.lines], self.samples.copy()) | Return a copy of this header |
def get_lines(self, key):
if key in self._indices:
return self._indices[key].values()
else:
return [] | Return header lines having the given ``key`` as their type |
def has_header_line(self, key, id_):
if key not in self._indices:
return False
else:
return id_ in self._indices[key] | Return whether there is a header line with the given ID of the
type given by ``key``
:param key: The VCF header key/line type.
:param id_: The ID value to compare against.
:return: ``True`` if there is a header line starting with ``##${key}=``
in the VCF file having the mapping entry ``ID`` set to ``id_``. |
def add_line(self, header_line):
self.lines.append(header_line)
self._indices.setdefault(header_line.key, OrderedDict())
if not hasattr(header_line, "mapping"):
return False # no registration required
if self.has_header_line(header_line.key, header_line.mapping["ID"]):
warnings.warn(
(
"Detected duplicate header line with type {} and ID {}. "
"Ignoring this and subsequent one"
).format(header_line.key, header_line.mapping["ID"]),
DuplicateHeaderLineWarning,
)
return False
else:
self._indices[header_line.key][header_line.mapping["ID"]] = header_line
return True | Add header line, updating any necessary support indices
:return: ``False`` on conflicting line and ``True`` otherwise |
def copy(self):
mapping = OrderedDict(self.mapping.items())
return self.__class__(self.key, self.value, mapping) | Return a copy |
def _parse_number(klass, number):
try:
return int(number)
except ValueError as e:
if number in VALID_NUMBERS:
return number
else:
raise e | Parse ``number`` into an ``int`` or return ``number`` if it is a valid
expression for an INFO/FORMAT "Number".
:param str number: ``str`` to parse and check |
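A small behavior sketch for the parser above; the owning class is elided here, and VALID_NUMBERS is assumed to hold the special VCF Number codes such as 'A', 'R', 'G', and '.'.

```python
# Behavior sketch for _parse_number (owning class elided; VALID_NUMBERS assumed).
assert _parse_number('2') == 2     # plain integer count
assert _parse_number('G') == 'G'   # per-genotype code passes through
# _parse_number('x') would re-raise the ValueError from int('x')
```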
def _syllabifyPhones(phoneList, syllableList):
    '''
    Given a phone list and a syllable list, syllabify the phones
    Typically used by findBestSyllabification which first aligns the phoneList
    with a dictionary phoneList and then uses the dictionary syllabification
    to syllabify the input phoneList.
    '''
    numPhoneList = [len(syllable) for syllable in syllableList]
    start = 0
    syllabifiedList = []
    for end in numPhoneList:
        syllable = phoneList[start:start + end]
        syllabifiedList.append(syllable)
        start += end
    return syllabifiedList | Given a phone list and a syllable list, syllabify the phones
Typically used by findBestSyllabification which first aligns the phoneList
with a dictionary phoneList and then uses the dictionary syllabification
to syllabify the input phoneList. |
def _getSyllableNucleus(phoneList):
    '''
    Given the phones in a syllable, retrieves the vowel index
    '''
    cvList = ['V' if isletool.isVowel(phone) else 'C' for phone in phoneList]
    vowelCount = cvList.count('V')
    if vowelCount > 1:
        raise TooManyVowelsInSyllable(phoneList, cvList)
    if vowelCount == 1:
        stressI = cvList.index('V')
    else:
        stressI = None
    return stressI | Given the phones in a syllable, retrieves the vowel index |
def findClosestPronunciation(inputIsleWordList, aPron):
    '''
    Find the closest dictionary pronunciation to a provided pronunciation
    '''
    retList = _findBestPronunciation(inputIsleWordList, aPron)
    isleWordList = retList[0]
    bestIndex = retList[3]
    return isleWordList[bestIndex] | Find the closest dictionary pronunciation to a provided pronunciation |
def create_switch(type, settings, pin):
switch = None
if type == "A":
group, device = settings.split(",")
switch = pi_switch.RCSwitchA(group, device)
elif type == "B":
addr, channel = settings.split(",")
addr = int(addr)
channel = int(channel)
switch = pi_switch.RCSwitchB(addr, channel)
elif type == "C":
family, group, device = settings.split(",")
group = int(group)
device = int(device)
switch = pi_switch.RCSwitchC(family, group, device)
elif type == "D":
group, device = settings.split(",")
device = int(device)
switch = pi_switch.RCSwitchD(group, device)
else:
print "Type %s is not supported!" % type
sys.exit()
switch.enableTransmit(pin)
return switch | Create a switch.
Args:
type: (str): type of the switch [A,B,C,D]
settings (str): a comma separated list
pin (int): wiringPi pin
Returns:
switch |
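A minimal usage sketch for the factory above; the address/channel values and the wiringPi pin are placeholders that depend on your hardware.

```python
# Sketch: a type-B switch on address 1, channel 3, transmitting on wiringPi
# pin 0 (values hypothetical; wiring depends on your setup).
switch = create_switch("B", "1,3", 0)
# The returned pi_switch object is assumed to expose switchOn()/switchOff().
```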
def format_atomic(value):
# Perform escaping
if isinstance(value, str):
if any(r in value for r in record.RESERVED_CHARS):
for k, v in record.ESCAPE_MAPPING:
value = value.replace(k, v)
# String-format the given value
if value is None:
return "."
else:
return str(value) | Format atomic value
This function also takes care of escaping the value in case one of the
reserved characters occurs in the value. |
def format_value(field_info, value, section):
if section == "FORMAT" and field_info.id == "FT":
if not value:
return "."
elif isinstance(value, list):
return ";".join(map(format_atomic, value))
elif field_info.number == 1:
if value is None:
return "."
else:
return format_atomic(value)
else:
if not value:
return "."
else:
return ",".join(map(format_atomic, value)) | Format possibly compound value given the FieldInfo |
def from_stream(klass, stream, header, path=None, use_bgzf=None):
if use_bgzf or (use_bgzf is None and path and path.endswith(".gz")):
stream = bgzf.BgzfWriter(fileobj=stream)
return Writer(stream, header, path) | Create new :py:class:`Writer` from file
Note that for getting bgzf support, you have to pass in a stream
opened in binary mode. Further, you either have to provide a ``path``
ending in ``".gz"`` or set ``use_bgzf=True``. Otherwise, you will
get the notorious "TypeError: 'str' does not support the buffer
interface".
:param stream: ``file``-like object to write to
:param header: VCF header to use, lines and samples are deep-copied
:param path: optional string with path to store (for display only)
:param use_bgzf: indicator whether to write bgzf to ``stream``
if ``True``, never if ``False``, and infer from ``path`` if ``None`` |
def from_path(klass, path, header):
path = str(path)
use_bgzf = False # we already interpret path
if path.endswith(".gz"):
f = bgzf.BgzfWriter(filename=path)
else:
f = open(path, "wt")
return klass.from_stream(f, header, path, use_bgzf=use_bgzf) | Create new :py:class:`Writer` from path
:param path: the path to load from (converted to ``str`` for
compatibility with ``path.py``)
:param header: VCF header to use, lines and samples are deep-copied |
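A round-trip sketch using the reader/writer pair above, assuming the vcfpy API; paths are placeholders.

```python
# Sketch: copy a VCF with vcfpy (paths hypothetical; header is deep-copied
# as documented above).
reader = vcfpy.Reader.from_path('input.vcf')
writer = vcfpy.Writer.from_path('output.vcf.gz', reader.header)
for record in reader:
    writer.write_record(record)
writer.close()
```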
def _write_header(self):
for line in self.header.lines:
print(line.serialize(), file=self.stream)
if self.header.samples.names:
print(
"\t".join(list(parser.REQUIRE_SAMPLE_HEADER) + self.header.samples.names),
file=self.stream,
)
else:
print("\t".join(parser.REQUIRE_NO_SAMPLE_HEADER), file=self.stream) | Write out the header |
def _serialize_record(self, record):
f = self._empty_to_dot
row = [record.CHROM, record.POS]
row.append(f(";".join(record.ID)))
row.append(f(record.REF))
if not record.ALT:
row.append(".")
else:
row.append(",".join([f(a.serialize()) for a in record.ALT]))
row.append(f(record.QUAL))
row.append(f(";".join(record.FILTER)))
row.append(f(self._serialize_info(record)))
if record.FORMAT:
row.append(":".join(record.FORMAT))
row += [
self._serialize_call(record.FORMAT, record.call_for_sample[s])
for s in self.header.samples.names
]
print(*row, sep="\t", file=self.stream) | Serialize whole Record |
def _serialize_info(self, record):
result = []
for key, value in record.INFO.items():
info = self.header.get_info_field_info(key)
if info.type == "Flag":
result.append(key)
else:
result.append("{}={}".format(key, format_value(info, value, "INFO")))
return ";".join(result) | Return serialized version of record.INFO |
def _serialize_call(self, format_, call):
if isinstance(call, record.UnparsedCall):
return call.unparsed_data
else:
result = [
format_value(self.header.get_format_field_info(key), call.data.get(key), "FORMAT")
for key in format_
]
return ":".join(result) | Return serialized version of the Call using the record's FORMAT |
def _create_extended_jinja_tags(self, nodes):
jinja_a = None
jinja_b = None
ext_node = None
ext_nodes = []
for node in nodes:
if isinstance(node, EmptyLine):
continue
if node.has_children():
node.children = self._create_extended_jinja_tags(node.children)
if not isinstance(node, JinjaTag):
jinja_a = None
continue
if jinja_a is None or (
node.tag_name in self._extended_tags and jinja_a.tag_name not in self._extended_tags[node.tag_name]):
jinja_a = node
continue
if node.tag_name in self._extended_tags and \
jinja_a.tag_name in self._extended_tags[node.tag_name]:
if ext_node is None:
ext_node = ExtendedJinjaTag()
ext_node.add(jinja_a)
ext_nodes.append(ext_node)
ext_node.add(node)
else:
ext_node = None
jinja_a = node
#replace the nodes with the new extended node
for node in ext_nodes:
nodes.insert(nodes.index(node.children[0]), node)
index = nodes.index(node.children[0])
del nodes[index:index+len(node.children)]
return nodes | Loops through the nodes and looks for special jinja tags that
contains more than one tag but only one ending tag. |
def has_children(self):
    "returns False if children is empty or contains only empty lines else True."
    return bool([x for x in self.children if not isinstance(x, EmptyLine)]) | returns False if children is empty or contains only empty lines else True. |
def parse_requirements(path):
requirements = []
with open(path, "rt") as reqs_f:
for line in reqs_f:
line = line.strip()
if line.startswith("-r"):
fname = line.split()[1]
inner_path = os.path.join(os.path.dirname(path), fname)
requirements += parse_requirements(inner_path)
elif line != "" and not line.startswith("#"):
requirements.append(line)
return requirements | Parse ``requirements.txt`` at ``path``. |
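A typical setup.py usage sketch for the helper above; the package metadata is hypothetical.

```python
# Sketch: feed parsed requirements into setuptools.
from setuptools import setup

setup(
    name='example-package',  # hypothetical metadata
    install_requires=parse_requirements('requirements.txt'),
)
```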
def __parse_args(cam_c):
user = None
pswd = None
name = None
address = "{address}:{port}".format(**cam_c)
if "user" in cam_c:
user = cam_c["user"]
if "pswd" in cam_c:
pswd = cam_c["pswd"]
if "name" in cam_c:
name = cam_c["name"]
return {"user": user, "pswd": pswd, "name": name, "address": address} | Arrange class init params from conf file.
Returns: a dict with values. |
def load_cams(conf_file):
with open(conf_file, "r") as c_file:
lines = c_file.readlines()
cams_conf = [json.loads(j) for j in lines]
cams = []
for cam_c in cams_conf:
init_params = __parse_args(cam_c)
cams.append(cam_types[cam_c["type"]](**init_params))
return cams | Reads cams conf from file and instantiates the appropriate classes.
Returns: an array of IpCam classes. |
def watch(cams, path=None, delay=10):
while True:
for c in cams:
c.snap(path)
time.sleep(delay) | Get screenshots from all cams at a defined interval. |
def score(self, phone_number, account_lifecycle_event, **params):
return self.post(SCORE_RESOURCE.format(phone_number=phone_number),
account_lifecycle_event=account_lifecycle_event,
**params) | Score is an API that delivers reputation scoring based on phone number intelligence, traffic patterns, machine
learning, and a global data consortium.
See https://developer.telesign.com/docs/score-api for detailed API documentation. |
def load_version(filename='fuzzyhashlib/version.py'):
with open(filename) as source:
text = source.read()
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", text)
if not match:
msg = "Unable to find version number in {}".format(filename)
raise RuntimeError(msg)
version = match.group(1)
return version | Parse a __version__ number from a source file |
def post(self, resource, **params):
return self._execute(self.session.post, 'POST', resource, **params) | Generic TeleSign REST API POST handler.
:param resource: The partial resource URI to perform the request against, as a string.
:param params: Body params to perform the POST request with, as a dictionary.
:return: The RestClient Response object. |
def get(self, resource, **params):
return self._execute(self.session.get, 'GET', resource, **params) | Generic TeleSign REST API GET handler.
:param resource: The partial resource URI to perform the request against, as a string.
:param params: Body params to perform the GET request with, as a dictionary.
:return: The RestClient Response object. |
def put(self, resource, **params):
return self._execute(self.session.put, 'PUT', resource, **params) | Generic TeleSign REST API PUT handler.
:param resource: The partial resource URI to perform the request against, as a string.
:param params: Body params to perform the PUT request with, as a dictionary.
:return: The RestClient Response object. |
def delete(self, resource, **params):
return self._execute(self.session.delete, 'DELETE', resource, **params) | Generic TeleSign REST API DELETE handler.
:param resource: The partial resource URI to perform the request against, as a string.
:param params: Body params to perform the DELETE request with, as a dictionary.
:return: The RestClient Response object. |
def _execute(self, method_function, method_name, resource, **params):
resource_uri = "{api_host}{resource}".format(api_host=self.api_host, resource=resource)
url_encoded_fields = self._encode_params(params)
headers = RestClient.generate_telesign_headers(self.customer_id,
self.api_key,
method_name,
resource,
url_encoded_fields,
user_agent=self.user_agent)
if method_name in ['POST', 'PUT']:
payload = {'data': url_encoded_fields}
else:
payload = {'params': url_encoded_fields}
response = self.Response(method_function(resource_uri,
headers=headers,
timeout=self.timeout,
**payload))
return response | Generic TeleSign REST API request handler.
:param method_function: The Requests HTTP request function to perform the request.
:param method_name: The HTTP method name, as an upper case string.
:param resource: The partial resource URI to perform the request against, as a string.
:param params: Body params to perform the HTTP request with, as a dictionary.
:return: The RestClient Response object. |
def message(self, phone_number, message, message_type, **params):
return self.post(MESSAGING_RESOURCE,
phone_number=phone_number,
message=message,
message_type=message_type,
**params) | Send a message to the target phone_number.
See https://developer.telesign.com/docs/messaging-api for detailed API documentation. |
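A hedged end-to-end sketch, assuming the published telesign SDK layout; credentials and the destination number are placeholders.

```python
# Sketch, assuming the telesign Python SDK; values are placeholders.
from telesign.messaging import MessagingClient

messaging = MessagingClient('customer_id', 'api_key')
response = messaging.message('+15555551234', 'Your code is 32991', 'OTP')
print(response.status_code)
```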
def status(self, reference_id, **params):
return self.get(MESSAGING_STATUS_RESOURCE.format(reference_id=reference_id),
**params) | Retrieves the current status of the message.
See https://developer.telesign.com/docs/messaging-api for detailed API documentation. |
def phoneid(self, phone_number, **params):
return self.post(PHONEID_RESOURCE.format(phone_number=phone_number),
**params) | The PhoneID API provides a cleansed phone number, phone type, and telecom carrier information to determine the
best communication method - SMS or voice.
See https://developer.telesign.com/docs/phoneid-api for detailed API documentation. |
def hexdigest(self):
if self._pre_computed_hash is None:
return libssdeep_wrapper.fuzzy_digest(self._state, 0)
else:
return self._pre_computed_hash | Return the digest value as a string of hexadecimal digits. |
def update(self, buf):
if self._updatable:
return libssdeep_wrapper.fuzzy_update(self._state, buf)
else:
raise InvalidOperation("Cannot update sdeep created from hash") | Update this hash object's state with the provided string. |
def copy(self):
if self._pre_computed_hash is None:
temp = ssdeep(buf="")
else:
temp = ssdeep(hash=self._pre_computed_hash)
libssdeep_wrapper.fuzzy_free(temp._state)
temp._state = libssdeep_wrapper.fuzzy_clone(self._state)
temp._updatable = self._updatable
temp._pre_computed_hash = self._pre_computed_hash
return temp | Returns a new instance which identical to this instance. |
def hexdigest(self):
if not self._final:
if self._buf_len >= self._MIN_LEN:
self._tlsh.final()
self._final = True
else:
raise ValueError("tlsh requires buffer with length >= %d "
"for mode where force = %s" % \
(self._MIN_LEN, False))
return self._tlsh.hexdigest() | Return the digest value as a string of hexadecimal digits. |
def update(self, buf):
if self._final:
raise InvalidOperation("Cannot update finalised tlsh")
else:
self._buf_len += len(buf)
return self._tlsh.update(buf) | Update this hash object's state with the provided string. |
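A hedged usage sketch for the fuzzy-hash wrappers above, assuming the constructors shown earlier (``ssdeep(buf=...)``); the input data is illustrative.

```python
# Sketch, assuming fuzzyhashlib's ssdeep wrapper accepts a buf kwarg
# (as copy() above suggests); data is illustrative.
h = ssdeep(buf=b"The quick brown fox jumps over the lazy dog")
h.update(b" -- and again")
print(h.hexdigest())
```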
def call(self, phone_number, message, message_type, **params):
return self.post(VOICE_RESOURCE,
phone_number=phone_number,
message=message,
message_type=message_type,
**params) | Send a voice call to the target phone_number.
See https://developer.telesign.com/docs/voice-api for detailed API documentation. |
def status(self, reference_id, **params):
return self.get(VOICE_STATUS_RESOURCE.format(reference_id=reference_id),
**params) | Retrieves the current status of the voice call.
See https://developer.telesign.com/docs/voice-api for detailed API documentation. |
def status(self, external_id, **params):
return self.get(APPVERIFY_STATUS_RESOURCE.format(external_id=external_id),
**params) | Retrieves the verification result for an App Verify transaction by external_id. To ensure a secure verification
flow you must check the status using TeleSign's servers on your backend. Do not rely on the SDK alone to
indicate a successful verification.
See https://developer.telesign.com/docs/app-verify-android-sdk-self#section-get-status-service or
https://developer.telesign.com/docs/app-verify-ios-sdk-self#section-get-status-service for detailed
API documentation. |
def get_asset_location(element, attr):
asset_location = re.match(r'^/?(static)?/?(.*)', element[attr],
re.IGNORECASE)
# replace relative links i.e (../../static)
asset_location = asset_location.group(2).replace('../', '')
return asset_location | Get Asset Location.
Remove leading slash e.g '/static/images.jpg' ==> static/images.jpg
Also, if the url is also prefixed with static, it would be removed.
e.g static/image.jpg ==> image.jpg |
def transform(matches, framework, namespace, static_endpoint):
transformed = []
namespace = namespace + '/' if namespace else ''
for attribute, elements in matches:
for element in elements:
asset_location = get_asset_location(element, attribute)
# string substitution
sub_dict = {
'static_endpoint': static_endpoint, 'namespace': namespace,
'asset_location': asset_location
}
transformed_string = frameworks[framework] % sub_dict
res = (attribute, element[attribute], transformed_string)
transformed.append(res)
return transformed | The actual transformation occurs here.
flask example: 'images/staticfy.jpg' ==>
"{{ url_for('static', filename='images/staticfy.jpg') }}" |
def get_elements(html_file, tags):
with open(html_file) as f:
document = BeautifulSoup(f, 'html.parser')
def condition(tag, attr):
# Don't include external links
return lambda x: x.name == tag \
and not x.get(attr, 'http').startswith(('http', '//'))
all_tags = [(attr, document.find_all(condition(tag, attr)))
for tag, attr in tags]
return all_tags | Extract all the elements we're interested in.
Returns a list of tuples with the attribute as first item
and the list of elements as the second item. |
def replace_lines(html_file, transformed):
result = []
with codecs.open(html_file, 'r', 'utf-8') as input_file:
for line in input_file:
# replace all single quotes with double quotes
line = re.sub(r'\'', '"', line)
for attr, value, new_link in transformed:
if attr in line and value in line:
# replace old link with new staticfied link
new_line = line.replace(value, new_link)
result.append(new_line)
break
else:
result.append(line)
return ''.join(result) | Replace lines in the old file with the transformed lines. |
def staticfy(html_file, args=argparse.ArgumentParser()):
# unpack arguments
static_endpoint = args.static_endpoint or 'static'
framework = args.framework or os.getenv('STATICFY_FRAMEWORK', 'flask')
add_tags = args.add_tags or {}
exc_tags = args.exc_tags or {}
namespace = args.namespace or {}
# default tags
tags = {('img', 'src'), ('link', 'href'), ('script', 'src')}
# generate additional_tags
add_tags = {(tag, attr) for tag, attr in add_tags.items()}
tags.update(add_tags)
# remove tags if any was specified
exc_tags = {(tag, attr) for tag, attr in exc_tags.items()}
tags = tags - exc_tags
# get elements we're interested in
matches = get_elements(html_file, tags)
# transform old links to new links
transformed = transform(matches, framework, namespace, static_endpoint)
return replace_lines(html_file, transformed) | Staticfy method.
Loops through each line of the file and replaces the old links |
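A programmatic sketch for calling staticfy() outside the CLI; since it reads attributes off the args object, a Namespace with the expected fields stands in for parsed arguments (the template path is hypothetical).

```python
# Sketch: drive staticfy() without the CLI; path and values are placeholders.
import argparse

args = argparse.Namespace(
    static_endpoint='static', framework='flask', add_tags={},
    exc_tags={}, namespace='', o=None, output=None,
)
html = staticfy('templates/index.html', args=args)
```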
def file_ops(staticfied, args):
destination = args.o or args.output
if destination:
with open(destination, 'w') as file:
file.write(staticfied)
else:
print(staticfied) | Write to stdout or a file |
def parse_cmd_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str,
help='Filename to be staticfied')
parser.add_argument('--static-endpoint',
help='Static endpoint which is "static" by default')
parser.add_argument('--add-tags', type=str,
help='Additional tags to staticfy')
parser.add_argument('--exc-tags', type=str, help='tags to exclude')
parser.add_argument('--framework', type=str,
help='Web Framework: Defaults to Flask')
parser.add_argument('--namespace', type=str,
help='String to prefix url with')
parser.add_argument('-o', type=str, help='Specify output file')
parser.add_argument('--output', type=str, help='Specify output file')
args = parser.parse_args()
return args | Parse command line arguments. |
def main():
args = parse_cmd_arguments()
html_file = args.file
try:
json.loads(args.add_tags or '{}')
json.loads(args.exc_tags or '{}')
except ValueError:
print('\033[91m' + 'Invalid json string: please provide a valid json '
'string e.g {}'.format('\'{"img": "data-url"}\'') + '\033[0m')
sys.exit(1)
staticfied = staticfy(html_file, args=args).encode('utf-8')
file_ops(staticfied, args=args) | Main method. |
def find_keys(args):
key = args['--key']
if key:
return [key]
keyfile = args['--apikeys']
if keyfile:
return read_keyfile(keyfile)
envkey = os.environ.get('TINYPNG_API_KEY', None)
if envkey:
return [envkey]
local_keys = join(abspath("."), "tinypng.keys")
if isfile(local_keys):
return read_keyfile(local_keys)
home_keys = expanduser("~/.tinypng.keys")
if isfile(home_keys):
return read_keyfile(home_keys)
return [] | Get keys specified in arguments
returns a list of keys (empty if none found) |
def get_shrink_data_info(in_data, api_key=None):
if api_key:
return _shrink_info(in_data, api_key)
api_keys = find_keys()
for key in api_keys:
try:
return _shrink_info(in_data, key)
except ValueError:
pass
raise ValueError('No valid api key found') | Shrink binary data of a png
returns api_info |
def get_shrunk_data(shrink_info):
out_url = shrink_info['output']['url']
try:
return requests.get(out_url).content
except HTTPError as err:
if err.code != 404:
raise
exc = ValueError("Unable to read png file \"{0}\"".format(out_url))
exc.__cause__ = err
raise exc | Read shrunk file from tinypng.org api. |
def shrink_data(in_data, api_key=None):
info = get_shrink_data_info(in_data, api_key)
return info, get_shrunk_data(info) | Shrink binary data of a png
returns (api_info, shrunk_data) |
def shrink_file(in_filepath, api_key=None, out_filepath=None):
info = get_shrink_file_info(in_filepath, api_key, out_filepath)
write_shrunk_file(info)
return info | Shrink png file and write it back to a new file
The default file path replaces ".png" with ".tiny.png".
returns api_info (including info['output']['filepath']) |
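A short usage sketch for the function above; the key and file name are placeholders, and the response fields follow the tinypng API shape assumed throughout these helpers.

```python
# Sketch: compress a PNG next to the original (key and path are placeholders;
# keys are auto-discovered when api_key is None).
info = shrink_file('logo.png', api_key='YOUR_TINYPNG_KEY')
print(info['output']['filepath'])
```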
def verify_telesign_callback_signature(api_key, signature, json_str):
your_signature = b64encode(HMAC(b64decode(api_key), json_str.encode("utf-8"), sha256).digest()).decode("utf-8")
if len(signature) != len(your_signature):
return False
# avoid timing attack with constant time equality check
signatures_equal = True
for x, y in zip(signature, your_signature):
if not x == y:
signatures_equal = False
return signatures_equal | Verify that a callback was made by TeleSign and was not sent by a malicious client by verifying the signature.
:param api_key: the TeleSign API api_key associated with your account.
:param signature: the TeleSign Authorization header value supplied in the callback, as a string.
:param json_str: the POST body text, that is, the JSON string sent by TeleSign describing the transaction status. |
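A webhook-side sketch using the function above; how you obtain the header value and the raw body depends on your web framework, so those names (and the handlers) are placeholders.

```python
# Sketch: verify a TeleSign callback before trusting it (names hypothetical).
raw_body = request_body_text          # exact JSON string TeleSign POSTed
signature = authorization_header      # signature TeleSign supplied
if verify_telesign_callback_signature(api_key, signature, raw_body):
    process_callback(raw_body)        # hypothetical handler
else:
    reject_request()                  # hypothetical handler
```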
def add_item(self, host, key, value, clock=None, state=0):
if clock is None:
clock = self.clock
if self._config.data_type == "items":
item = {"host": host, "key": key,
"value": value, "clock": clock, "state": state}
elif self._config.data_type == "lld":
item = {"host": host, "key": key, "clock": clock, "state": state,
"value": json.dumps({"data": value})}
else:
if self.logger: # pragma: no cover
self.logger.error("Setup data_type before adding data")
raise ValueError('Setup data_type before adding data')
self._items_list.append(item) | Add a single item into DataContainer
:host: hostname to which the item will be linked
:key: item key as defined in Zabbix
:value: item value
:clock: timestamp as integer. If not provided, self.clock() will be used |
def add(self, data):
for host in data:
for key in data[host]:
if not data[host][key] == []:
self.add_item(host, key, data[host][key]) | Add a list of item into the container
:data: dict of items & value per hostname |
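A bulk-add sketch for the method above; container setup is elided, and data_type is assumed to have been set to "items" beforehand.

```python
# Sketch: bulk-add items keyed by host (hostnames and keys are placeholders).
container.add({
    'web-server-01': {'system.cpu.load': 0.42, 'vm.memory.size': 1024},
    'web-server-02': {'system.cpu.load': 0.13},
})
```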
def _send_common(self, item):
total = len(item)
processed = failed = time = 0
if self._config.dryrun is True:
total = len(item)
processed = failed = time = 0
response = 'dryrun'
else:
self._send_to_zabbix(item)
response, processed, failed, total, time = self._read_from_zabbix()
output_key = '(bulk)'
output_item = '(bulk)'
if self.debug_level >= 4:
output_key = item[0]['key']
output_item = item[0]['value']
if self.logger: # pragma: no cover
self.logger.info(
"" +
ZBX_DBG_SEND_RESULT % (
processed,
failed,
total,
output_key,
output_item,
response
)
)
return response, processed, failed, total, time | Common part of sending operations
Calls SenderProtocol._send_to_zabbix
Returns result as provided by _handle_response
:item: either a list or a single item depending on debug_level |
def _reset(self):
# Reset DataContainer to default values
# So that it can be reused
if self.logger: # pragma: no cover
self.logger.info("Reset DataContainer")
self._items_list = []
self._config.data_type = None | Reset main DataContainer properties |
def logger(self, value):
if isinstance(value, logging.Logger):
self._logger = value
else:
if self._logger: # pragma: no cover
self._logger.error("logger requires a logging instance")
raise ValueError('logger requires a logging instance') | Set logger instance for the class |
def bind(self, event_name, callback):
if event_name not in self.event_callbacks.keys():
self.event_callbacks[event_name] = []
self.event_callbacks[event_name].append(callback) | Bind an event to a callback
:param event_name: The name of the event to bind to.
:type event_name: str
:param callback: The callback to notify of this event. |
def get_issue(self, issue_id, params=None):
return self._get(self.API_URL + 'issue/{}'.format(issue_id), params=params) | Returns a full representation of the issue for the given issue key.
The issue JSON consists of the issue key and a collection of fields. Additional information like links to
workflow transition sub-resources, or HTML rendered values of the fields supporting HTML rendering can be
retrieved with expand request parameter specified.
The fields request parameter accepts a comma-separated list of fields to include in the response. It can be used
to retrieve a subset of fields. By default all fields are returned in the response. A particular field can be
excluded from the response if prefixed with a "-" (minus) sign. Parameter can be provided multiple times on a
single request.
By default, all fields are returned in the response. Note: this is different from a JQL search - only navigable
fields are returned by default (*navigable).
Args:
issue_id:
params:
Returns: |
def create_issue(self, data, params=None):
return self._post(self.API_URL + 'issue', data=data, params=params) | Creates an issue or a sub-task from a JSON representation.
You can provide two parameters in request's body: update or fields. The fields, that can be set on an issue
create operation, can be determined using the /rest/api/2/issue/createmeta resource. If a particular field is
not configured to appear on the issue's Create screen, then it will not be returned in the createmeta response.
A field validation error will occur if such field is submitted in request.
Creating a sub-task is similar to creating an issue with the following differences:
issueType field must be set to a sub-task issue type (use /issue/createmeta to find sub-task issue types), and
You must provide a parent field with the ID or key of the parent issue.
Args:
data:
params:
Returns: |
def delete_issue(self, issue_id, params=None):
return self._delete(self.API_URL + 'issue/{}'.format(issue_id), params=params) | Deletes an individual issue.
If the issue has sub-tasks you must set the deleteSubtasks=true parameter to delete the issue. You cannot delete
an issue without deleting its sub-tasks.
Args:
issue_id:
params:
Returns: |
def subscribe(self, channel_name):
data = {'channel': channel_name}
if channel_name.startswith('presence-'):
data['auth'] = self._generate_presence_key(
self.connection.socket_id,
self.key,
channel_name,
self.secret,
self.user_data
)
data['channel_data'] = json.dumps(self.user_data)
elif channel_name.startswith('private-'):
data['auth'] = self._generate_private_key(
self.connection.socket_id,
self.key,
channel_name,
self.secret
)
self.connection.send_event('pusher:subscribe', data)
self.channels[channel_name] = Channel(channel_name, self.connection)
return self.channels[channel_name] | Subscribe to a channel
:param channel_name: The name of the channel to subscribe to.
:type channel_name: str
:rtype : Channel |
def _handle_response(self, zbx_answer):
zbx_answer = json.loads(zbx_answer)
if self._logger: # pragma: no cover
self._logger.info(
"Anaylizing Zabbix Server's answer"
)
if zbx_answer:
self._logger.debug("Zabbix Server response is: [%s]" % zbx_answer)
# Default item number is the length of the storage list
nb_item = len(self._items_list)
if self._config.debug_level >= 4:
# If debug enabled, force it to 1
nb_item = 1
# If dryrun is disabled, we can process answer
response = zbx_answer.get('response')
result = re.findall(ZBX_RESP_REGEX, zbx_answer.get('info'))
processed, failed, total, time = result[0]
return response, int(processed), int(failed), int(total), float(time) | Analyze Zabbix Server response
Returns a list with number of:
* processed items
* failed items
* total items
* time spent
:zbx_answer: Zabbix server response as string |
def list(self, pagination=True, page_size=None, page=None, **queryparams):
if page_size and pagination:
try:
page_size = int(page_size)
except (ValueError, TypeError):
page_size = 100
queryparams['page_size'] = page_size
result = self.requester.get(
self.instance.endpoint, query=queryparams, paginate=pagination
)
objects = SearchableList()
objects.extend(self.parse_list(result.json()))
if result.headers.get('X-Pagination-Next', False) and not page:
next_page = 2
else:
next_page = None
while next_page:
pageparams = queryparams.copy()
pageparams['page'] = next_page
result = self.requester.get(
self.instance.endpoint, query=pageparams,
)
objects.extend(self.parse_list(result.json()))
if result.headers.get('X-Pagination-Next', False):
next_page += 1
else:
next_page = None
return objects | Retrieves a list of objects.
By default uses local cache and remote pagination
If pagination is used and no page is requested (the default), all the
remote objects are retrieved and appended in a single list.
If pagination is disabled, all the objects are fetched from the
endpoint and returned. This may trigger some parsing error if the
result set is very large.
:param pagination: Use pagination (default: `True`)
:param page_size: Size of the pagination page (default: `100`).
Any non-numeric value will be cast to the
default value
:param page: Page number to retrieve (default: `None`). Ignored if
`pagination` is `False`
:param queryparams: Additional filter parameters as accepted by the
remote API
:return: <SearchableList> |
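A hedged sketch for the manager method above, assuming the python-taiga client these resources belong to; host and credentials are placeholders.

```python
# Sketch, assuming the python-taiga client; values are placeholders.
from taiga import TaigaAPI

api = TaigaAPI(host='https://taiga.example.com')
api.auth(username='user', password='secret')
projects = api.projects.list(page_size=50)  # fetches all pages by default
```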
def parse(cls, requester, entries):
result_entries = SearchableList()
for entry in entries:
result_entries.append(cls.instance.parse(requester, entry))
return result_entries | Parse a JSON array into a list of model instances. |
def parse_list(self, entries):
result_entries = SearchableList()
for entry in entries:
result_entries.append(self.instance.parse(self.requester, entry))
return result_entries | Parse a JSON array into a list of model instances. |
def update(self, **args):
self_dict = self.to_dict()
if args:
self_dict = dict(list(self_dict.items()) + list(args.items()))
response = self.requester.put(
'/{endpoint}/{id}', endpoint=self.endpoint,
id=self.id, payload=self_dict
)
obj_json = response.json()
if 'version' in obj_json:
self.__dict__['version'] = obj_json['version']
return self | Update the current :class:`InstanceResource` |
def patch(self, fields, **args):
self_dict = dict([(key, value) for (key, value) in
self.to_dict().items()
if key in fields])
if args:
self_dict = dict(list(self_dict.items()) + list(args.items()))
response = self.requester.patch(
'/{endpoint}/{id}', endpoint=self.endpoint,
id=self.id, payload=self_dict
)
obj_json = response.json()
if 'version' in obj_json:
self.__dict__['version'] = obj_json['version']
return self | Patch the current :class:`InstanceResource` |