_id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict) |
---|---|---|---|---|---|
q276900
|
SignalHandler.default_handler
|
test
|
def default_handler(self, signum, frame):
""" Default handler, a generic callback method for signal processing"""
self.log.debug("Signal handler called with signal: {0}".format(signum))
# 1. If signal is HUP restart the python process
# 2. If signal is TERM, INT or QUIT we try to cleanup then exit with -1
# 3. If signal is STOP or TSTP we pause
# 4. If signal is CONT or USR1 we continue
# 5. If signal is INFO we print status
# 6. If signal is USR2 we abort and then exit with -1
if signum in self.restart_signals:
self.set_handler(self.handled_signals, self.pseudo_handler)
self._cleanup()
os.execl('python', 'python', * sys.argv)
elif signum in self.abort_signals:
self.abort(signum)
elif signum in self.pause_signals:
self.pause(signum)
elif signum in self.resume_signals:
self.resume(signum)
elif signum in self.status_signals:
self.status(signum)
elif signum in self.error_signals:
self.log.error('Signal handler received error signal from an external process, aborting')
self.abort(signum)
else:
self.log.error("Unhandled signal received: {0}".format(signum))
raise RuntimeError("Unhandled signal received: {0}".format(signum))
|
python
|
{
"resource": ""
}
|
q276901
|
SignalHandler.pause
|
test
|
def pause(self, signum, seconds=0, callback_function=None):
"""
Pause execution, execution will resume in X seconds or when the
appropriate resume signal is received. Execution will jump to the
callback_function, the default callback function is the handler
method which will run all tasks registered with the reg_on_resume
method.
Returns True if timer expired, otherwise returns False
"""
if callback_function is None:
callback_function = self.default_handler
if seconds > 0:
self.log.info("Signal handler pausing for {0} seconds or until it receives SIGALRM or SIGCONT".format(seconds))
signal.signal(signal.SIGALRM, callback_function)
signal.alarm(seconds)
else:
self.log.info('Signal handler pausing until it receives SIGALRM or SIGCONT')
signal.signal(signal.SIGCONT, callback_function)
signal.pause()
self.log.info('Signal handler resuming from pause')
if signum == signal.SIGALRM:
return True
else:
return False
|
python
|
{
"resource": ""
}
|
q276902
|
SignalHandler.abort
|
test
|
def abort(self, signum):
""" Run all abort tasks, then all exit tasks, then exit with error
return status"""
self.log.info('Signal handler received abort request')
self._abort(signum)
self._exit(signum)
os._exit(1)
|
python
|
{
"resource": ""
}
|
q276903
|
SignalHandler.status
|
test
|
def status(self, signum):
""" Run all status tasks, then run all tasks in the resume queue"""
self.log.debug('Signal handler got status signal')
new_status_callbacks = []
for status_call in self.status_callbacks:
# Non-persistent callbacks are dropped after being called
try:
self.log.debug("Calling {0}({1},{2})".format(status_call['function'].__name__, status_call['args'], status_call['kwargs']))
except AttributeError:
self.log.debug("Calling unbound function/method {0}".format(str(status_call)))
status_call['function'](*status_call['args'], **status_call['kwargs'])
if status_call['persistent']:
new_status_callbacks.append(status_call)
self.status_callbacks = new_status_callbacks
self._resume(signum)
|
python
|
{
"resource": ""
}
|
q276904
|
SignalHandler._unreg_event
|
test
|
def _unreg_event(self, event_list, event):
""" Tries to remove a registered event without triggering it """
try:
self.log.debug("Removing event {0}({1},{2})".format(event['function'].__name__, event['args'], event['kwargs']))
except AttributeError:
self.log.debug("Removing event {0}".format(str(event)))
try:
event_list.remove(event)
except ValueError:
try:
self.log.warn("Unable to remove event {0}({1},{2}) , not found in list: {3}".format(event['function'].__name__, event['args'], event['kwargs'], event_list))
except AttributeError:
self.log.debug("Unable to remove event {0}".format(str(event)))
raise KeyError('Unable to unregister the specified event from the signals specified')
|
python
|
{
"resource": ""
}
|
q276905
|
Connection.fetch_metric
|
test
|
def fetch_metric(self, metric, start, end, tags=None, aggregator="sum",
downsample=None, ms_resolution=True):
"""Fetch time series data from OpenTSDB
Parameters:
metric:
A string representing a valid OpenTSDB metric.
tags:
A dict mapping tag names to tag values. Tag names and values are
always strings.
{ 'user_id': '44' }
start:
A datetime.datetime-like object representing the start of the
range to query over.
end:
A datetime.datetime-like object representing the end of the
range to query over.
aggregator:
The function for merging multiple time series together. For
example, if the "user_id" tag is not specified, this aggregator
function is used to combine all heart rate time series into one
time series. (Yes, this isn't very useful.)
For queries that return only one time series, this parameter is
not relevant.
Valid values: "sum", "min", "max", "avg", "dev"
See: http://opentsdb.net/docs/build/html/user_guide/query/aggregators.html
downsample:
A relative time interval to "downsample". This isn't true
downsampling; rather, if you specify a downsampling of "5m"
(five minutes), OpenTSDB will split data into five minute
intervals, and return one data point in the middle of each
interval whose value is the average of all data points within
that interval.
Valid relative time values are strings of the following format:
"<amount><time_unit>"
Valid time units: "ms", "s", "m", "h", "d", "w", "n", "y"
Date and time format: http://opentsdb.net/docs/build/html/user_guide/query/dates.html
ms_resolution:
Whether or not to output data point timestamps in milliseconds
or seconds. If this flag is false and there are multiple
data points within a second, those data points will be down
sampled using the query's aggregation function.
Returns:
A dict mapping timestamps to data points
"""
query = "{aggregator}:{downsample}{metric}{{{tags}}}".format(
aggregator=aggregator,
downsample=downsample + "-avg:" if downsample else "",
metric=metric,
tags=','.join("%s=%s" % (k, v) for k, v in tags.items())
)
params = {
'ms': ms_resolution,
'start': '{0:.3f}'.format(start.timestamp()),
'end': '{0:.3f}'.format(end.timestamp()),
'm': query
}
response = self.__request("/query", params)
if response.status_code == 200:
try:
return response.json()[0]['dps']
except IndexError:
# empty data set
return {}
raise QueryError(response.json())
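# A minimal sketch of the query string this builds (hypothetical values,
# not from the source): metric="sys.cpu.user", tags={"host": "web01"},
# aggregator="sum", downsample="5m" yield
#   m = "sum:5m-avg:sys.cpu.user{host=web01}"
# which matches OpenTSDB's "m" query parameter format.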
|
python
|
{
"resource": ""
}
|
q276906
|
Connection.fetch_sorted_metric
|
test
|
def fetch_sorted_metric(self, *args, **kwargs):
"""Fetch and sort time series data from OpenTSDB
Takes the same parameters as `fetch_metric`, but returns a list of
(timestamp, value) tuples sorted by timestamp.
"""
return sorted(self.fetch_metric(*args, **kwargs).items(),
key=lambda x: float(x[0]))
|
python
|
{
"resource": ""
}
|
q276907
|
pfcollect
|
test
|
def pfcollect(iterable, n=None):
"""Collects and returns a list of values from the given iterable. If
the n parameter is not specified, collects all values from the
iterable.
:param iterable: An iterable yielding values for the list
:param n: An optional maximum number of items to collect
:rtype: List of values from the iterable
Example::
>>> @pointfree
... def fibonaccis():
... a, b = 0, 1
... while True:
... a, b = b, a+b
... yield a
>>> (pfcollect(n=10) * fibonaccis)()
[1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
"""
if n is not None:
return list(itertools.islice(iterable, n))
else:
return list(iterable)
|
python
|
{
"resource": ""
}
|
q276908
|
pfprint
|
test
|
def pfprint(item, end='\n', file=None):
"""Prints an item.
:param item: The item to print
:param end: String to append to the end of printed output
:param file: File to which output is printed
:rtype: None
Example::
>>> from operator import add
>>> fn = pfreduce(add, initial=0) >> pfprint
>>> fn([1, 2, 3, 4])
10
"""
# Can't just make sys.stdout the file argument's default value, because
# then we would be capturing the stdout file descriptor, and then
# doctest -- which works by redefining sys.stdout -- would fail:
if file is None:
file = sys.stdout
print(item, end=end, file=file)
|
python
|
{
"resource": ""
}
|
q276909
|
pfprint_all
|
test
|
def pfprint_all(iterable, end='\n', file=None):
"""Prints each item from an iterable.
:param iterable: An iterable yielding values to print
:param end: String to append to the end of printed output
:param file: File to which output is printed
:rtype: None
Example::
>>> @pointfree
... def prefix_all(prefix, iterable):
... for item in iterable:
... yield "%s%s" % (prefix, item)
>>> fn = prefix_all("An item: ") >> pfprint_all
>>> fn(["foo", "bar", "baz"])
An item: foo
An item: bar
An item: baz
"""
for item in iterable:
pfprint(item, end=end, file=file)
|
python
|
{
"resource": ""
}
|
q276910
|
partial.__sig_from_func
|
test
|
def __sig_from_func(self, func):
"""Extract function signature, default arguments, keyword-only
arguments, and whether or not variable positional or keyword
arguments are allowed. This also supports calling unbound instance
methods by passing an object instance as the first argument;
however, unbound classmethod and staticmethod objects are not
callable, so we do not attempt to support them here."""
if isinstance(func, types.MethodType):
# A bound instance or class method.
argspec = getfullargspec(func.__func__)
self.pargl = argspec[0][1:]
else:
# A regular function, an unbound instance method, or a
# bound static method.
argspec = getfullargspec(func)
self.pargl = argspec[0][:]
if argspec[3] is not None:
def_offset = len(self.pargl) - len(argspec[3])
self.def_argv = dict((self.pargl[def_offset+i],argspec[3][i]) \
for i in range(len(argspec[3])))
else:
self.def_argv = {}
self.var_pargs = argspec[1] is not None
self.var_kargs = argspec[2] is not None
self.kargl = argspec[4]
# We need keyword-only arguments' default values too.
if argspec[5] is not None:
self.def_argv.update(argspec[5])
|
python
|
{
"resource": ""
}
|
q276911
|
partial.__sig_from_partial
|
test
|
def __sig_from_partial(self, inst):
"""Extract function signature from an existing partial instance."""
self.pargl = list(inst.pargl)
self.kargl = list(inst.kargl)
self.def_argv = inst.def_argv.copy()
self.var_pargs = inst.var_pargs
self.var_kargs = inst.var_kargs
|
python
|
{
"resource": ""
}
|
q276912
|
partial.__new_argv
|
test
|
def __new_argv(self, *new_pargs, **new_kargs):
"""Calculate new argv and extra_argv values resulting from adding
the specified positional and keyword arguments."""
new_argv = self.argv.copy()
new_extra_argv = list(self.extra_argv)
for v in new_pargs:
arg_name = None
for name in self.pargl:
if not name in new_argv:
arg_name = name
break
if arg_name:
new_argv[arg_name] = v
elif self.var_pargs:
new_extra_argv.append(v)
else:
num_prev_pargs = len([name for name in self.pargl if name in self.argv])
raise TypeError("%s() takes exactly %d positional arguments (%d given)" \
% (self.__name__,
len(self.pargl),
num_prev_pargs + len(new_pargs)))
for k,v in new_kargs.items():
if not (self.var_kargs or (k in self.pargl) or (k in self.kargl)):
raise TypeError("%s() got an unexpected keyword argument '%s'" \
% (self.__name__, k))
new_argv[k] = v
return (new_argv, new_extra_argv)
|
python
|
{
"resource": ""
}
|
q276913
|
ignore_certain_metainf_files
|
test
|
def ignore_certain_metainf_files(filename):
"""
We do not support multiple signatures in XPI signing because the client
side code makes some pretty reasonable assumptions about a single signature
on any given JAR. This function returns True if the file name given is one
that we dispose of to prevent multiple signatures.
"""
ignore = ("META-INF/manifest.mf",
"META-INF/*.sf",
"META-INF/*.rsa",
"META-INF/*.dsa",
"META-INF/ids.json")
for glob in ignore:
# Explicitly match against all upper case to prevent the kind of
# runtime errors that lead to https://bugzil.la/1169574
if fnmatch.fnmatchcase(filename.upper(), glob.upper()):
return True
return False
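# A few illustrative calls (hypothetical filenames; matching is case-insensitive):
#
#   ignore_certain_metainf_files("META-INF/manifest.mf")  # True
#   ignore_certain_metainf_files("META-INF/zigbert.sf")   # True (matches META-INF/*.sf)
#   ignore_certain_metainf_files("chrome/content.js")     # False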
|
python
|
{
"resource": ""
}
|
q276914
|
file_key
|
test
|
def file_key(filename):
'''Sort keys for xpi files
The filenames in a manifest are ordered so that files not in a
directory come before files in any directory, ordered
alphabetically but ignoring case, with a few exceptions
(install.rdf, chrome.manifest, icon.png and icon64.png come at the
beginning; licenses come at the end).
This order does not appear to affect anything in any way, but it
looks nicer.
'''
prio = 4
if filename == 'install.rdf':
prio = 1
elif filename in ["chrome.manifest", "icon.png", "icon64.png"]:
prio = 2
elif filename in ["MPL", "GPL", "LGPL", "COPYING",
"LICENSE", "license.txt"]:
prio = 5
return (prio, os.path.split(filename.lower()))
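# A usage sketch with hypothetical filenames:
#
#   files = ["dir/a.js", "b.txt", "LICENSE", "install.rdf"]
#   sorted(files, key=file_key)
#   # -> ['install.rdf', 'b.txt', 'dir/a.js', 'LICENSE']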
|
python
|
{
"resource": ""
}
|
q276915
|
vlq2int
|
test
|
def vlq2int(data):
"""Read one VLQ-encoded integer value from an input data stream."""
# The VLQ is little-endian.
byte = ord(data.read(1))
value = byte & 0x7F
shift = 1
while byte & 0x80 != 0:
byte = ord(data.read(1))
value = ((byte & 0x7F) << shift * 7) | value
shift += 1
return value
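# A worked example (assumption: any file-like object of bytes works here):
#
#   import io
#   vlq2int(io.BytesIO(b'\x96\x01'))  # -> 150
#   # 0x96: continuation bit set, low 7 bits = 22; (0x01 << 7) | 22 = 150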
|
python
|
{
"resource": ""
}
|
q276916
|
read_table
|
test
|
def read_table(data, fields):
"""Read a table structure.
These are used by Blizzard to collect pieces of data together. Each
value is prefixed by two bytes, first denoting (doubled) index and the
second denoting some sort of key -- so far it has always been '09'. The
actual value follows as a Variable-Length Quantity, also known as uintvar.
The actual value is also doubled.
In some tables the keys might jump from 0A 09 to 04 09 for example.
I have no idea why this happens, as the next logical key is 0C. Perhaps
it's a table in a table? Some sort of headers might exist for these
tables, I'd imagine at least denoting length. Further research required.
"""
def read_field(field_name):
data.read(2)
table[field_name] = vlq2int(data) / 2
# Discard unknown fields.
if field_name == 'unknown':
del table[field_name]
table = {}
for field in fields:
read_field(field)
return table
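# A minimal sketch (hypothetical bytes: two prefix bytes are discarded, the
# VLQ value 8 is halved):
#
#   import io
#   read_table(io.BytesIO(b'\x00\x09\x08'), ['duration'])
#   # -> {'duration': 4.0} on Python 3 (4 on Python 2)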
|
python
|
{
"resource": ""
}
|
q276917
|
SC2Replay._parse_header
|
test
|
def _parse_header(self):
"""Parse the user data header portion of the replay."""
header = OrderedDict()
user_data_header = self.archive.header['user_data_header']['content']
if re.search(r'StarCraft II replay', user_data_header):
user_data_header = StringIO.StringIO(user_data_header)
user_data_header.seek(30) # Just skip the beginning.
header.update(read_table(user_data_header, ['release_flag',
'major_version',
'minor_version',
'maintenance_version',
'build_number',
'unknown',
'unknown',
'duration']))
# Some post processing is required.
header['version'] = '%s.%s.%s.%s' % (header['major_version'],
header['minor_version'],
header['maintenance_version'],
header['build_number'])
if not header['release_flag']:
header['version'] += ' (dev)'
# Duration is actually stored as 1/16th of a second. Go figure.
header['duration'] /= 16
else:
raise ValueError("The given file is not a StarCraft II replay.")
return header
|
python
|
{
"resource": ""
}
|
q276918
|
SC2Replay.get_duration
|
test
|
def get_duration(self, seconds):
"""Transform duration into a human-readable form."""
duration = ""
minutes, seconds = divmod(seconds, 60)
if minutes >= 60:
hours, minutes = divmod(minutes, 60)
duration = "%sh " % hours
duration += "%sm %ss" % (minutes, seconds)
return duration
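# Worked examples of the formatting:
#
#   get_duration(125)   # -> '2m 5s'
#   get_duration(3725)  # -> '1h 2m 5s'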
|
python
|
{
"resource": ""
}
|
q276919
|
SC2Replay.print_details
|
test
|
def print_details(self):
"""Print a summary of the game details."""
print 'Map ', self.map
print 'Duration ', self.duration
print 'Version ', self.version
print 'Team Player Race Color'
print '-----------------------------------'
for player in self.players:
print '{team:<5} {name:12} {race:10} {color}'.format(**player)
|
python
|
{
"resource": ""
}
|
q276920
|
FormEvents.data
|
test
|
def data(self):
"""
This function retrieves the data that the user typed into the form.
"""
self.batch_name_value = self.ui.batch_name_value.text()
self.saa_values = self.ui.saa_values.text()
self.sza_values = self.ui.sza_values.text()
self.p_values = self.ui.p_values.text()
self.x_value = self.ui.x_value.text()
self.y_value = self.ui.y_value.text()
self.g_value = self.ui.g_value.text()
self.s_value = self.ui.s_value.text()
self.z_value = self.ui.z_value.text()
self.wavelength_values = self.ui.wavelength_values.text()
self.verbose_value = self.ui.verbose_value.text()
self.phytoplankton_path = self.ui.phyto_path.text()
self.bottom_path = self.ui.bottom_path.text()
self.executive_path = self.ui.exec_path.text()
self.nb_cpu = self.ui.nb_cpu.currentText()
self.report_parameter_value = str(self.ui.report_parameter_value.text())
|
python
|
{
"resource": ""
}
|
q276921
|
FormEvents.search_file_result
|
test
|
def search_file_result(self):
"""
Once the file has been found, this function displays the file's data and the associated graphic.
"""
if self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE:
self.result_file = self.file_dialog.getOpenFileName(caption=str("Open Report File"), directory="./outputs")
if self.result_file != '':
self.ui.show_all_curves.setDisabled(False)
self.ui.show_grid.setDisabled(False)
self.data_processing()
self.display_the_graphic(self.num_line, self.wavelength, self.data_wanted, self.information)
self.authorized_display = True
|
python
|
{
"resource": ""
}
|
q276922
|
FormEvents.write_to_file
|
test
|
def write_to_file(self):
"""
This function calls "gui_batch.py" with input values to write the batch file.
"""
bt = BatchFile(self.batch_name_value, self.p_values, self.x_value, self.y_value, self.g_value, self.s_value,
self.z_value, self.wavelength_values, self.verbose_value, self.phytoplankton_path,
self.bottom_path, self.nb_cpu, self.executive_path, self.saa_values,
self.sza_values, self.report_parameter_value)
bt.write_batch_to_file(str(self.batch_name_value + "_batch.txt"))
|
python
|
{
"resource": ""
}
|
q276923
|
FormEvents.data_processing
|
test
|
def data_processing(self):
"""
This function separates the data from the file used to display curves and puts them into the appropriate arrays.
"""
the_file_name = str(self.result_file)
the_file = open(the_file_name, 'r')
lines = the_file.readlines()
# We put all lines in an array and we put each cell of the line in a column.
lines_array = []
for line in lines:
line = line.split(',') # Each comma starts a new cell
lines_array.append(line)
labels_line = lines_array[0]
cell_labels_line = 0 # Iterator on each cell of the line labels_line.
flag = True # Becomes False when we find the word which separates data from wavelength values.
try:
while flag: # If the word never matches, the IndexError below ends the loop.
if "wave length (nm)" in labels_line[cell_labels_line]:
index = labels_line.index(labels_line[cell_labels_line]) # Find the index of the string searched.
flag = False
else:
cell_labels_line += 1
except IndexError: # Reached when the label is never found.
sys.exit("Warning : There is no value named 'wavelength' in the file used to plot curves. "
"So, I can't separate data to plot curves and data about tests linking with these curves.")
self.information = [] # This array will contain the data displayed under the curves.
data_wavelength = [] # This array will contain the data to plot curves.
self.num_line = 0 # Iterator on each line of lines_array,
# The array containing data about information and wavelength.
for line in lines_array:
cell_line = 0 # Iterator on each cell of the line.
self.information.append([])
data_wavelength.append([])
while cell_line < len(line):
if cell_line < index:
self.information[self.num_line].append(line[cell_line])
elif cell_line > index:
data_wavelength[self.num_line].append(line[cell_line])
cell_line += 1
self.num_line += 1
# We transform wavelengths from strings to floats.
line_wavelength = 0 # Iterator on each line of data_wavelength
for row_data_wavelength in data_wavelength:
row_data_wavelength = [float(item.strip('\n').strip('\"')) for item in row_data_wavelength]
data_wavelength[line_wavelength] = row_data_wavelength
line_wavelength += 1
self.wavelength = data_wavelength[0] # The first line contains wavelength
self.data_wanted = data_wavelength[1:] # The others contain data useful to plot curves.
the_file.close()
|
python
|
{
"resource": ""
}
|
q276924
|
FormEvents.display_the_graphic_connection
|
test
|
def display_the_graphic_connection(self):
"""
This wrapper attaches "display_the_graphic" to the slider: Qt connections
require a callable that takes no parameters, and "display_the_graphic" has some.
"""
self.display_the_graphic(self.num_line, self.wavelength, self.data_wanted, self.information)
|
python
|
{
"resource": ""
}
|
q276925
|
FormEvents.print_graphic_information
|
test
|
def print_graphic_information(self, num_curve, information):
"""
This function displays information about curves.
Inputs: num_curve: the index of the curve's line to display.
information: the array containing the information for all curves to display.
"""
"""In this function, the best would to create labels each time we need to create one,
following the number of labels in label_information.
#self.essai = QtGui.QLabel(self.ui.tab)
#self.essai.setGeometry(PyQt4.QtCore.QRect(870,650,111,16))
#self.essai.setText("ESSAI")
"""
label_information = information[0]
data_information = information[1:]
count_nb_label = 0 # Iterator on all labels of label_information
nb_label = len(label_information)
while count_nb_label < nb_label:
self.ui.column1_label.setText(label_information[0].strip('\"'))
self.ui.column2_label.setText(label_information[1].strip('\"'))
self.ui.column3_label.setText(label_information[2].strip('\"'))
self.ui.column4_label.setText(label_information[3].strip('\"'))
self.ui.column5_label.setText(label_information[4].strip('\"'))
self.ui.column6_label.setText(label_information[5].strip('\"'))
self.ui.column7_label.setText(label_information[6].strip('\"'))
self.ui.column8_label.setText(label_information[7].strip('\"'))
count_nb_label += 1
line_of_data = 0 # Iterator on each line of data_information.
while line_of_data < len(data_information):
if line_of_data == num_curve:
self.ui.column1_result.setText(data_information[line_of_data][0])
self.ui.column2_result.setText(data_information[line_of_data][1])
self.ui.column3_result.setText(data_information[line_of_data][2])
self.ui.column4_result.setText(data_information[line_of_data][3])
self.ui.column5_result.setText(data_information[line_of_data][4])
self.ui.column6_result.setText(data_information[line_of_data][5])
self.ui.column7_result.setText(data_information[line_of_data][6])
self.ui.column8_result.setText(data_information[line_of_data][7])
line_of_data += 1
|
python
|
{
"resource": ""
}
|
q276926
|
FormEvents.display_error_message
|
test
|
def display_error_message(self):
"""
This function displays an error message when a wrong value is typed.
"""
self.ui.error_label.setScaledContents(True) # Warning image shown.
self.ui.error_text_label.show() # Warning message shown.
self.ui.error_text_label.setStyleSheet('color: red')
|
python
|
{
"resource": ""
}
|
q276927
|
FormEvents.hide_error_message
|
test
|
def hide_error_message(self):
"""
This function hides the error message when all values are correct.
"""
self.ui.error_label.setScaledContents(False) # Warning image hidden.
self.ui.error_text_label.hide()
|
python
|
{
"resource": ""
}
|
q276928
|
FormEvents.run
|
test
|
def run(self):
"""
This function executes planarRad using the batch file.
"""
"""
Known issue when PlanarRad starts: /bin/sh: 1: ../planarrad.py: not found
"""
print('Executing planarrad')
# If we are not in the reverse_mode :
if self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE:
self.data()
self.check_values()
if not self.without_error:
self.display_error_message()
else:
self.is_running = True
self.hide_error_message()
self.write_to_file()
os.chdir('./')
self.progress_bar()
this_dir = os.path.dirname(os.path.realpath(__file__)).rstrip('gui/')
batch_file = os.path.join(this_dir, "inputs/batch_files/" + str(self.batch_name_value) + "_batch.txt")
print(batch_file)
self.p = subprocess.Popen(
["./planarrad.py -i " + batch_file],
shell=True)
if self.ui.progressBar.value() == 100:
self.display_the_graphic(self.num_line, self.wavelength, self.data_wanted, self.information)
|
python
|
{
"resource": ""
}
|
q276929
|
FormEvents.cancel_planarrad
|
test
|
def cancel_planarrad(self):
"""
This function cancels PlanarRad.
"""
"""
This function needs to be tested; we don't know whether it works yet.
"""
if self.is_running and (self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE):
cancel = QtGui.QMessageBox.question(self.ui.cancel, 'Cancel PlanarRad', "Are you sure to cancel ?",
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No)
if cancel == QtGui.QMessageBox.Yes:
self.is_running = False
os.kill(self.p.pid, signal.SIGTERM)
print("Necessary to check if cancel_planarrad works well !")
self.ui.progressBar.reset()
else:
pass
|
python
|
{
"resource": ""
}
|
q276930
|
FormEvents.quit
|
test
|
def quit(self):
"""
This function quits PlanarRad, after checking whether PlanarRad is running.
"""
"""
Nothing is programmed yet to display a message box when the user clicks the window's close button to quit.
"""
if self.is_running:
warning_planarrad_running = QtGui.QMessageBox.warning(self.ui.quit, 'Warning !',
"PlanarRad is running. Stop it before quit !",
QtGui.QMessageBox.Ok)
else:
quit = QtGui.QMessageBox.question(self.ui.quit, 'Quit PlanarRad', "Are you sure to quit ?",
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No)
if quit == QtGui.QMessageBox.Yes:
QtGui.qApp.quit()
|
python
|
{
"resource": ""
}
|
q276931
|
FormEvents.save_figure
|
test
|
def save_figure(self):
"""
This function programs the button to save the figure displayed
and save it in a png file in the current repository.
"""
"""
TODO: increment the figure name so the previous figure is not overwritten when this method is used repeatedly.
The png file is put in the "Artists_saved" directory located in the "planarradpy" folder.
"""
default_name = 'Default_figure.png'
self.ui.graphic_widget.canvas.print_figure(default_name)
src = './' + default_name
dst = './Artists_saved'
os.system("mv" + " " + src + " " + dst)
|
python
|
{
"resource": ""
}
|
q276932
|
FormEvents.open_log_file
|
test
|
def open_log_file(self):
"""
The following opens the log file of PlanarRad.
"""
"""
TO DO.
"""
# webbrowser.open('https://marrabld.github.io/planarradpy/')
with open(os.path.expanduser('~/.planarradpy/log/libplanarradpy.log')) as f:
# self.uiLog.textEdit.setText(str(f.readlines()))
self.uiLog.textEdit.setPlainText(f.read())
self.log_window.show()
|
python
|
{
"resource": ""
}
|
q276933
|
FormEvents.open_documentation
|
test
|
def open_documentation(self):
"""
The following opens the documentation file.
"""
"""
TO DO.
"""
# webbrowser.open('https://marrabld.github.io/planarradpy/')
window = Window()
html = QtCore.QUrl.fromLocalFile(os.path.join(os.getcwd(), './docs/_build/html/index.html')) #open('./docs/_build/html/index.html').read()
window.view.load(html)
window.show()
window.exec_()
|
python
|
{
"resource": ""
}
|
q276934
|
FormEvents.prerequisite_actions
|
test
|
def prerequisite_actions(self):
"""
This function does all required actions at the beginning when we run the GUI.
"""
self.hide_error_message()
self.ui.show_all_curves.setDisabled(True)
self.ui.sens.setDisabled(True)
self.ui.show_grid.setDisabled(True)
pathname = os.path.dirname(sys.argv[0])
path = os.path.abspath(pathname)
# self.phytoplankton_path = self.ui.phyto_path.setText(path.replace('gui', 'inputs/iop_files'))
# self.bottom_path = self.ui.bottom_path.setText(path.replace('gui', 'inputs/bottom_files'))
# self.executive_path = self.ui.exec_path.setText("Decide where will be 'jude2_install/bin'")
self.verbose_value = self.ui.verbose_value.setText("6")
self.report_parameter_value = self.ui.report_parameter_value.setText("Rrs")
self.ui.progressBar.reset()
|
python
|
{
"resource": ""
}
|
q276935
|
FormEvents.click
|
test
|
def click(self, event):
"""
This function intercepts the mouse's right click and its position.
"""
if event.button == 3:
if self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE:
self.pos = QtGui.QCursor().pos()
self.graphic_context_menu(self.pos)
|
python
|
{
"resource": ""
}
|
q276936
|
FormEvents.mouse_move
|
test
|
def mouse_move(self, event):
"""
The following gets back coordinates of the mouse on the canvas.
"""
if (self.ui.tabWidget.currentIndex() == TabWidget.NORMAL_MODE):
self.posX = event.xdata
self.posY = event.ydata
self.graphic_target(self.posX, self.posY)
|
python
|
{
"resource": ""
}
|
q276937
|
FormEvents.graphic_target
|
test
|
def graphic_target(self, x, y):
"""
The following updates the labels showing the mouse coordinates.
"""
if self.authorized_display == True:
try:
self.display_the_graphic(self.num_line, self.wavelength, self.data_wanted, self.information)
self.ui.mouse_coordinate.setText("(%0.3f, %0.3f)" % (x, y))
except Exception:
pass
|
python
|
{
"resource": ""
}
|
q276938
|
genesis_signing_lockset
|
test
|
def genesis_signing_lockset(genesis, privkey):
"""
In order to avoid complicated bootstrapping, we define
the genesis_signing_lockset as a lockset with one vote by any validator.
"""
v = VoteBlock(0, 0, genesis.hash)
v.sign(privkey)
ls = LockSet(num_eligible_votes=1)
ls.add(v)
assert ls.has_quorum
return ls
|
python
|
{
"resource": ""
}
|
q276939
|
Signed.sign
|
test
|
def sign(self, privkey):
"""Sign this with a private key"""
if self.v:
raise InvalidSignature("already signed")
if privkey in (0, '', '\x00' * 32):
raise InvalidSignature("Zero privkey cannot sign")
rawhash = sha3(rlp.encode(self, self.__class__.exclude(['v', 'r', 's'])))
if len(privkey) == 64:
privkey = encode_privkey(privkey, 'bin')
pk = PrivateKey(privkey, raw=True)
signature = pk.ecdsa_recoverable_serialize(pk.ecdsa_sign_recoverable(rawhash, raw=True))
signature = signature[0] + chr(signature[1])
self.v = ord(signature[64]) + 27
self.r = big_endian_to_int(signature[0:32])
self.s = big_endian_to_int(signature[32:64])
self._sender = None
return self
|
python
|
{
"resource": ""
}
|
q276940
|
Signed.hash
|
test
|
def hash(self):
"signatures are non deterministic"
if self.sender is None:
raise MissingSignatureError()
class HashSerializable(rlp.Serializable):
fields = [(field, sedes) for field, sedes in self.fields
if field not in ('v', 'r', 's')] + [('_sender', binary)]
_sedes = None
return sha3(rlp.encode(self, HashSerializable))
|
python
|
{
"resource": ""
}
|
q276941
|
LockSet.check
|
test
|
def check(self):
"either invalid or one of quorum, noquorum, quorumpossible"
if not self.is_valid:
return True
test = (self.has_quorum, self.has_quorum_possible, self.has_noquorum)
assert 1 == len([x for x in test if x is not None])
return True
|
python
|
{
"resource": ""
}
|
q276942
|
IOU.issue_funds
|
test
|
def issue_funds(ctx, amount='uint256', rtgs_hash='bytes32', returns=STATUS):
"In the IOU fungible the supply is set by Issuer, who issue funds."
# allocate new issue as result of a new cash entry
ctx.accounts[ctx.msg_sender] += amount
ctx.issued_amounts[ctx.msg_sender] += amount
# Store hash(rtgs)
ctx.Issuance(ctx.msg_sender, rtgs_hash, amount)
return OK
|
python
|
{
"resource": ""
}
|
q276943
|
HeightManager.last_lock
|
test
|
def last_lock(self):
"highest lock on height"
rs = list(self.rounds)
assert len(rs) < 2 or rs[0] > rs[1] # FIXME REMOVE
for r in self.rounds: # is sorted highest to lowest
if self.rounds[r].lock is not None:
return self.rounds[r].lock
|
python
|
{
"resource": ""
}
|
q276944
|
HeightManager.last_voted_blockproposal
|
test
|
def last_voted_blockproposal(self):
"the last block proposal node voted on"
for r in self.rounds:
if isinstance(self.rounds[r].proposal, BlockProposal):
assert isinstance(self.rounds[r].lock, Vote)
if self.rounds[r].proposal.blockhash == self.rounds[r].lock.blockhash:
return self.rounds[r].proposal
|
python
|
{
"resource": ""
}
|
q276945
|
HeightManager.last_valid_lockset
|
test
|
def last_valid_lockset(self):
"highest valid lockset on height"
for r in self.rounds:
ls = self.rounds[r].lockset
if ls.is_valid:
return ls
return None
|
python
|
{
"resource": ""
}
|
q276946
|
RoundManager.get_timeout
|
test
|
def get_timeout(self):
"setup a timeout for waiting for a proposal"
if self.timeout_time is not None or self.proposal:
return
now = self.cm.chainservice.now
round_timeout = ConsensusManager.round_timeout
round_timeout_factor = ConsensusManager.round_timeout_factor
delay = round_timeout * round_timeout_factor ** self.round
self.timeout_time = now + delay
return delay
|
python
|
{
"resource": ""
}
|
q276947
|
Synchronizer.on_proposal
|
test
|
def on_proposal(self, proposal, proto):
"called to inform about synced peers"
assert isinstance(proto, HDCProtocol)
assert isinstance(proposal, Proposal)
if proposal.height >= self.cm.height:
assert proposal.lockset.is_valid
self.last_active_protocol = proto
|
python
|
{
"resource": ""
}
|
q276948
|
mk_privkeys
|
test
|
def mk_privkeys(num):
"make privkeys that support coloring, see utils.cstr"
privkeys = []
assert num <= num_colors
for i in range(num):
j = 0
while True:
k = sha3(str(j))
a = privtoaddr(k)
an = big_endian_to_int(a)
if an % num_colors == i:
break
j += 1
privkeys.append(k)
return privkeys
|
python
|
{
"resource": ""
}
|
q276949
|
Transport.delay
|
test
|
def delay(self, sender, receiver, packet, add_delay=0):
"""
bandwidths are inaccurate, as we don't account for parallel transfers here
"""
bw = min(sender.ul_bandwidth, receiver.dl_bandwidth)
delay = sender.base_latency + receiver.base_latency
delay += len(packet) / bw
delay += add_delay
return delay
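# A worked sketch with hypothetical numbers: with min(ul, dl) bandwidth of
# 1e6 bytes/s, base latencies of 0.05 s on each side, and a 1000-byte packet,
# delay = 0.05 + 0.05 + 1000/1e6 = 0.101 seconds (plus any add_delay).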
|
python
|
{
"resource": ""
}
|
q276950
|
SlowTransport.deliver
|
test
|
def deliver(self, sender, receiver, packet):
"deliver on edge of timeout_window"
to = ConsensusManager.round_timeout
assert to > 0
print "in slow transport deliver"
super(SlowTransport, self).deliver(sender, receiver, packet, add_delay=to)
|
python
|
{
"resource": ""
}
|
q276951
|
chain_nac_proxy
|
test
|
def chain_nac_proxy(chain, sender, contract_address, value=0):
"create an object which acts as a proxy for the contract on the chain"
klass = registry[contract_address].im_self
assert issubclass(klass, NativeABIContract)
def mk_method(method):
def m(s, *args):
data = abi_encode_args(method, args)
block = chain.head_candidate
output = test_call(block, sender, contract_address, data)
if output is not None:
return abi_decode_return_vals(method, output)
return m
class cproxy(object):
pass
for m in klass._abi_methods():
setattr(cproxy, m.__func__.func_name, mk_method(m))
return cproxy()
|
python
|
{
"resource": ""
}
|
q276952
|
Registry.address_to_native_contract_class
|
test
|
def address_to_native_contract_class(self, address):
"returns class._on_msg_unsafe, use x.im_self to get class"
assert isinstance(address, bytes) and len(address) == 20
assert self.is_instance_address(address)
nca = self.native_contract_address_prefix + address[-4:]
return self.native_contracts[nca]
|
python
|
{
"resource": ""
}
|
q276953
|
Registry.register
|
test
|
def register(self, contract):
"registers NativeContract classes"
assert issubclass(contract, NativeContractBase)
assert len(contract.address) == 20
assert contract.address.startswith(self.native_contract_address_prefix)
if self.native_contracts.get(contract.address) == contract._on_msg:
log.debug("already registered", contract=contract, address=contract.address)
return
assert contract.address not in self.native_contracts, 'address already taken'
self.native_contracts[contract.address] = contract._on_msg
log.debug("registered native contract", contract=contract, address=contract.address)
|
python
|
{
"resource": ""
}
|
q276954
|
DuplicatesFilter.update
|
test
|
def update(self, data):
"returns True if unknown"
if data not in self.filter:
self.filter.append(data)
if len(self.filter) > self.max_items:
self.filter.pop(0)
return True
else:
self.filter.append(self.filter.pop(0))
return False
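# A usage sketch (assumption: the constructor initializes `filter` as a list
# and sets `max_items`; neither is shown in this snippet):
#
#   f = DuplicatesFilter()
#   f.update('tx1')  # -> True (first sighting)
#   f.update('tx1')  # -> False (known; moved to the back of the filter)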
|
python
|
{
"resource": ""
}
|
q276955
|
ChainService.on_receive_transactions
|
test
|
def on_receive_transactions(self, proto, transactions):
"receives rlp.decoded serialized"
log.debug('----------------------------------')
log.debug('remote_transactions_received', count=len(transactions), remote_id=proto)
def _add_txs():
for tx in transactions:
self.add_transaction(tx, origin=proto)
gevent.spawn(_add_txs)
|
python
|
{
"resource": ""
}
|
q276956
|
img_from_vgg
|
test
|
def img_from_vgg(x):
'''Decondition an image from the VGG16 model.'''
x = x.transpose((1, 2, 0))
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
x = x[:,:,::-1] # to RGB
return x
|
python
|
{
"resource": ""
}
|
q276957
|
img_to_vgg
|
test
|
def img_to_vgg(x):
'''Condition an image for use with the VGG16 model.'''
x = x[:,:,::-1] # to BGR
x[:, :, 0] -= 103.939
x[:, :, 1] -= 116.779
x[:, :, 2] -= 123.68
x = x.transpose((2, 0, 1))
return x
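# A round-trip sketch (assumptions: input is a float HxWx3 RGB array, and
# both helpers mutate their argument in place, so a copy is passed):
#
#   import numpy as np
#   img = (np.random.rand(224, 224, 3) * 255).astype('float64')
#   assert np.allclose(img_from_vgg(img_to_vgg(img.copy())), img)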
|
python
|
{
"resource": ""
}
|
q276958
|
VGG16.get_f_layer
|
test
|
def get_f_layer(self, layer_name):
'''Create a function for the response of a layer.'''
inputs = [self.net_input]
if self.learning_phase is not None:
inputs.append(K.learning_phase())
return K.function(inputs, [self.get_layer_output(layer_name)])
|
python
|
{
"resource": ""
}
|
q276959
|
VGG16.get_layer_output
|
test
|
def get_layer_output(self, name):
'''Get symbolic output of a layer.'''
if not name in self._f_layer_outputs:
layer = self.net.get_layer(name)
self._f_layer_outputs[name] = layer.output
return self._f_layer_outputs[name]
|
python
|
{
"resource": ""
}
|
q276960
|
VGG16.get_features
|
test
|
def get_features(self, x, layers):
'''Evaluate layer outputs for `x`'''
if not layers:
return None
inputs = [self.net.input]
if self.learning_phase is not None:
inputs.append(self.learning_phase)
f = K.function(inputs, [self.get_layer_output(layer_name) for layer_name in layers])
feature_outputs = f([x])
features = dict(zip(layers, feature_outputs))
return features
|
python
|
{
"resource": ""
}
|
q276961
|
create_key_file
|
test
|
def create_key_file(path):
"""
Creates a new encryption key in the path provided and sets the file
permissions. Setting the file permissions currently does not work
on Windows platforms because of the differences in how file
permissions are read and modified.
"""
iv = "{}{}".format(os.urandom(32), time.time())
new_key = generate_key(ensure_bytes(iv))
with open(path, "wb") as f:
f.write(base64.b64encode(new_key))
os.chmod(path, 0o400)
|
python
|
{
"resource": ""
}
|
q276962
|
TeradataBulkLoad.finish
|
test
|
def finish(self):
"""
Finishes the load job. Called automatically when the connection closes.
:return: The exit code returned when applying rows to the table
"""
if self.finished:
return self.exit_code
checkpoint_status = self.checkpoint()
self.exit_code = self._exit_code()
if self.exit_code != 0:
raise TeradataPTError("BulkLoad job finished with return code '{}'".format(self.exit_code))
# TODO(chris): should this happen every time?
if self.applied_count > 0:
self._end_acquisition()
self._apply_rows()
self.exit_code = self._exit_code()
if self.exit_code != 0:
raise TeradataPTError("BulkLoad job finished with return code '{}'".format(self.exit_code))
self.finished = True
return self.exit_code
|
python
|
{
"resource": ""
}
|
q276963
|
TeradataBulkLoad.from_file
|
test
|
def from_file(self, filename, table=None, delimiter='|', null='NULL',
panic=True, quotechar='"', parse_dates=False):
"""
Load from a file into the target table, handling each step of the
load process.
Can load from text files, and properly formatted giraffez archive
files. In both cases, if Gzip compression is detected the file will be
decompressed while reading and handled appropriately. The encoding is
determined automatically by the contents of the file.
It is not necessary to set the columns in use prior to loading from a file.
In the case of a text file, the header is used to determine column names
and their order. Valid delimiters include '|', ',', and '\\t' (tab). When
loading an archive file, the column information is decoded alongside the data.
:param str filename: The location of the file to be loaded
:param str table: The name of the target table, if it was not specified
to the constructor for the instance
:param str null: The string that indicates a null value in the rows being
inserted from a file. Defaults to 'NULL'
:param str delimiter: When loading a file, indicates that fields are
separated by this delimiter. Defaults to :code:`'|'`. In most
cases, this behavior is sufficient
:param str quotechar: The character used to quote fields containing special characters,
like the delimiter.
:param bool panic: If :code:`True`, when an error is encountered it will be
raised. Otherwise, the error will be logged and :code:`self.error_count`
is incremented.
:return: The output of the call to
:meth:`~giraffez.load.TeradataBulkLoad.finish`
:raises `giraffez.errors.GiraffeError`: if table was not set and :code:`table`
is :code:`None`, or if a Teradata error occurred while retrieving table info.
:raises `giraffez.errors.GiraffeEncodeError`: if :code:`panic` is :code:`True` and there
are format errors in the row values.
"""
if not self.table:
if not table:
raise GiraffeError("Table must be set or specified to load a file.")
self.table = table
if not isinstance(null, basestring):
raise GiraffeError("Expected 'null' to be str, received {}".format(type(null)))
with Reader(filename, delimiter=delimiter, quotechar=quotechar) as f:
if not isinstance(f.delimiter, basestring):
raise GiraffeError("Expected 'delimiter' to be str, received {}".format(type(delimiter)))
self.columns = f.header
if isinstance(f, ArchiveFileReader):
self.mload.set_encoding(ROW_ENCODING_RAW)
self.preprocessor = lambda s: s
if parse_dates:
self.preprocessor = DateHandler(self.columns)
self._initiate()
self.mload.set_null(null)
self.mload.set_delimiter(delimiter)
i = 0
for i, line in enumerate(f, 1):
self.put(line, panic=panic)
if i % self.checkpoint_interval == 1:
log.info("\rBulkLoad", "Processed {} rows".format(i), console=True)
checkpoint_status = self.checkpoint()
self.exit_code = self._exit_code()
if self.exit_code != 0:
return self.exit_code
log.info("\rBulkLoad", "Processed {} rows".format(i))
return self.finish()
|
python
|
{
"resource": ""
}
|
q276964
|
TeradataBulkLoad.put
|
test
|
def put(self, items, panic=True):
"""
Load a single row into the target table.
:param list items: A list of values in the row corresponding to the
fields specified by :code:`self.columns`
:param bool panic: If :code:`True`, when an error is encountered it will be
raised. Otherwise, the error will be logged and :code:`self.error_count`
is incremented.
:raises `giraffez.errors.GiraffeEncodeError`: if :code:`panic` is :code:`True` and there
are format errors in the row values.
:raises `giraffez.errors.GiraffeError`: if table name is not set.
:raises `giraffez.TeradataPTError`: if there is a problem
connecting to Teradata.
"""
if not self.initiated:
self._initiate()
try:
row_status = self.mload.put_row(self.preprocessor(items))
self.applied_count += 1
except (TeradataPTError, EncoderError) as error:
self.error_count += 1
if panic:
raise error
log.info("BulkLoad", error)
|
python
|
{
"resource": ""
}
|
q276965
|
TeradataBulkLoad.release
|
test
|
def release(self):
"""
Attempt release of target mload table.
:raises `giraffez.errors.GiraffeError`: if table was not set by
the constructor, the :code:`TeradataBulkLoad.table`, or
:meth:`~giraffez.load.TeradataBulkLoad.from_file`.
"""
if self.table is None:
raise GiraffeError("Cannot release. Target table has not been set.")
log.info("BulkLoad", "Attempting release for table {}".format(self.table))
self.mload.release(self.table)
|
python
|
{
"resource": ""
}
|
q276966
|
TeradataBulkLoad.tables
|
test
|
def tables(self):
"""
The names of the work tables used for loading.
:return: A list of four tables, each the name of the target table
with the added suffixes, "_wt", "_log", "_e1", and "_e2"
:raises `giraffez.errors.GiraffeError`: if table was not set by
the constructor, the :code:`TeradataBulkLoad.table`, or
:meth:`~giraffez.load.TeradataBulkLoad.from_file`.
"""
if self.table is None:
raise GiraffeError("Target table has not been set.")
return [
"{}_wt".format(self.table),
"{}_log".format(self.table),
"{}_e1".format(self.table),
"{}_e2".format(self.table),
]
|
python
|
{
"resource": ""
}
|
q276967
|
fix_compile
|
test
|
def fix_compile(remove_flags):
"""
Monkey-patch compiler to allow for removal of default compiler flags.
"""
import distutils.ccompiler
def _fix_compile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
for flag in remove_flags:
if flag in self.compiler_so:
self.compiler_so.remove(flag)
macros, objects, extra_postargs, pp_opts, build = self._setup_compile(output_dir, macros,
include_dirs, sources, depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
return objects
distutils.ccompiler.CCompiler.compile = _fix_compile
|
python
|
{
"resource": ""
}
|
q276968
|
find_teradata_home
|
test
|
def find_teradata_home():
"""
Attempts to find the Teradata install directory with the defaults
for a given platform. Should always return `None` when the defaults
are not present and the TERADATA_HOME environment variable wasn't
explicitly set to the correct install location.
"""
if platform.system() == 'Windows':
# The default installation path for Windows is split between the
# Windows directories for 32-bit/64-bit applications. It is
# worth noting that the installed Teradata architecture should match
# the architecture of the Python build being used (i.e.
# TTU 32-bit is required with Python 32-bit and TTU 64-bit is
# required for Python 64-bit).
if is_64bit():
return latest_teradata_version("C:/Program Files/Teradata/Client")
else:
return latest_teradata_version("C:/Program Files (x86)/Teradata/Client")
elif platform.system() == 'Linux':
return latest_teradata_version("/opt/teradata/client")
elif platform.system() == 'Darwin':
return latest_teradata_version("/Library/Application Support/teradata/client")
else:
# In the case nothing is found, the default for Linux is
# attempted as a last effort to find the correct install
# directory.
return latest_teradata_version("/opt/teradata/client")
|
python
|
{
"resource": ""
}
|
q276969
|
Secret.get
|
test
|
def get(self, key):
"""
Retrieve the decrypted value of a key in a giraffez
configuration file.
:param str key: The key used to lookup the encrypted value
"""
if not key.startswith("secure.") and not key.startswith("connections."):
key = "secure.{0}".format(key)
value = self.config.get_value(key)
if not isinstance(value, basestring):
value = None
return value
|
python
|
{
"resource": ""
}
|
q276970
|
Secret.set
|
test
|
def set(self, key, value):
"""
Set a decrypted value by key in a giraffez configuration file.
:param str key: The key used to lookup the encrypted value
:param value: Value to set at the given key, can be any value that is
YAML serializable.
"""
if not key.startswith("secure."):
key = "secure.{0}".format(key)
self.config.set_value(key, value)
self.config.write()
|
python
|
{
"resource": ""
}
|
q276971
|
GiraffeShell.do_table
|
test
|
def do_table(self, line):
"""Display results in table format"""
if len(line) > 0:
if line.strip().lower() == "on":
log.write("Table ON")
self.table_output = True
return
elif line.strip().lower() == "off":
log.write("Table OFF")
self.table_output = False
return
log.write("Table output: {}".format("ON" if self.table_output else "OFF"))
|
python
|
{
"resource": ""
}
|
q276972
|
TeradataCmd.execute
|
test
|
def execute(self, command, coerce_floats=True, parse_dates=False, header=False, sanitize=True,
silent=False, panic=None, multi_statement=False, prepare_only=False):
"""
Execute commands using CLIv2.
:param str command: The SQL command to be executed
:param bool coerce_floats: Coerce Teradata decimal types into Python floats
:param bool parse_dates: Parses Teradata datetime types into Python datetimes
:param bool header: Include row header
:param bool sanitize: Whether or not to call :func:`~giraffez.sql.prepare_statement`
on the command
:param bool silent: Silence console logging (within this function only)
:param bool panic: If :code:`True`, when an error is encountered it will be
raised.
:param bool multi_statement: Execute in multi-statement mode
:param bool prepare_only: Only prepare the command (no results)
:return: a cursor over the results of each statement in the command
:rtype: :class:`~giraffez.cmd.Cursor`
:raises `giraffez.TeradataError`: if the query is invalid
:raises `giraffez.errors.GiraffeError`: if the return data could not be decoded
"""
if panic is None:
panic = self.panic
self.options("panic", panic)
self.options("multi-statement mode", multi_statement, 3)
if isfile(command):
self.options("file", command, 2)
with open(command, 'r') as f:
command = f.read()
else:
if log.level >= VERBOSE:
self.options("query", command, 2)
else:
self.options("query", truncate(command), 2)
if not silent and not self.silent:
log.info("Command", "Executing ...")
log.info(self.options)
if sanitize:
command = prepare_statement(command) # accounts for comments and newlines
log.debug("Debug[2]", "Command (sanitized): {!r}".format(command))
self.cmd.set_encoding(ENCODER_SETTINGS_DEFAULT)
return Cursor(self.cmd, command, multi_statement=multi_statement, header=header,
prepare_only=prepare_only, coerce_floats=coerce_floats, parse_dates=parse_dates,
panic=panic)
|
python
|
{
"resource": ""
}
|
q276973
|
Config.get_value
|
test
|
def get_value(self, key, default={}, nested=True, decrypt=True):
"""
Retrieve a value from the configuration based on its key. The key
may be nested.
:param str key: A path to the value, with nested levels joined by '.'
:param default: Value to return if the key does not exist (defaults to :code:`dict()`)
:param bool decrypt: If :code:`True`, decrypt an encrypted value before returning
(if encrypted). Defaults to :code:`True`.
"""
key = key.lstrip()
if key.endswith("."):
key = key[:-1]
if nested:
path = key.split(".")
curr = self.settings
for p in path[:-1]:
curr = curr.get(p, {})
try:
value = curr[path[-1]]
except KeyError:
return default
value = self.decrypt(value, path)
return value
else:
return self.settings.get(key, default)
|
python
|
{
"resource": ""
}
|
q276974
|
Config.write_default
|
test
|
def write_default(self, conf=None):
"""
Writes a default configuration file structure to a file.
Note that the contents of the file will be overwritten if it already exists.
:param str conf: The name of the file to write to. Defaults to :code:`None`, for ~/.girafferc
:return: The content written to the file
:rtype: str
"""
if conf is None:
conf = home_file(".girafferc")
contents = yaml.dump(default_config, default_flow_style=False)
with open(conf, "w") as f:
f.write(contents)
os.chmod(conf, 0o600)
return contents
|
python
|
{
"resource": ""
}
|
q276975
|
Columns.set_filter
|
test
|
def set_filter(self, names=None):
"""
Set the names of columns to be used when iterating through the list,
retrieving names, etc.
:param list names: A list of names to be used, or :code:`None` for all
"""
_names = []
if names:
for name in names:
_safe_name = safe_name(name)
if _safe_name not in self._column_map:
raise GiraffeTypeError("Column '{}' does not exist".format(name))
if _safe_name in _names:
continue
_names.append(_safe_name)
self._filtered_columns = _names
|
python
|
{
"resource": ""
}
|
q276976
|
TeradataBulkExport.to_archive
|
test
|
def to_archive(self, writer):
"""
Writes export archive files in the Giraffez archive format.
This takes a `giraffez.io.Writer` and writes archive chunks to
file until all rows for a given statement have been exhausted.
.. code-block:: python
with giraffez.BulkExport("database.table_name") as export:
with giraffez.Writer("database.table_name.tar.gz", 'wb', use_gzip=True) as out:
for n in export.to_archive(out):
print("Rows: {}".format(n))
:param `giraffez.io.Writer` writer: A writer handling the archive output
:rtype: iterator (yields ``int``)
"""
if 'b' not in writer.mode:
raise GiraffeError("Archive writer must be in binary mode")
writer.write(GIRAFFE_MAGIC)
writer.write(self.columns.serialize())
for chunk in self._fetchall(ROW_ENCODING_RAW):
writer.write(chunk)
yield TeradataEncoder.count(chunk)
|
python
|
{
"resource": ""
}
|
q276977
|
TeradataBulkExport.to_str
|
test
|
def to_str(self, delimiter='|', null='NULL'):
"""
Sets the current encoder output to Python `str` and returns
a row iterator.
:param str null: The string representation of null values
:param str delimiter: The string delimiting values in the output
string
:rtype: iterator (yields ``str``)
"""
self.export.set_null(null)
self.export.set_delimiter(delimiter)
self.options("delimiter", escape_string(delimiter), 2)
self.options("null", null, 3)
return self._fetchall(ENCODER_SETTINGS_STRING, coerce_floats=False)
|
python
|
{
"resource": ""
}
|
q276978
|
float_with_multiplier
|
test
|
def float_with_multiplier(string):
"""Convert string with optional k, M, G, T multiplier to float"""
match = re_float_with_multiplier.search(string)
if not match or not match.group('num'):
raise ValueError('String "{}" is not numeric!'.format(string))
num = float(match.group('num'))
multi = match.group('multi')
if multi:
try:
num *= multipliers[multi]
except KeyError:
raise ValueError('Unknown multiplier: {}'.format(multi))
return num
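# Illustrative calls (assumption: the module-level `multipliers` maps 'k',
# 'M', 'G', 'T' to 1e3..1e12 and `re_float_with_multiplier` captures 'num'
# and 'multi' groups; neither is shown in this snippet):
#
#   float_with_multiplier('2.5M')  # -> 2500000.0
#   float_with_multiplier('10k')   # -> 10000.0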
|
python
|
{
"resource": ""
}
|
q276979
|
specific_gains
|
test
|
def specific_gains(string):
"""Convert string with gains of individual amplification elements to dict"""
if not string:
return {}
gains = {}
for gain in string.split(','):
amp_name, value = gain.split('=')
gains[amp_name.strip()] = float(value.strip())
return gains
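# Example (hypothetical amplifier names):
#
#   specific_gains('LNA=32,TIA=9')  # -> {'LNA': 32.0, 'TIA': 9.0}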
|
python
|
{
"resource": ""
}
|
q276980
|
device_settings
|
test
|
def device_settings(string):
"""Convert string with SoapySDR device settings to dict"""
if not string:
return {}
settings = {}
for setting in string.split(','):
setting_name, value = setting.split('=')
settings[setting_name.strip()] = value.strip()
return settings
|
python
|
{
"resource": ""
}
|
q276981
|
wrap
|
test
|
def wrap(text, indent=' '):
"""Wrap text to terminal width with default indentation"""
wrapper = textwrap.TextWrapper(
width=int(os.environ.get('COLUMNS', 80)),
initial_indent=indent,
subsequent_indent=indent
)
return '\n'.join(wrapper.wrap(text))
|
python
|
{
"resource": ""
}
|
q276982
|
detect_devices
|
test
|
def detect_devices(soapy_args=''):
"""Returns detected SoapySDR devices"""
devices = simplesoapy.detect_devices(soapy_args, as_string=True)
text = []
text.append('Detected SoapySDR devices:')
if devices:
for i, d in enumerate(devices):
text.append(' {}'.format(d))
else:
text.append(' No devices found!')
return (devices, '\n'.join(text))
|
python
|
{
"resource": ""
}
|
q276983
|
PSD.set_center_freq
|
test
|
def set_center_freq(self, center_freq):
"""Set center frequency and clear averaged PSD data"""
psd_state = {
'repeats': 0,
'freq_array': self._base_freq_array + self._lnb_lo + center_freq,
'pwr_array': None,
'update_lock': threading.Lock(),
'futures': [],
}
return psd_state
|
python
|
{
"resource": ""
}
|
q276984
|
PSD.result
|
test
|
def result(self, psd_state):
"""Return freqs and averaged PSD for given center frequency"""
freq_array = numpy.fft.fftshift(psd_state['freq_array'])
pwr_array = numpy.fft.fftshift(psd_state['pwr_array'])
if self._crop_factor:
crop_bins_half = round((self._crop_factor * self._bins) / 2)
freq_array = freq_array[crop_bins_half:-crop_bins_half]
pwr_array = pwr_array[crop_bins_half:-crop_bins_half]
if psd_state['repeats'] > 1:
pwr_array = pwr_array / psd_state['repeats']
if self._log_scale:
pwr_array = 10 * numpy.log10(pwr_array)
return (freq_array, pwr_array)
|
python
|
{
"resource": ""
}
|
q276985
|
PSD.wait_for_result
|
test
|
def wait_for_result(self, psd_state):
"""Wait for all PSD threads to finish and return result"""
if len(psd_state['futures']) > 1:
concurrent.futures.wait(psd_state['futures'])
elif psd_state['futures']:
psd_state['futures'][0].result()
return self.result(psd_state)
|
python
|
{
"resource": ""
}
|
q276986
|
PSD.update
|
test
|
def update(self, psd_state, samples_array):
"""Compute PSD from samples and update average for given center frequency"""
freq_array, pwr_array = simplespectral.welch(samples_array, self._sample_rate, nperseg=self._bins,
window=self._fft_window, noverlap=self._fft_overlap_bins,
detrend=self._detrend)
if self._remove_dc:
pwr_array[0] = (pwr_array[1] + pwr_array[-1]) / 2
with psd_state['update_lock']:
psd_state['repeats'] += 1
if psd_state['pwr_array'] is None:
psd_state['pwr_array'] = pwr_array
else:
psd_state['pwr_array'] += pwr_array
|
python
|
{
"resource": ""
}
|
q276987
|
SoapyPowerBinFormat.read
|
test
|
def read(self, f):
"""Read data from file-like object"""
magic = f.read(len(self.magic))
if not magic:
return None
if magic != self.magic:
raise ValueError('Magic bytes not found! Read data: {}'.format(magic))
header = self.header._make(
self.header_struct.unpack(f.read(self.header_struct.size))
)
    # numpy.fromstring is deprecated for binary data; frombuffer + copy() keeps the array writable
    pwr_array = numpy.frombuffer(f.read(header.size), dtype='float32').copy()
return (header, pwr_array)
|
python
|
{
"resource": ""
}
|
q276988
|
SoapyPowerBinFormat.write
|
test
|
def write(self, f, time_start, time_stop, start, stop, step, samples, pwr_array):
"""Write data to file-like object"""
f.write(self.magic)
f.write(self.header_struct.pack(
self.version, time_start, time_stop, start, stop, step, samples, pwr_array.nbytes
))
    # tobytes() is used instead of tofile() so that non-seekable file-like objects (e.g. pipes) also work
    f.write(pwr_array.tobytes())
f.flush()
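# Hedged round-trip sketch; the magic bytes, version and header struct live in
# the class definition, which is not shown in this record:
#
#   import io, numpy
#   fmt = SoapyPowerBinFormat()
#   buf = io.BytesIO()
#   pwr = numpy.zeros(512, dtype='float32')
#   fmt.write(buf, 0.0, 1.0, 100e6, 102e6, 4e3, 512, pwr)
#   buf.seek(0)
#   header, pwr_read = fmt.read(buf)   # header.size == pwr.nbytes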
|
python
|
{
"resource": ""
}
|
q276989
|
ThreadPoolExecutor.submit
|
test
|
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
    Tracks the maximum work queue size reached in ThreadPoolExecutor.max_queue_size_reached.
"""
future = super().submit(fn, *args, **kwargs)
work_queue_size = self._work_queue.qsize()
if work_queue_size > self.max_queue_size_reached:
self.max_queue_size_reached = work_queue_size
return future
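# A minimal sketch of the subclass this method appears to belong to, assuming it
# extends concurrent.futures.ThreadPoolExecutor:
#
#   import concurrent.futures
#
#   class ThreadPoolExecutor(concurrent.futures.ThreadPoolExecutor):
#       max_queue_size_reached = 0
#       # submit() as defined above; _work_queue is inherited from the stdlib class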
|
python
|
{
"resource": ""
}
|
q276990
|
SoapyPower.time_to_repeats
|
test
|
def time_to_repeats(self, bins, integration_time):
"""Convert integration time to number of repeats"""
return math.ceil((self.device.sample_rate * integration_time) / bins)
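# Worked example: at a 2.048 MHz sample rate, a 1 s integration with 512 bins
# needs ceil((2048000 * 1) / 512) = 4000 repeats (4000 averaged FFT segments).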
|
python
|
{
"resource": ""
}
|
q276991
|
SoapyPower.freq_plan
|
test
|
def freq_plan(self, min_freq, max_freq, bins, overlap=0, quiet=False):
"""Returns list of frequencies for frequency hopping"""
bin_size = self.bins_to_bin_size(bins)
bins_crop = round((1 - overlap) * bins)
sample_rate_crop = (1 - overlap) * self.device.sample_rate
freq_range = max_freq - min_freq
    hopping = freq_range >= sample_rate_crop
hop_size = self.nearest_freq(sample_rate_crop, bin_size)
hops = math.ceil(freq_range / hop_size) if hopping else 1
min_center_freq = min_freq + (hop_size / 2) if hopping else min_freq + (freq_range / 2)
max_center_freq = min_center_freq + ((hops - 1) * hop_size)
freq_list = [min_center_freq + (i * hop_size) for i in range(hops)]
if not quiet:
logger.info('overlap: {:.5f}'.format(overlap))
logger.info('bin_size: {:.2f} Hz'.format(bin_size))
logger.info('bins: {}'.format(bins))
logger.info('bins (after crop): {}'.format(bins_crop))
logger.info('sample_rate: {:.3f} MHz'.format(self.device.sample_rate / 1e6))
logger.info('sample_rate (after crop): {:.3f} MHz'.format(sample_rate_crop / 1e6))
logger.info('freq_range: {:.3f} MHz'.format(freq_range / 1e6))
logger.info('hopping: {}'.format('YES' if hopping else 'NO'))
logger.info('hop_size: {:.3f} MHz'.format(hop_size / 1e6))
logger.info('hops: {}'.format(hops))
logger.info('min_center_freq: {:.3f} MHz'.format(min_center_freq / 1e6))
logger.info('max_center_freq: {:.3f} MHz'.format(max_center_freq / 1e6))
logger.info('min_freq (after crop): {:.3f} MHz'.format((min_center_freq - (hop_size / 2)) / 1e6))
logger.info('max_freq (after crop): {:.3f} MHz'.format((max_center_freq + (hop_size / 2)) / 1e6))
logger.debug('Frequency hops table:')
logger.debug(' {:8s} {:8s} {:8s}'.format('Min:', 'Center:', 'Max:'))
for f in freq_list:
logger.debug(' {:8.3f} MHz {:8.3f} MHz {:8.3f} MHz'.format(
(f - (self.device.sample_rate / 2)) / 1e6,
f / 1e6,
(f + (self.device.sample_rate / 2)) / 1e6,
))
return freq_list
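# Worked example, assuming bins_to_bin_size() returns sample_rate / bins and
# nearest_freq() rounds to the nearest bin_size multiple (neither is shown here).
# sample_rate = 2.048 MHz, min_freq = 100 MHz, max_freq = 110 MHz, bins = 512, overlap = 0:
#   bin_size  = 2048000 / 512 = 4000 Hz
#   hop_size  = 2.048 MHz; freq_range = 10 MHz >= hop_size, so hopping with hops = ceil(10 / 2.048) = 5
#   freq_list = [101.024, 103.072, 105.120, 107.168, 109.216] MHz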
|
python
|
{
"resource": ""
}
|
q276992
|
SoapyPower.create_buffer
|
test
|
def create_buffer(self, bins, repeats, base_buffer_size, max_buffer_size=0):
"""Create buffer for reading samples"""
samples = bins * repeats
buffer_repeats = 1
buffer_size = math.ceil(samples / base_buffer_size) * base_buffer_size
if not max_buffer_size:
# Max buffer size about 100 MB
max_buffer_size = (100 * 1024**2) / 8
if max_buffer_size > 0:
max_buffer_size = math.ceil(max_buffer_size / base_buffer_size) * base_buffer_size
if buffer_size > max_buffer_size:
            logger.warning('Required buffer size ({}) will be shrunk to max_buffer_size ({})!'.format(
buffer_size, max_buffer_size
))
buffer_repeats = math.ceil(buffer_size / max_buffer_size)
buffer_size = max_buffer_size
logger.info('repeats: {}'.format(repeats))
logger.info('samples: {} (time: {:.5f} s)'.format(samples, samples / self.device.sample_rate))
if max_buffer_size > 0:
logger.info('max_buffer_size (samples): {} (repeats: {:.2f}, time: {:.5f} s)'.format(
max_buffer_size, max_buffer_size / bins, max_buffer_size / self.device.sample_rate
))
else:
logger.info('max_buffer_size (samples): UNLIMITED')
logger.info('buffer_size (samples): {} (repeats: {:.2f}, time: {:.5f} s)'.format(
buffer_size, buffer_size / bins, buffer_size / self.device.sample_rate
))
logger.info('buffer_repeats: {}'.format(buffer_repeats))
return (buffer_repeats, zeros(buffer_size, numpy.complex64))
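# Worked example: bins = 512, repeats = 4000, base_buffer_size = 32768:
#   samples     = 512 * 4000 = 2048000
#   buffer_size = ceil(2048000 / 32768) * 32768 = 63 * 32768 = 2064384 samples
#   default max_buffer_size = 100 MiB / 8 B per complex64 sample = 13107200 samples,
#   so the buffer fits and buffer_repeats stays at 1.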
|
python
|
{
"resource": ""
}
|
q276993
|
SoapyPower.setup
|
test
|
def setup(self, bins, repeats, base_buffer_size=0, max_buffer_size=0, fft_window='hann',
fft_overlap=0.5, crop_factor=0, log_scale=True, remove_dc=False, detrend=None,
lnb_lo=0, tune_delay=0, reset_stream=False, max_threads=0, max_queue_size=0):
"""Prepare samples buffer and start streaming samples from device"""
if self.device.is_streaming:
self.device.stop_stream()
base_buffer = self.device.start_stream(buffer_size=base_buffer_size)
self._bins = bins
self._repeats = repeats
self._base_buffer_size = len(base_buffer)
self._max_buffer_size = max_buffer_size
self._buffer_repeats, self._buffer = self.create_buffer(
bins, repeats, self._base_buffer_size, self._max_buffer_size
)
self._tune_delay = tune_delay
self._reset_stream = reset_stream
self._psd = psd.PSD(bins, self.device.sample_rate, fft_window=fft_window, fft_overlap=fft_overlap,
crop_factor=crop_factor, log_scale=log_scale, remove_dc=remove_dc, detrend=detrend,
lnb_lo=lnb_lo, max_threads=max_threads, max_queue_size=max_queue_size)
self._writer = writer.formats[self._output_format](self._output)
|
python
|
{
"resource": ""
}
|
q276994
|
SoapyPower.stop
|
test
|
def stop(self):
"""Stop streaming samples from device and delete samples buffer"""
if not self.device.is_streaming:
return
self.device.stop_stream()
self._writer.close()
self._bins = None
self._repeats = None
self._base_buffer_size = None
self._max_buffer_size = None
self._buffer_repeats = None
self._buffer = None
self._tune_delay = None
self._reset_stream = None
self._psd = None
self._writer = None
|
python
|
{
"resource": ""
}
|
q276995
|
SoapyPower.psd
|
test
|
def psd(self, freq):
"""Tune to specified center frequency and compute Power Spectral Density"""
if not self.device.is_streaming:
raise RuntimeError('Streaming is not initialized, you must run setup() first!')
# Tune to new frequency in main thread
logger.debug(' Frequency hop: {:.2f} Hz'.format(freq))
t_freq = time.time()
if self.device.freq != freq:
# Deactivate streaming before tuning
if self._reset_stream:
self.device.device.deactivateStream(self.device.stream)
# Actually tune to new center frequency
self.device.freq = freq
        # Reactivate streaming after tuning
if self._reset_stream:
self.device.device.activateStream(self.device.stream)
# Delay reading samples after tuning
if self._tune_delay:
t_delay = time.time()
while True:
self.device.read_stream()
t_delay_end = time.time()
if t_delay_end - t_delay >= self._tune_delay:
break
logger.debug(' Tune delay: {:.3f} s'.format(t_delay_end - t_delay))
else:
logger.debug(' Same frequency as before, tuning skipped')
psd_state = self._psd.set_center_freq(freq)
t_freq_end = time.time()
logger.debug(' Tune time: {:.3f} s'.format(t_freq_end - t_freq))
for repeat in range(self._buffer_repeats):
logger.debug(' Repeat: {}'.format(repeat + 1))
# Read samples from SDR in main thread
t_acq = time.time()
acq_time_start = datetime.datetime.utcnow()
self.device.read_stream_into_buffer(self._buffer)
acq_time_stop = datetime.datetime.utcnow()
t_acq_end = time.time()
logger.debug(' Acquisition time: {:.3f} s'.format(t_acq_end - t_acq))
# Start FFT computation in another thread
self._psd.update_async(psd_state, numpy.copy(self._buffer))
t_final = time.time()
if _shutdown:
break
psd_future = self._psd.result_async(psd_state)
logger.debug(' Total hop time: {:.3f} s'.format(t_final - t_freq))
return (psd_future, acq_time_start, acq_time_stop)
|
python
|
{
"resource": ""
}
|
q276996
|
SoapyPower.sweep
|
test
|
def sweep(self, min_freq, max_freq, bins, repeats, runs=0, time_limit=0, overlap=0,
fft_window='hann', fft_overlap=0.5, crop=False, log_scale=True, remove_dc=False, detrend=None, lnb_lo=0,
tune_delay=0, reset_stream=False, base_buffer_size=0, max_buffer_size=0, max_threads=0, max_queue_size=0):
"""Sweep spectrum using frequency hopping"""
self.setup(
bins, repeats, base_buffer_size, max_buffer_size,
fft_window=fft_window, fft_overlap=fft_overlap, crop_factor=overlap if crop else 0,
log_scale=log_scale, remove_dc=remove_dc, detrend=detrend, lnb_lo=lnb_lo, tune_delay=tune_delay,
reset_stream=reset_stream, max_threads=max_threads, max_queue_size=max_queue_size
)
try:
freq_list = self.freq_plan(min_freq - lnb_lo, max_freq - lnb_lo, bins, overlap)
t_start = time.time()
run = 0
while not _shutdown and (runs == 0 or run < runs):
run += 1
t_run_start = time.time()
logger.debug('Run: {}'.format(run))
for freq in freq_list:
# Tune to new frequency, acquire samples and compute Power Spectral Density
psd_future, acq_time_start, acq_time_stop = self.psd(freq)
# Write PSD to stdout (in another thread)
self._writer.write_async(psd_future, acq_time_start, acq_time_stop,
len(self._buffer) * self._buffer_repeats)
if _shutdown:
break
# Write end of measurement marker (in another thread)
write_next_future = self._writer.write_next_async()
t_run = time.time()
logger.debug(' Total run time: {:.3f} s'.format(t_run - t_run_start))
# End measurement if time limit is exceeded
if time_limit and (time.time() - t_start) >= time_limit:
logger.info('Time limit of {} s exceeded, completed {} runs'.format(time_limit, run))
break
# Wait for last write to be finished
write_next_future.result()
# Debug thread pool queues
        logger.debug('Number of USB buffer overflow errors: {}'.format(self.device.buffer_overflow_count))
        logger.debug('PSD worker threads: {}'.format(self._psd._executor._max_workers))
        logger.debug('Max. PSD queue size: {} / {}'.format(self._psd._executor.max_queue_size_reached,
                                                           self._psd._executor.max_queue_size))
        logger.debug('Writer worker threads: {}'.format(self._writer._executor._max_workers))
        logger.debug('Max. Writer queue size: {} / {}'.format(self._writer._executor.max_queue_size_reached,
                                                              self._writer._executor.max_queue_size))
finally:
# Shutdown SDR
self.stop()
t_stop = time.time()
logger.info('Total time: {:.3f} s'.format(t_stop - t_start))
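# Hedged usage sketch; the SoapyPower constructor is not shown in this record,
# so the arguments below are illustrative only:
#
#   sdr = SoapyPower(soapy_args='driver=rtlsdr', sample_rate=2.048e6)
#   sdr.sweep(100e6, 110e6, bins=512, repeats=4000, runs=1)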
|
python
|
{
"resource": ""
}
|
q276997
|
SMBus._set_addr
|
test
|
def _set_addr(self, addr):
"""private helper method"""
if self._addr != addr:
ioctl(self._fd, SMBUS.I2C_SLAVE, addr)
self._addr = addr
|
python
|
{
"resource": ""
}
|
q276998
|
run_cmake
|
test
|
def run_cmake(arg=""):
    """
    Force CMake to run
    """
    # Note: ds, dd and op appear to alias distutils.spawn, distutils.dir_util and os.path
    if ds.find_executable('cmake') is None:
        print("CMake is required to build zql")
        print("Please install cmake version >= 2.8 and re-run setup")
        sys.exit(-1)
    print("Configuring zql build with CMake....")
    cmake_args = arg
    try:
        build_dir = op.join(op.split(__file__)[0], 'build')
        dd.mkpath(build_dir)
        os.chdir("build")
        ds.spawn(['cmake', '..'] + cmake_args.split())
        ds.spawn(['make', 'clean'])
        ds.spawn(['make'])
        os.chdir("..")
    except ds.DistutilsExecError:
        print("Error while running cmake")
        print("run 'setup.py build --help' for build options")
        print("You may also try editing the settings in CMakeLists.txt file and re-running setup")
        sys.exit(-1)
|
python
|
{
"resource": ""
}
|
q276999
|
Filter.filter
|
test
|
def filter(cls, datetimes, number, now=None, **options):
"""Return a set of datetimes, after filtering ``datetimes``.
The result will be the ``datetimes`` which are ``number`` of
units before ``now``, until ``now``, with approximately one
unit between each of them. The first datetime for any unit is
kept, later duplicates are removed.
If there are ``datetimes`` after ``now``, they will be
returned unfiltered.
"""
if not isinstance(number, int) or number < 0:
raise ValueError('Invalid number: %s' % number)
datetimes = tuple(datetimes)
# Sample the first datetime to see if it is timezone-aware
tzinfo = None
if datetimes and datetimes[0].tzinfo is not None:
tzinfo = UTC()
if now is None:
now = datetime.now(tzinfo)
if not hasattr(now, 'second'):
# now looks like a date, so convert it into a datetime
now = datetime.combine(now, time(23, 59, 59, 999999, tzinfo=tzinfo))
# Always keep datetimes from the future
future = set(dt for dt in datetimes if dt > now)
if number == 0:
return future
# Don't consider datetimes from before the start
start = cls.start(now, number, **options)
valid = (dt for dt in datetimes if start <= dt <= now)
# Deduplicate datetimes with the same mask() value by keeping
# the oldest.
kept = {}
for dt in sorted(valid):
kept.setdefault(cls.mask(dt, **options), dt)
return set(kept.values()) | future
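# A hypothetical concrete filter sketching the cls.start() and cls.mask() hooks
# this classmethod relies on (neither is shown in this record; assumes
# `from datetime import timedelta`):
#
#   class Day(Filter):
#       @classmethod
#       def start(cls, now, number, **options):
#           return now - timedelta(days=number)
#
#       @classmethod
#       def mask(cls, dt, **options):
#           return dt.date()   # keep one datetime per calendar day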
|
python
|
{
"resource": ""
}
|