docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
---|---|---|
Asynchronously get data from Chunked transfer encoding of https://smartcity.rbccps.org/api/0.1.0/subscribe.
(Only this function requires Python 3. The rest of the functions can be run in Python 2.)
Args:
url (string): url to subscribe
|
async def asynchronously_get_data(self, url):
    """Asynchronously consume a chunked-transfer-encoding subscription.

    Each received chunk is decoded and stored on ``self.subscribe_data``
    as ``{"data": ..., "timestamp": ...}`` (millisecond timestamp).

    (Only this function requires Python 3. The rest of the functions can
    be run in Python 2.)

    Args:
        url (string): url to subscribe
    """
    headers = {"apikey": self.entity_api_key}
    try:
        # NOTE(review): verify_ssl=False disables certificate checking —
        # presumably intentional for this server; confirm.
        async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
            async with session.get(url, headers=headers, timeout=3000) as response:
                while True:  # loop over for each chunk of data
                    chunk = await response.content.readchunk()
                    if not chunk:
                        break
                    if platform == "linux" or platform == "linux2":  # In linux systems, readchunk() returns a tuple
                        chunk = chunk[0]
                    resp = dict()
                    resp["data"] = chunk.decode()
                    # Millisecond-resolution wall-clock timestamp for the chunk.
                    current_milli_time = lambda: int(round(time() * 1000))
                    resp["timestamp"] = str(current_milli_time())
                    self.subscribe_data = resp
    except Exception as e:
        print("\n********* Oops: " + url + " " + str(type(e)) + str(e) + " *********\n")
    print('\n********* Closing TCP: {} *********\n'.format(url))
| 907,744 |
Copies a file from its location on the web to a designated
place on the local machine.
Args:
file_path: Complete url of the file to copy, string (e.g. http://fool.com/input.css).
target_path: Path and name of file on the local machine, string. (e.g. /directory/output.css)
Returns:
None.
|
def copy_web_file_to_local(file_path, target_path):
    """Copy a file from its location on the web to a designated
    place on the local machine.

    Args:
        file_path: Complete url of the file to copy, string
            (e.g. http://fool.com/input.css).
        target_path: Path and name of file on the local machine, string
            (e.g. /directory/output.css).

    Returns:
        None.
    """
    response = urllib.request.urlopen(file_path)
    try:
        # urlopen returns bytes in Python 3, so the target must be opened
        # in binary mode; the original text-mode 'w' raised TypeError.
        with open(target_path, 'wb') as out_file:
            out_file.write(response.read())
    finally:
        # Always release the network connection.
        response.close()
| 907,842 |
Counts the number of lines in a file.
Args:
fname: string, name of the file.
Returns:
integer, the number of lines in the file.
|
def get_line_count(fname):
    """Count the number of lines in a file.

    Args:
        fname: string, name of the file.

    Returns:
        integer, the number of lines in the file (0 for an empty file).
    """
    # Stream and count; the original enumerate-and-discard loop returned
    # 1 for an empty file because its counter started at 0.
    with open(fname) as handle:
        return sum(1 for _ in handle)
| 907,843 |
Indents css that has not been indented and saves it to a new file.
A new file is created if the output destination does not already exist.
Args:
f: string, path to file.
output: string, path/name of the output file (e.g. /directory/output.css).
print type(response.read())
Returns:
None.
|
def indent_css(f, output):
    """Indent css that has not been indented and save it to a new file.

    A new file is created if the output destination does not already exist.

    Args:
        f: string, path to file.
        output: string, path/name of the output file (e.g. /directory/output.css).

    Returns:
        None.
    """
    # Read everything first so that in-place use (f == output) is safe;
    # the original interleaved reads and writes on the same file.
    with open(f, 'r') as source:
        lines = source.readlines()
    # 'w' creates the file when missing and truncates stale content; the
    # original 'r+' raised on a missing file and leaked the handles.
    with open(output, 'w') as target:
        for raw in lines:
            line = raw.rstrip()
            if len(line) > 0:
                if line[-1] == ";":
                    # Declarations (ending in ';') get one level of indent.
                    target.write("    " + line + "\n")
                else:
                    target.write(line + "\n")
| 907,844 |
Adds line breaks after every occurrence of a given character in a file.
Args:
f: string, path to input file.
output: string, path to output file.
Returns:
None.
|
def add_newlines(f, output, char):
    """Add a line break after every occurrence of a given character in a file.

    Args:
        f: string, path to input file.
        output: string, path to output file.
        char: string, the character (treated as a regex pattern) to break after.

    Returns:
        None.
    """
    # Read fully first so in-place use is safe.
    with open(f, 'r') as source:
        text = source.read()
    # 'w' creates/truncates the output; the original 'r+' failed on a
    # missing file and both handles were never closed.
    with open(output, 'w') as target:
        target.write(re.sub(char, char + '\n', text))
| 907,845 |
Adds a space before a character if there isn't one already.
Args:
char: string, character that needs a space before it.
input_file: string, path to file to parse.
output_file: string, path to destination file.
Returns:
None.
|
def add_whitespace_before(char, input_file, output_file):
    """Add a space before a character if there isn't one already.

    Args:
        char: string, character (regex pattern) that needs a space before it.
        input_file: string, path to file to parse.
        output_file: string, path to destination file.

    Returns:
        None.
    """
    # Read fully first so in-place use (input_file == output_file) is safe.
    with open(input_file, 'r') as source:
        lines = source.readlines()
    # 'w' creates the file when missing; the original 'r+' raised and the
    # output handle was never closed.
    with open(output_file, 'w') as target:
        for line in lines:
            # Insert a space only where the char directly follows an
            # alphanumeric; the original rewrote every occurrence on the
            # line and doubled spaces that already existed.
            target.write(re.sub(r'([a-zA-Z0-9])' + char, r'\1 ' + char, line))
| 907,846 |
Reformats poorly written css. This function does not validate or fix errors in the code.
It only gives code the proper indentation.
Args:
input_file: string, path to the input file.
output_file: string, path to where the reformatted css should be saved. If the target file
doesn't exist, a new file is created.
Returns:
None.
|
def reformat_css(input_file, output_file):
    """Reformat poorly written css.

    This function does not validate or fix errors in the code; it only
    gives the code proper line breaks and indentation.

    Args:
        input_file: string, path to the input file.
        output_file: string, path to where the reformatted css should be
            saved. If the target file doesn't exist, a new file is created.

    Returns:
        None.
    """
    with open(input_file, 'r') as source:
        raw_lines = source.readlines()
    with open(output_file, 'w') as target:
        for raw in raw_lines:
            # Eliminate whitespace at the beginning and end of lines.
            line = raw.strip()
            # New line after every '{'.
            line = re.sub(r'\{', '{\n', line)
            # Collapse '; ' then break after every ';'.
            line = re.sub('; ', ';', line)
            line = re.sub(';', ';\n', line)
            # Eliminate whitespace between '}' and a following comment.
            # The original pattern '} /*' treated '*' as a quantifier on
            # '/', so any '} ' was corrupted into '}/*'; escape properly.
            line = re.sub(r'\} /\*', '}/*', line)
            # New line after every '}'.
            line = re.sub(r'\}', '}\n', line)
            # New line at the end of comments.
            line = re.sub(r'\*/', '*/\n', line)
            target.write(line)
    # Indent the css.
    indent_css(output_file, output_file)
    # Make sure there's a space before every '{'.
    add_whitespace_before("{", output_file, output_file)
| 907,847 |
Checks if a string is an integer. If the string value is an integer
return True, otherwise return False.
Args:
string: a string to test.
Returns:
boolean
|
def is_int(string):
    """Check if a string represents an integer value.

    Args:
        string: a string to test.

    Returns:
        boolean: True when the string parses as a whole number,
        False otherwise.
    """
    try:
        parsed = float(string)
        truncated = int(parsed)
    except (ValueError, OverflowError):
        # OverflowError covers 'inf'/'-inf', which float() accepts but
        # int() cannot convert; the original let that exception escape.
        return False
    else:
        return parsed == truncated
| 907,848 |
Take a list of strings and clear whitespace
on each one. If a value in the list is not a
string pass it through untouched.
Args:
iterable: mixed list
Returns:
mixed list
|
def clean_strings(iterable):
    """Strip surrounding whitespace from every string in a list.

    Values without a ``strip`` method (non-strings) are passed through
    untouched.

    Args:
        iterable: mixed list.

    Returns:
        mixed list.
    """
    cleaned = []
    for item in iterable:
        stripper = getattr(item, 'strip', None)
        cleaned.append(stripper() if stripper is not None else item)
    return cleaned
| 907,850 |
Uses Heron's formula to find the area of a triangle
based on the coordinates of three points.
Args:
point1: list or tuple, the x y coordinate of point one.
point2: list or tuple, the x y coordinate of point two.
point3: list or tuple, the x y coordinate of point three.
Returns:
The area of a triangle as a floating point number.
Requires:
The math module, point_distance().
|
def triangle_area(point1, point2, point3):
    """Use Heron's formula to find the area of a triangle
    from the coordinates of its three corners.

    Args:
        point1: list or tuple, the x y coordinate of point one.
        point2: list or tuple, the x y coordinate of point two.
        point3: list or tuple, the x y coordinate of point three.

    Returns:
        The area of the triangle as a floating point number.

    Requires:
        The math module, point_distance().
    """
    side_a = point_distance(point1, point2)
    side_b = point_distance(point1, point3)
    side_c = point_distance(point2, point3)
    # Semi-perimeter for Heron's formula.
    semi = (side_a + side_b + side_c) / 2.0
    return math.sqrt(semi * (semi - side_a) * (semi - side_b) * (semi - side_c))
| 907,853 |
Calculates the area of a regular polygon (with sides of equal length).
Args:
number_of_sides: Integer, the number of sides of the polygon
length_of_sides: Integer or floating point number, the length of the sides
Returns:
The area of a regular polygon as an integer or floating point number
Requires:
The math module
|
def regular_polygon_area(number_of_sides, length_of_sides):
    """Calculate the area of a regular polygon (with sides of equal length).

    Args:
        number_of_sides: Integer, the number of sides of the polygon.
        length_of_sides: Integer or floating point number, the length of the sides.

    Returns:
        The area of a regular polygon as an integer or floating point number.

    Requires:
        The math module.
    """
    # Standard formula: n * s^2 / (4 * tan(pi / n)).
    angle_factor = math.tan(math.pi / number_of_sides)
    return (0.25 * number_of_sides * length_of_sides ** 2) / angle_factor
| 907,854 |
Calculates the median of a list of integers or floating point numbers.
Args:
data: A list of integers or floating point numbers
Returns:
Sorts the list numerically and returns the middle number if the list has an odd number
of items. If the list contains an even number of items the mean of the two middle numbers
is returned.
|
def median(data):
    """Calculate the median of a list of integers or floating point numbers.

    Args:
        data: A list of integers or floating point numbers.

    Returns:
        The middle value of the numerically sorted list when the list has
        an odd number of items; the mean of the two middle values when it
        has an even number of items.
    """
    ordered = sorted(data)
    midpoint = len(ordered) // 2
    if len(ordered) % 2:
        return ordered[midpoint]
    return (ordered[midpoint - 1] + ordered[midpoint]) / 2.0
| 907,855 |
Calculates the average or mean of a list of numbers
Args:
numbers: a list of integers or floating point numbers.
numtype: string, 'decimal' or 'float'; the type of number to return.
Returns:
The average (mean) of the numbers as a floating point number
or a Decimal object.
Requires:
The math module
|
def average(numbers, numtype='float'):
    """Calculate the average (mean) of a list of numbers.

    Args:
        numbers: a list of integers or floating point numbers.
        numtype: string, 'decimal' or 'float'; the type of number to return.

    Returns:
        The average of the numbers as a floating point number or a
        Decimal object.
    """
    # The original compared the *builtin* ``type`` against 'decimal', so
    # the Decimal branch was unreachable; compare the ``numtype`` argument.
    if numtype == 'decimal':
        return Decimal(sum(numbers)) / len(numbers)
    else:
        return float(sum(numbers)) / len(numbers)
| 907,856 |
Calculate net take-home pay including employer retirement savings match
using the formula laid out by Mr. Money Mustache:
http://www.mrmoneymustache.com/2015/01/26/calculating-net-worth/
Args:
take_home_pay: float or int, monthly take-home pay
spending: float or int, monthly spending
numtype: string, 'decimal' or 'float'; the type of number to return.
Returns:
your monthly savings rate expressed as a percentage.
|
def savings_rate(take_home_pay, spending, numtype='float'):
    """Calculate net take-home savings rate including employer retirement
    savings match, using the formula laid out by Mr. Money Mustache:
    http://www.mrmoneymustache.com/2015/01/26/calculating-net-worth/

    Args:
        take_home_pay: float or int, monthly take-home pay.
        spending: float or int, monthly spending.
        numtype: string, 'decimal' or 'float'; the type of number to return.

    Returns:
        Your monthly savings rate expressed as a percentage
        (0 when take-home pay is zero).
    """
    if numtype == 'decimal':
        try:
            pay = Decimal(take_home_pay)
            saved = pay - Decimal(spending)
            return saved / pay * Decimal(100.0)
        # Leave InvalidOperation for backwards compatibility
        except (InvalidOperation, DivisionByZero):
            return Decimal(0.0)
    try:
        pay = float(take_home_pay)
        return (pay - float(spending)) / pay * 100.0
    except ZeroDivisionError:
        return 0.0
| 907,860 |
Send a reply message of the given type
Args:
- message: the message to publish
- message_type: the type of message being sent
|
def reply(self,message,message_type):
    """Send a reply message of the given type.

    Args:
        message: the message to publish.
        message_type: the type of message being sent.

    Raises:
        Exception: If ``message_type`` is MULTIPART, which replies do not
            support.
    """
    if message_type == MULTIPART:
        raise Exception("Unsupported reply type")
    # Delegate the actual send to the base class.
    super(Replier,self).send(message,message_type)
| 908,283 |
Parse a GPX file into a GpxModel.
Args:
gpx_element: The root <gpx> element of an XML document containing a
version attribute. GPX versions 1.0 and 1.1 are supported.
gpxns: The XML namespace for GPX in Clarke notation (i.e. delimited
by curly braces).
Returns:
A GpxModel representing the data from the supplies xml.
Raises:
ValueError: The supplied XML could not be parsed as GPX.
|
def parse_gpx(gpx_element, gpxns=None):
    """Parse a GPX file into a GpxModel.

    Args:
        gpx_element: The root <gpx> element of an XML document containing a
            version attribute. GPX versions 1.0 and 1.1 are supported.
        gpxns: The XML namespace for GPX in Clarke notation (i.e. delimited
            by curly braces).

    Returns:
        A GpxModel representing the data from the supplied xml.

    Raises:
        ValueError: The supplied XML could not be parsed as GPX.
    """
    if gpxns is None:
        gpxns = determine_gpx_namespace(gpx_element)
    if gpx_element.tag != gpxns + 'gpx':
        raise ValueError("No gpx root element")
    version = gpx_element.attrib['version']
    # Dispatch on the declared GPX version.
    parsers = {'1.0': parse_gpx_1_0, '1.1': parse_gpx_1_1}
    parser = parsers.get(version)
    if parser is None:
        raise ValueError("Cannot parse GPX version {0}".format(version))
    return parser(gpx_element, gpxns=gpxns)
| 908,441 |
Add the args
Args:
args (namespace): The commandline args
|
def add_args(self, args):
    """Copy the commandline args onto this object.

    Each non-None value from ``args`` is stored on ``self`` under the
    upper-cased attribute name.

    Args:
        args (namespace): The commandline args.
    """
    for name, setting in vars(args).items():
        if setting is None:
            continue
        setattr(self, name.upper(), setting)
| 908,644 |
Load the contents from the ini file
Args:
ini_file (str): The file from which the settings should be loaded
|
def load_ini(self, ini_file):
    """Load the contents from the ini file.

    Args:
        ini_file (str): The file from which the settings should be loaded.
            When falsy, ``<cwd>/settings.ini`` is tried instead.
    """
    # An explicitly requested but missing file is a fatal error.
    if ini_file and not os.path.exists(ini_file):
        self.log.critical(f"Settings file specified but not found. {ini_file}")
        sys.exit(1)
    if not ini_file:
        ini_file = f"{self.cwd}/settings.ini"
    if os.path.exists(ini_file):
        config = configparser.RawConfigParser(allow_no_value=True)
        config.read(ini_file)
        # Pull each known setting with the configparser getter matching
        # its declared type in self.spec.
        for key, value in self.spec.items():
            entry = None
            if value['type'] == str:
                entry = config.get("settings", option=key.lower(), fallback=None)
            elif value['type'] == bool:
                entry = config.getboolean("settings", option=key.lower(), fallback=None)
            elif value['type'] == int:
                entry = config.getint("settings", option=key.lower(), fallback=None)
            elif value['type'] == float:
                entry = config.getfloat("settings", option=key.lower(), fallback=None)
            elif value['type'] in [list, dict]:
                # Collections are stored as JSON strings in the ini file.
                entries = config.get("settings", option=key.lower(), fallback=None)
                if entries:
                    try:
                        entry = json.loads(entries)
                    except json.decoder.JSONDecodeError as _err: #pragma: no cover
                        self.log.critical(f"Error parsing json from ini file. {entries}")
                        sys.exit(1)
            if entry is not None:
                # Settings are exposed as upper-cased attributes on self.
                setattr(self, key.upper(), entry)
| 908,645 |
Initialization Function
Args:
epsilon (str): The epsilon symbol
alphabet (list): The DFA Alphabet
Returns:
None
|
def __init__(self, epsilon, alphabet=None):
    """Initialize the bookkeeping structures and the alphabet.

    Args:
        epsilon (str): The epsilon symbol.
        alphabet (list): The DFA alphabet; when None, createalphabet()
            supplies the default.

    Returns:
        None
    """
    self.bookeeping = None
    self.groups = None
    self.epsilon = epsilon
    self.alphabet = createalphabet() if alphabet is None else alphabet
| 908,766 |
Find state access strings (DFA shortest paths for every state)
using BFS
Args:
graph (DFA): The DFA states
start (int): The DFA initial state
Return:
list: A list of all the DFA shortest paths for every state
|
def _bfs_path_states(self, graph, start):
    """Find state access strings (DFA shortest paths for every state)
    using BFS.

    Args:
        graph (DFA): The DFA (provides the states and the symbol table).
        start (int): The DFA initial state.

    Returns:
        dict: Maps each reached state id to the concatenation of input
        symbols along a shortest path to it.
    """
    pathstates = {}
    # maintain a queue of paths; each path is a list of [symbol, state]
    # pairs so the access string can be rebuilt by joining the symbols
    queue = []
    visited = []
    # push the first path into the queue
    queue.append([['', start]])
    while queue:
        # get the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1][1]
        # record the access string the first time this state is dequeued
        # (the state numbered len(states) is skipped — presumably a sink
        # state; confirm against the caller)
        if node.stateid not in pathstates and node.stateid != len(list(graph.states)):
            pathstates[node.stateid] = ''.join(
                [mnode[0] for mnode in path])
            visited.append(node.stateid)
        # enumerate all adjacent nodes, construct a new path and push it
        # into the queue
        for arc in node.arcs:
            char = graph.isyms.find(arc.ilabel)
            next_state = graph[arc.nextstate]
            if next_state.stateid not in visited:
                new_path = list(path)
                new_path.append([char, next_state])
                queue.append(new_path)
    return pathstates
| 908,767 |
Find the accepted states
Args:
graph (DFA): The DFA states
Return:
list: Returns the list of the accepted states
|
def _get_accepted(self, graph):
    """Find the accepted states.

    Args:
        graph (DFA): The DFA states.

    Returns:
        list: The states whose final weight is not infinity, i.e. the
        accepting states.
    """
    infinity = TropicalWeight(float('inf'))
    return [state for state in graph.states if state.final != infinity]
| 908,768 |
Send a reply message of the given type
Args:
- message: the message to publish
- message_type: the type of message being sent
|
def push(self,message,message_type):
    """Push (send) a message of the given type.

    Args:
        message: the message to publish.
        message_type: the type of message being sent.
    """
    # Delegate the actual send to the base class.
    super(Producer,self).send(message,message_type)
| 908,803 |
Iter over a config and raise if a required option is still not set.
Args:
config (confpy.core.config.Configuration): The configuration object
to validate.
Raises:
MissingRequiredOption: If any required options are not set in the
configuration object.
Required options with default values are considered set and will not cause
this function to raise.
|
def check_for_missing_options(config):
    """Iterate over a config and raise if a required option is still not set.

    Required options with default values are considered set and will not
    cause this function to raise.

    Args:
        config (confpy.core.config.Configuration): The configuration object
            to validate.

    Raises:
        MissingRequiredOption: If any required options are not set in the
            configuration object.

    Returns:
        The validated configuration object.
    """
    for section_name, section in config:
        for option_name, option in section:
            # Only unset *required* options are an error.
            if not option.required or option.value is not None:
                continue
            message = "Option {0} in namespace {1} is required.".format(
                option_name,
                section_name,
            )
            raise exc.MissingRequiredOption(message)
    return config
| 909,354 |
maintain a map of states distance using BFS
Args:
start (fst state): The initial DFA state
Returns:
list: An ordered list of DFA states
using path distance
|
def _bfs_sort(self, start):
    """Maintain a map of state distances using BFS.

    Args:
        start (fst state): The initial DFA state.

    Returns:
        list: An ordered list of DFA states using path distance.
    """
    pathstates = {}
    # maintain a queue of nodes to be visited; each entry holds
    # [distance, state]
    queue = []
    # push the first node into the queue
    queue.append([0, start])
    pathstates[start.stateid] = 0
    while queue:
        # get the first node from the queue
        leaf = queue.pop(0)
        node = leaf[1]
        pathlen = leaf[0]
        # enumerate all adjacent nodes, construct a new path and push it
        # into the queue; a state's first recorded distance is final (BFS)
        for arc in node.arcs:
            next_state = self.mma[arc.nextstate]
            if next_state.stateid not in pathstates:
                queue.append([pathlen + 1, next_state])
                pathstates[next_state.stateid] = pathlen + 1
    # order state ids by increasing BFS distance ...
    orderedstatesdict = OrderedDict(
        sorted(
            pathstates.items(),
            key=lambda x: x[1],
            reverse=False))
    # ... then replace each id with its state object, keeping that order
    for state in self.mma.states:
        orderedstatesdict[state.stateid] = state
    orderedstates = [x[1] for x in list(orderedstatesdict.items())]
    return orderedstates
| 909,487 |
Kleene star operation
Args:
input_string (str): The string that the kleene star will be made
Returns:
str: The applied Kleene star operation on the input string
|
def star(self, input_string):
    """Apply the Kleene star operation to a regular-expression fragment.

    Args:
        input_string (str): The string that the Kleene star will be applied to.

    Returns:
        str: ``(input_string)*``, or the empty string when the input is
        the epsilon or empty-set symbol.
    """
    if input_string in (self.epsilon, self.empty):
        return ""
    return "(" + input_string + ")*"
| 909,488 |
# - Remove all the POP (type - 2) transitions to state 0,non DFA accepted
# for symbol @closing
# - Generate the accepted transitions
- Replace DFA accepted States with a push - pop symbol and two extra states
Args:
statediag (list): The states of the PDA
dfaaccepted (list):The list of DFA accepted states
Returns:
list: A cleaned, smaller list of DFA states
|
def get(self, statediag, dfaaccepted):
    """Wrap the PDA with a new initial push state and accepting pop
    transitions.

    A fresh state 'AI,I' pushing the '@wrapping' symbol becomes state 0;
    every POP (type 2) state matching a DFA-accepted state gains a
    transition popping '@wrapping' into 'AI,I'.

    Args:
        statediag (list): The states of the PDA.
        dfaaccepted (list): The list of DFA accepted states.

    Returns:
        list: A cleaned, smaller list of DFA states.
    """
    newstatediag = {}
    # New initial wrapping state that pushes the @wrapping symbol.
    newstate = PDAState()
    newstate.id = 'AI,I'  # BECAREFUL WHEN SIMPLIFYING...
    newstate.type = 1
    newstate.sym = '@wrapping'
    transitions = {}
    transitions[(0, 0)] = [0]
    newstate.trans = transitions
    i = 0
    newstatediag[i] = newstate
    for stateid in statediag:
        state = statediag[stateid]
        if state.type == 2:
            # POP states whose second id component matches a DFA accepted
            # state become accepting via the wrapping symbol.
            for state2id in dfaaccepted:
                if state.id[1] == state2id:
                    state.trans['AI,I'] = ['@wrapping']
                    break
        i = i + 1
        newstatediag[i] = state
    return newstatediag
| 909,549 |
Performs BFS operation for eliminating useless loop transitions
Args:
graph (PDA): the PDA object
start (PDA state): The PDA initial state
Returns:
list: A cleaned, smaller list of DFA states
|
def bfs(self, graph, start):
    """Perform BFS over the PDA states, keeping only states reachable
    through non-empty transitions (eliminating useless loop transitions).

    Args:
        graph (PDA): the PDA object (a dict of states).
        start (PDA state): The PDA initial state.

    Returns:
        dict: A cleaned, smaller collection of states, renumbered 0..n-1.
    """
    frontier = [start]
    visited = []
    while frontier:
        current = frontier.pop(0)
        visited.append(current.id)
        # Follow every non-empty transition to a not-yet-visited state.
        for target_id in current.trans:
            if current.trans[target_id] == []:
                continue
            if target_id in visited:
                continue
            # Locate the state object carrying this id and enqueue it.
            for candidate in graph:
                if graph[candidate].id == target_id:
                    frontier.append(graph[candidate])
                    break
    # Keep only visited states, renumbered sequentially.
    reachable = {}
    index = 0
    for key in graph:
        if graph[key].id in visited:
            reachable[index] = graph[key]
            index = index + 1
    return reachable
| 909,550 |
Replaces complex state IDs as generated from the product operation
with simple sequential numbers. A dictionary is maintained in order
to map the existing IDs.
Args:
statediag (list): The states of the PDA
accepted (list): the list of DFA accepted states
Returns:
list:
|
def get(self, statediag, accepted=None):
    """Replace complex state IDs (as generated from the product
    operation) with simple sequential numbers.

    A dictionary is maintained in order to map the existing IDs.

    Args:
        statediag (list): The states of the PDA.
        accepted (list): The list of DFA accepted states.

    Returns:
        tuple: (renumbered state diagram, total id count, renumbered
        accepted list or None).
    """
    count = 0
    statesmap = {}
    newstatediag = {}
    for state in statediag:
        # Simplify state IDs
        if statediag[state].id not in statesmap:
            statesmap[statediag[state].id] = count
            mapped = count
            count = count + 1
        else:
            mapped = statesmap[statediag[state].id]
        # Simplify transitions IDs
        transitions = {}
        for nextstate in statediag[state].trans:
            if nextstate not in statesmap:
                statesmap[nextstate] = count
                transmapped = count
                count = count + 1
            else:
                transmapped = statesmap[nextstate]
            transitions[transmapped] = statediag[state].trans[nextstate]
        # Rebuild the state with the simplified ids.
        newstate = PDAState()
        newstate.id = mapped
        newstate.type = statediag[state].type
        newstate.sym = statediag[state].sym
        newstate.trans = transitions
        newstatediag[mapped] = newstate
    newaccepted = None
    if accepted is not None:
        newaccepted = []
        for accepted_state in accepted :
            # Accepted DFA states appear in the map keyed as (0, state).
            if (0, accepted_state) in statesmap:
                newaccepted.append(statesmap[(0, accepted_state)])
    return newstatediag, count, newaccepted
| 909,552 |
Find the biggest State ID
Args:
statediag (list): The states of the PDA
thebiggestid (int): The biggest state identifier
Returns:
None
|
def __init__(self, statediag=None, thebiggestid=None):
    """Record the PDA state diagram and find the biggest state ID.

    Args:
        statediag (dict): The states of the PDA, keyed by state id.
        thebiggestid (int): The biggest state identifier if already
            known; when None it is computed from ``statediag``.

    Returns:
        None
    """
    # Use a fresh container per instance: the original declared a mutable
    # default argument ([]), which is shared across every instantiation.
    if statediag is None:
        statediag = {}
    self.quickresponse = {}
    self.quickresponse_types = {}
    self.toadd = []
    if thebiggestid is not None:
        self.biggestid = thebiggestid
    else:
        self.biggestid = 0
        for state in statediag:
            if statediag[state].id > self.biggestid:
                self.biggestid = statediag[state].id
    self.statediag = statediag
| 909,553 |
Creates a new POP state (type - 2) with the same transitions.
The POPed symbol is the unique number of the state.
Args:
trans (dict): Transition dictionary
Returns:
Int: The state identifier
|
def _generate_state(self, trans):
    """Create a new POP state (type 2) carrying the given transitions.

    The POPped symbol is the unique number of the state.

    Args:
        trans (dict): Transition dictionary.

    Returns:
        int: The new state's identifier.
    """
    new_state = PDAState()
    new_state.id = self.nextstate()
    new_state.type = 2
    # The popped symbol is the state's own id.
    new_state.sym = new_state.id
    new_state.trans = trans.copy()
    self.toadd.append(new_state)
    return new_state.id
| 909,554 |
For each state qi of the PDA, we add the rule Aii -> e
For each triplet of states qi, qj and qk, we add the rule Aij -> Aik Akj.
Args:
optimized (bool): Enable or Disable optimization - Do not produce O(n^3)
|
def insert_self_to_empty_and_insert_all_intemediate(self, optimized):
    """Add the CNF rules Aii -> e and, optionally, Aij -> Aik Akj.

    For each state qi of the PDA, the rule Aii -> e is added. For each
    triplet of states qi, qj and qk, the rule Aij -> Aik Akj is added
    unless optimization is enabled.

    Args:
        optimized (bool): Enable or Disable optimization - Do not produce
            the O(n^3) intermediate rules.
    """
    for state_a in self.statediag:
        self.rules.append('A' +repr(state_a.id) +',' + repr(state_a.id) + ': @empty_set')
        # If CFG is not requested, avoid the following O(n^3) rules.
        # It can be solved and a string can be generated faster with
        # BFS or DFS instead.
        if optimized != 0:
            continue
        for state_b in self.statediag:
            if state_b.id == state_a.id:
                continue
            for state_c in self.statediag:
                if state_c.id == state_a.id or state_c.id == state_b.id:
                    continue
                rule = ('A' + repr(state_a.id) + ',' + repr(state_c.id)
                        + ': A' + repr(state_a.id) + ',' + repr(state_b.id)
                        + ' A' + repr(state_b.id) + ',' + repr(state_c.id))
                self.rules.append(rule)
| 909,556 |
Generates a new random string from the start symbol
Args:
None
Returns:
str: The generated string
|
def generate(self):
    """Generate a new random string from the start symbol.

    Returns:
        str: The generated string (or None when generation fails).
    """
    generated = self._gen(self.optimized, self.splitstring)
    if self.splitstring and generated is not None:
        # Drop the leading character added in split-string mode.
        generated = generated[1:]
    return generated
| 909,810 |
Because of the optimization, the rule for empty states is missing
A check takes place live
Args:
stateid (int): The state identifier
Returns:
bool: A true or false response
|
def _check_self_to_empty(self, stateid):
    """Check live whether a rule maps a state to itself (an empty rule).

    Because of the optimization, the rule for empty states is missing,
    so this check takes place on the fly.

    Args:
        stateid (str): The state identifier string (e.g. 'A1,1').

    Returns:
        int: 1 when the two embedded ids are equal, 0 otherwise.
    """
    # Start after the right-most '@' or 'A' marker, whichever is later.
    marker = max(stateid.rfind('@'), stateid.rfind('A'))
    ids = stateid[marker + 1:].split(',')
    if len(ids) < 2:
        return 0
    return 1 if ids[0] == ids[1] else 0
| 909,812 |
Generates a new random object generated from the nonterminal
Args:
optimized (bool): mode of operation - if enabled not all
CNF rules are included (mitigate O(n^3))
splitstring (bool): A boolean for enabling or disabling
Returns:
str: The generated string
|
def _gen(self, optimized, splitstring):
    """Generate a new random object generated from the nonterminal.

    Args:
        optimized (bool): mode of operation - if enabled not all CNF
            rules are included (mitigate O(n^3)).
        splitstring (bool): A boolean for enabling or disabling the
            split-string generation mode.

    Returns:
        str: The generated string.
    """
    # Define Dictionary that holds resolved rules
    # (only in form A -> terminals sequence)
    self.resolved = {}
    # First update Resolved dictionary by adding rules
    # that contain only terminals (resolved rules)
    for nt in self.grammar.grammar_nonterminals_map:
        for i in self.grammar.grammar_nonterminals_map[nt]:
            if self.grammar.grammar_rules[i][0] not in self.resolved\
                    and not isinstance(self.grammar.grammar_rules[i][1], (set, tuple)):
                if self.grammar.grammar_rules[i][1] != '@empty_set' \
                        and self.grammar.grammar_rules[i][1] in self.grammar.grammar_terminals:
                    if splitstring:
                        self.resolved[
                            self.grammar.grammar_rules[i][0]] = self.grammar.grammar_rules[i][1]
                    else:
                        # In non-split mode '&' stands for a space.
                        if self.grammar.grammar_rules[i][1] == '&':
                            self.resolved[self.grammar.grammar_rules[i][0]] = ' '
                        else:
                            self.resolved[
                                self.grammar.grammar_rules[i][0]] = self.grammar.grammar_rules[i][1]
                    # Short-circuit as soon as the start symbol resolves.
                    if self._checkfinal(self.grammar.grammar_rules[i][0]):
                        return self.resolved[self.grammar.grammar_rules[i][0]]
                    if self.grammar.grammar_rules[i][0] not in self.bfs_queue:
                        self.bfs_queue.append(self.grammar.grammar_rules[i][0])
                if self.grammar.grammar_rules[i][1] == '@empty_set':
                    self.resolved[self.grammar.grammar_rules[i][0]] = ''
                    self.bfs_queue.append(self.grammar.grammar_rules[i][0])
                # In optimized mode, self-to-self rules resolve to empty.
                if optimized and self._check_self_to_empty(
                        self.grammar.grammar_rules[i][1]):
                    self.resolved[self.grammar.grammar_rules[i][0]] = ''
                    if self.grammar.grammar_rules[i][0] not in self.bfs_queue:
                        self.bfs_queue.append(self.grammar.grammar_rules[i][0])
    # Then try to use the rules from Resolved dictionary and check
    # if there is another rule that can be resolved.
    # This should be done in a while loop
    change = 1
    while change:
        change = 0
        if not change:
            ret = self._check_self_nonterminals(optimized)
            if ret == 1:
                change = 1
            elif ret != 0:
                # A non-0/1 return is the final generated string.
                return ret
        if not change:
            while not change and len(self.bfs_queue) > 0:
                myntr = self.bfs_queue.pop()
                ret = self._check_self_replicate(myntr)
                if ret == 1:
                    change = 1
                elif ret != 0:
                    return ret
                if optimized and self._check_intemediate(
                        myntr, self.maxstate):
                    change = 1
                    break
| 909,816 |
Currently this compiler simply returns an interpreter instead of compiling
TODO: Write this compiler to increase LPProg run speed and to prevent exceeding maximum recursion depth
Args:
prog (str): A string containing the program.
features (FeatureSet): The set of features to enable during compilation.
Returns:
LPProg
|
def compile(self, prog, features=Features.ALL):
    """Compile a program into an LPProg.

    Currently this compiler simply returns an interpreter instead of
    compiling.
    TODO: Write this compiler to increase LPProg run speed and to
    prevent exceeding maximum recursion depth.

    Args:
        prog (str): A string containing the program.
        features (FeatureSet): The set of features to enable during
            compilation.

    Returns:
        LPProg
    """
    return LPProg(Parser(Tokenizer(prog, features), features).program(), features)
| 910,218 |
Convert any value into a string value.
Args:
value (any): The value to coerce.
Returns:
str: The string representation of the value.
|
def coerce(self, value):
    """Convert any value into a string value.

    Args:
        value (any): The value to coerce.

    Returns:
        str: The string representation of the value.
    """
    # Strings pass through unchanged; everything else is stringified.
    return value if isinstance(value, compat.basestring) else str(value)
| 910,511 |
Initialize the option with a regex pattern.
Args:
pattern (str): The regex pattern to match against.
*args: Any position arguments required by base classes.
**kwargs: Any keyword arguments required by base classes.
Raises:
ValueError: If a pattern is not given.
TypeError: If the pattern is not a string.
|
def __init__(self, pattern=None, *args, **kwargs):
    """Initialize the option with a regex pattern.

    Args:
        pattern (str): The regex pattern to match against.
        *args: Any positional arguments required by base classes.
        **kwargs: Any keyword arguments required by base classes.

    Raises:
        ValueError: If a pattern is not given.
        TypeError: If the pattern is not a string (raised by re.compile).
    """
    super(PatternOption, self).__init__(*args, **kwargs)
    if pattern is None:
        raise ValueError("The pattern cannot be None.")
    self._pattern = pattern
    # Pre-compile once; coerce() reuses the compiled regex.
    self._re = re.compile(pattern)
| 910,512 |
Convert a value into a pattern matched string value.
All string values are matched against a regex before they are
considered acceptable values.
Args:
value (any): The value to coerce.
Raises:
ValueError: If the value is not an acceptable value.
Returns:
str: The pattern matched value represented.
|
def coerce(self, value):
    """Convert a value into a pattern matched string value.

    All string values are matched against a regex before they are
    considered acceptable values.

    Args:
        value (any): The value to coerce.

    Raises:
        ValueError: If the value is not an acceptable value.

    Returns:
        str: The pattern matched value.
    """
    text = value if isinstance(value, compat.basestring) else str(value)
    if self._re.match(text) is None:
        # NOTE(review): the message reads ``self.pattern`` while __init__
        # stores ``self._pattern`` — presumably a base class exposes a
        # ``pattern`` property; confirm before renaming.
        raise ValueError(
            "The value {0} does not match the pattern {1}".format(
                text,
                self.pattern,
            )
        )
    return text
| 910,513 |
Verify some json.
Args:
schema - the description of a general-case 'valid' json object.
data - the json data to verify.
Returns:
bool: True if data matches the schema, False otherwise.
Raises:
TypeError:
If the schema is of an unknown data type.
ValueError:
If the schema contains a string with an invalid value.
If the schema attempts to reference a non-existent named schema.
|
def check(schema, data, trace=False):
    """Verify some json.

    Args:
        schema: the description of a general-case 'valid' json object.
        data: the json data to verify.
        trace: when truthy, enable trace output in the checker.

    Returns:
        bool: True if data matches the schema, False otherwise.

    Raises:
        TypeError: If the schema is of an unknown data type.
        ValueError: If the schema contains a string with an invalid value,
            or if the schema attempts to reference a non-existent named
            schema.
    """
    # _check expects a trace depth of 1 or None rather than a boolean.
    trace_level = 1 if trace == True else None
    return _check(schema, data, trace=trace_level)
| 910,591 |
Generate s3 application bucket name.
Args:
include_region (bool): Include region in the name generation.
|
def s3_app_bucket(self, include_region=False):
    """Generate the s3 application bucket name.

    Args:
        include_region (bool): Include region in the name generation.

    Returns:
        str: The formatted bucket name.
    """
    template_key = 's3_app_region_bucket' if include_region else 's3_app_bucket'
    return self.format[template_key].format(**self.data)
| 911,131 |
Generate shared s3 application bucket name.
Args:
include_region (bool): Include region in the name generation.
|
def shared_s3_app_bucket(self, include_region=False):
    """Generate the shared s3 application bucket name.

    Args:
        include_region (bool): Include region in the name generation.

    Returns:
        str: The formatted shared bucket name.
    """
    template_key = ('shared_s3_app_region_bucket' if include_region
                    else 'shared_s3_app_bucket')
    return self.format[template_key].format(**self.data)
| 911,132 |
Initialization function
Args:
sid (int): The state identifier
Returns:
None
|
def __init__(self, sid=None):
    """Initialize a DFA state.

    Args:
        sid (int): The state identifier.

    Returns:
        None
    """
    self.stateid = sid
    # A fresh state is neither final nor initial and has no outgoing arcs.
    self.arcs = []
    self.final = False
    self.initial = False
| 911,407 |
The initialization function
Args:
srcstate_id (int): The source state identifier
nextstate_id (int): The destination state identifier
ilabel (str): The symbol corresponding to character for the transition
|
def __init__(self, srcstate_id, nextstate_id, ilabel=None):
    """Initialize a transition arc.

    Args:
        srcstate_id (int): The source state identifier.
        nextstate_id (int): The destination state identifier.
        ilabel (str): The symbol identifier for the transition's character.
    """
    self.ilabel = ilabel
    self.srcstate = srcstate_id
    self.nextstate = nextstate_id
| 911,408 |
Sets a symbol
Args:
char (str): The symbol character
num (int): The symbol identifier
Returns:
None
|
def __setitem__(self, char, num):
    """Set a symbol mapping.

    Args:
        char (str): The symbol character.
        num (int): The symbol identifier.

    Returns:
        None
    """
    # Maintain both the forward (id -> char) and reverse (char -> id) maps.
    self.reversesymbols[char] = num
    self.symbols[num] = char
| 911,409 |
Adds a new Arc
Args:
src (int): The source state identifier
dst (int): The destination state identifier
char (str): The character for the transition
Returns:
None
|
def add_arc(self, src, dst, char):
    """Add a new arc.

    Args:
        src (int): The source state identifier.
        dst (int): The destination state identifier.
        char (str): The character for the transition.

    Returns:
        None
    """
    # Grow the state list so that both endpoints exist.
    for s_idx in [src, dst]:
        if s_idx >= len(self.states):
            for i in range(len(self.states), s_idx + 1):
                self.states.append(DFAState(i))
    # A second arc on the same input symbol, or any epsilon transition,
    # makes the automaton nondeterministic.
    for arc in self.states[src].arcs:
        if arc.ilabel == self.isyms.__getitem__(char) or char == EPSILON:
            self.nfa = True
            break
    self.states[src].arcs.append(
        DFAArc(src, dst, self.isyms.__getitem__(char)))
| 911,413 |
Returns the complement of DFA
Args:
alphabet (list): The input alphabet
Returns:
None
|
def complement(self, alphabet):
    """Turn the DFA into its complement by flipping every state's finality.

    Args:
        alphabet (list): The input alphabet (kept for the caller-facing
            interface; not read here).

    Returns:
        None
    """
    # Visit initial states first, mirroring the original ordering.
    for state in sorted(self.states, key=attrgetter('initial'), reverse=True):
        state.final = not state.final
| 911,414 |
Adds a sink state
Args:
alphabet (list): The input alphabet
Returns:
None
|
def init_from_acceptor(self, acceptor):
    """Initialize this automaton by deep-copying another acceptor.

    Args:
        acceptor: The acceptor whose states, alphabet and symbol tables
            are copied.

    Returns:
        None
    """
    # Deep copies keep this automaton independent of the source.
    for attr in ('states', 'alphabet', 'osyms', 'isyms'):
        setattr(self, attr, copy.deepcopy(getattr(acceptor, attr)))
| 911,415 |
Load the transducer from the text file format of OpenFST.
The format is specified as follows:
arc format: src dest ilabel olabel [weight]
final state format: state [weight]
lines may occur in any order except initial state must be first line
Args:
txt_fst_file_name (str): The input file
Returns:
None
|
def load(self, txt_fst_file_name):
    """Load the transducer from the text file format of OpenFST.

    The format is specified as follows:
        arc format: src dest ilabel olabel [weight]
        final state format: state [weight]
    lines may occur in any order except initial state must be first line.

    Args:
        txt_fst_file_name (str): The input file path.

    Returns:
        None
    """
    with open(txt_fst_file_name, 'r') as input_file:
        for line in input_file:
            fields = line.strip().split()
            if len(fields) == 1:
                # A single field marks a final state.
                self[int(fields[0])].final = True
            else:
                # str.decode('hex') was Python 2 only; bytes.fromhex with
                # a byte-preserving latin-1 decode is the Python 3
                # equivalent for the hex-encoded label.
                label = bytes.fromhex(fields[2]).decode('latin-1')
                self.add_arc(int(fields[0]), int(fields[1]), label)
| 911,416 |
Constructs an unminimized DFA recognizing
the intersection of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the intersect operation
Returns:
DFA: The resulting DFA
|
def intersect(self, other):
    """Construct an unminimized DFA recognizing the intersection of the
    languages of two given DFAs.

    Args:
        other (DFA): The other DFA that will be used for the intersect
            operation.

    Returns:
        DFA: self, now holding the product automaton.
    """
    # ANDing the two acceptance predicates yields the intersection.
    self.cross_product(other, bool.__and__)
    return self
| 911,417 |
Constructs an unminimized DFA recognizing
the symmetric difference of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the symmetric difference operation
Returns:
DFA: The resulting DFA
|
def symmetric_difference(self, other):
    """Construct an unminimized DFA recognizing the symmetric difference
    of the languages of two given DFAs.

    Args:
        other (DFA): The other DFA that will be used for the symmetric
            difference operation.

    Returns:
        DFA: self, now holding the product automaton.
    """
    # XORing the two acceptance predicates yields the symmetric difference.
    self.cross_product(other, bool.__xor__)
    return self
| 911,418 |
Constructs an unminimized DFA recognizing the union of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the union operation
Returns:
DFA: The resulting DFA
|
def union(self, other):
    """Construct an unminimized DFA recognizing the union of the
    languages of two given DFAs.

    Args:
        other (DFA): The other DFA that will be used for the union
            operation.

    Returns:
        DFA: self, now holding the product automaton.
    """
    # ORing the two acceptance predicates yields the union.
    self.cross_product(other, bool.__or__)
    return self
| 911,419 |
Transforms a Non Deterministic DFA into a Deterministic
Args:
None
Returns:
DFA: The resulting DFA
Creating an equivalent DFA is done using the standard algorithm.
A nice description can be found in the book:
Harry R. Lewis and Christos H. Papadimitriou. 1998.
E
print target_dfa_statelements of the Theory of Computation.
|
def determinize(self):
    """Transform a nondeterministic automaton into a deterministic one.

    Creating an equivalent DFA is done using the standard subset
    construction algorithm. A nice description can be found in the book:
    Harry R. Lewis and Christos H. Papadimitriou. 1998. Elements of the
    Theory of Computation.

    Returns:
        DFA: self, rewritten in place as the resulting DFA.
    """
    # Compute the \epsilon-closure for all states and save it in a diagram
    epsilon_closure = {}
    for state in self.states:
        sid = state.stateid
        epsilon_closure[sid] = self._epsilon_closure(state)
    # Get a transition diagram to speed up computations
    trans_table = {}
    for state in self.states:
        trans_table[state.stateid] = defaultdict(set)
        for arc in state:
            char = self.isyms.find(arc.ilabel)
            trans_table[state.stateid][char].add(arc.nextstate)
    # is_final function:
    # Given a set of nfa states representing a dfa_state return 1 if the
    # corresponding DFA state is a final state, i.e. if any of the
    # corresponding NFA states are final.
    is_final = lambda nfa_states, dfa_state: True \
        if sum([ int(nfa_states[x].final) for x in dfa_state ]) >= 1 \
        else False
    # Precomputation is over, start executing the conversion algorithm
    state_idx = 1
    nfa_states = copy.deepcopy(self.states)
    self.states = []
    # Initialize the new DFA state list
    self.add_state()
    new_initial = epsilon_closure[nfa_states[0].stateid]
    self.states[0].final = is_final(nfa_states, new_initial)
    # Map each NFA-state set (frozenset) to its new DFA state index.
    dfa_state_idx_map = { frozenset(new_initial) : 0 }
    stack = [new_initial]
    while True:
        # Iterate until all added DFA states are processed.
        if not stack:
            break
        # This is a set of states from the NFA
        src_dfa_state = stack.pop()
        src_dfa_state_idx = dfa_state_idx_map[frozenset(src_dfa_state)]
        for char in self.alphabet:
            # Compute the set of target states
            target_dfa_state = set([])
            for nfa_state in src_dfa_state:
                next_states = \
                    set([y for x in trans_table[nfa_state][char] \
                        for y in epsilon_closure[x] ])
                target_dfa_state.update(next_states)
            # If the computed state set is not part of our new DFA add it,
            # along with the transition for the current character.
            if frozenset(target_dfa_state) not in dfa_state_idx_map:
                self.add_state()
                dfa_state_idx_map[frozenset(target_dfa_state)] = state_idx
                self.states[state_idx].final = is_final(nfa_states,
                                                        target_dfa_state)
                state_idx += 1
                stack.append(target_dfa_state)
            dst_state_idx = dfa_state_idx_map[frozenset(target_dfa_state)]
            self.add_arc(src_dfa_state_idx, dst_state_idx, char)
    return self
| 911,421 |
Performs the Hopcroft minimization algorithm
Args:
None
Returns:
DFA: The minimized input DFA
|
def hopcroft(self):
    """Minimize the DFA using Hopcroft's partition-refinement algorithm.

    The state set is first split into {final, non-final}; the partition is
    then refined, one input symbol at a time, until no group can be split
    further. Finally the automaton is rebuilt with one state per group.

    Args:
        None
    Returns:
        DFA: self, rebuilt as the minimized DFA
    """
    def _getset(testset, partition):
        # Return True if `testset` already occurs (as a set) in `partition`.
        for part in partition:
            if set(testset) == set(part):
                return True
        return None
    def _create_transitions_representation(graph):
        # Map: stateid -> {input symbol -> next state id}
        return {x.stateid:{self.isyms.find(arc.ilabel): arc.nextstate \
            for arc in x} for x in graph.states}
    def _create_reverse_transitions_representation(graph):
        # NOTE(review): body is identical to
        # _create_transitions_representation — presumably intended to
        # invert the transition relation; confirm against callers.
        return {x.stateid: {self.isyms.find(arc.ilabel): arc.nextstate \
            for arc in x} for x in graph.states}
    def _reverse_to_source(target, group1):
        # Union of the source states mapped (via `target`) from `group1`.
        new_group = []
        for dst in group1:
            new_group += target[dst]
        return set(new_group)
    def _get_group_from_state(groups, sid):
        # Index of the partition group that contains state `sid`.
        for index, selectgroup in enumerate(groups):
            if sid in selectgroup:
                return index
    def _delta(graph, cur_state, char):
        # Follow the transition labeled `char` out of `cur_state`.
        for arc in cur_state.arcs:
            if graph.isyms.find(arc.ilabel) == char:
                return graph[arc.nextstate]
    def _partition_group(bookeeping, group):
        # Split `group` against the first recorded splitter that cuts it.
        for (group1, group2) in bookeeping:
            if group & group1 != set() and not group.issubset(group1):
                new_g1 = group & group1
                new_g2 = group - group1
                return (new_g1, new_g2)
            if group & group2 != set() and not group.issubset(group2):
                new_g1 = group & group2
                new_g2 = group - group2
                return (new_g1, new_g2)
        assert False, "Unmatched group partition"
    def _object_set_to_state_list(objectset):
        return [state.stateid for state in objectset]
    def _get_accepted(graph):
        # A final state carries a weight different from +infinity.
        return [state for state in graph \
                if state.final != TropicalWeight(float('inf'))]
    graph = self
    # Find Q
    set_q = set(_object_set_to_state_list(graph.states))
    # We will work with states addresses here instead of states stateid for
    # more convenience
    set_f = set(_object_set_to_state_list(_get_accepted(graph)))
    # Perform P := {F, Q-F}
    set_nf = set_q.copy() - set_f.copy()
    groups = [set_f.copy(), set_nf.copy()]
    bookeeping = [(set_f, set_nf)]
    done = False
    while not done:
        done = True
        new_groups = []
        for selectgroup in groups:
            # _check for each letter if it splits the current group
            for character in self.alphabet:
                # print 'Testing symbol: ', character
                target = defaultdict(list)
                target_states = defaultdict(int)
                new_g = [set(selectgroup)]
                for sid in selectgroup:
                    # _check if all transitions using c are going in a state
                    # in the same group. If they are going on a different
                    # group then split
                    deststate = _delta(graph, graph[sid], character)
                    destgroup = _get_group_from_state(groups,
                                                      deststate.stateid)
                    target[destgroup].append(sid)
                    target_states[destgroup] = deststate.stateid
                if len(target) > 1:
                    # NOTE: dict.iteritems is Python 2 only.
                    inv_target_states = {
                        v: k for k, v in target_states.iteritems()}
                    new_g = [set(selectedstate) for selectedstate in target.values()]
                    done = False
                    # Get all the partitions of destgroups
                    queue = [set([x for x in target_states.values()])]
                    while queue:
                        top = queue.pop(0)
                        (group1, group2) = _partition_group(bookeeping, top)
                        ng1 = _reverse_to_source(
                            target, [inv_target_states[x] for x in group1])
                        ng2 = _reverse_to_source(
                            target, [inv_target_states[x] for x in group2])
                        bookeeping.append((ng1, ng2))
                        if len(group1) > 1:
                            queue.append(group1)
                        if len(group2) > 1:
                            queue.append(group2)
                    break
            new_groups += new_g
        # End of iteration for the k-equivalence
        # Assign new groups and check if any change occurred
        groups = new_groups
    # Make a copy of the old states, and prepare the
    # automaton to host the minimum states
    oldstates = copy.deepcopy(self.states)
    self.states = []
    self.define()
    def findpart(stateid, partitions):
        # Locate the (frozen) partition group that contains `stateid`.
        for group in partitions:
            if stateid in group:
                return frozenset(group)
        return frozenset(set( ))
    def add_state_if_not_exists(group, statesmap, final):
        # Create one minimized state per group, memoized in `statesmap`.
        if group not in statesmap:
            sid = self.add_state()
            self[sid].final = final
            statesmap[group] = sid
        return statesmap[group]
    statesmap = {}
    self.states = []
    group = findpart(0, groups)
    sid = add_state_if_not_exists(frozenset(list(group)), statesmap,
                                  oldstates[0].final)
    self[sid].initial = True
    for group in groups:
        if len(group) == 0:
            continue
        sid = add_state_if_not_exists(frozenset(group), statesmap,
                                      oldstates[list(group)[0]].final)
        state = next(iter(group))
        for arc in oldstates[state]:
            dst_group = findpart(arc.nextstate, groups)
            dst_sid = add_state_if_not_exists(
                dst_group, statesmap, oldstates[arc.nextstate].final)
            self.add_arc(sid, dst_sid, graph.isyms.find(arc.ilabel))
| 911,423 |
A generalized cross-product constructor over two DFAs.
The third argument is a binary boolean function f; a state (q1, q2) in the final
DFA accepts if f(A[q1],A[q2]), where A indicates the acceptance-value of the state.
Args:
dfa_2: The second dfa
accept_method: The boolean action
Returns:
None
|
def cross_product(self, dfa_2, accept_method):
    """Generalized cross-product construction over two DFAs.

    Rebuilds self as the product automaton: each new state corresponds to
    a pair (q1, q2) of input states, and a pair-state is final iff
    accept_method(final(q1), final(q2)) holds.

    Args:
        dfa_2 (DFA): The second dfa
        accept_method (callable): Binary boolean function combining the
            acceptance values of the two component states
    Returns:
        None
    """
    dfa_1states = copy.deepcopy(self.states)
    dfa_2states = dfa_2.states
    self.states = []
    states = {}
    def _create_transitions_representation(graph, state):
        # Map: input symbol -> destination state object (graph is indexed
        # by the arc's nextstate id).
        return {self.isyms.find(arc.ilabel): graph[arc.nextstate] for arc in state}
    def _add_state_if_nonexistent(state_a, state_b):
        # Memoize the product state for (state_a, state_b). A product state
        # is initial iff both components are initial; its finality is
        # decided by accept_method.
        if (state_a.stateid, state_b.stateid) not in states:
            states[(state_a.stateid, state_b.stateid)] \
                = self.add_state()
            self[states[(state_a.stateid, state_b.stateid)]].initial \
                = state_a.initial and state_b.initial
            self[states[(state_a.stateid, state_b.stateid)]].final \
                = accept_method(state_a.final, state_b.final)
        return states[(state_a.stateid, state_b.stateid)]
    for state1, state2 in product(dfa_1states, dfa_2states):
        sid1 = _add_state_if_nonexistent(state1, state2)
        transitions_s1 = _create_transitions_representation(dfa_1states, state1)
        transitions_s2 = _create_transitions_representation(dfa_2states, state2)
        for char in self.alphabet:
            sid2 = _add_state_if_nonexistent(
                transitions_s1[char], transitions_s2[char])
            self.add_arc(sid1, sid2, char)
| 911,424 |
Copy files between different directories.
Copy one or more files to an existing directory. This function is
recursive: if the source is a directory, all its subdirectories are created
in the destination. Existing files in the destination are overwritten without
any warning.
Args:
source (str): File or directory name.
dest (str): Directory name.
Raises:
FileNotFoundError: Destination directory doesn't exist.
|
def copy_rec(source, dest):
    """Recursively copy files into an existing directory.

    If `source` is a directory, its sub-directories are recreated under
    `dest` and their contents copied; plain files are copied directly into
    `dest`. Existing files in the destination are overwritten without any
    warning. Anything that is neither a file nor a directory is ignored
    (with a log message).

    Args:
        source (str): File or directory name.
        dest (str): Existing destination directory name.
    Raises:
        FileNotFoundError: Destination directory doesn't exist.
    """
    if os.path.isdir(source):
        for child in os.listdir(source):
            src_child = os.path.join(source, child)
            if os.path.isdir(src_child):
                # Fix: only create a matching directory for *directory*
                # children. The previous code also created a directory
                # named after each file child and then copied the file
                # into it, producing a spurious extra nesting level
                # (dest/child/child instead of dest/child).
                new_dest = os.path.join(dest, child)
                os.makedirs(new_dest, exist_ok=True)
                copy_rec(src_child, new_dest)
            else:
                copy_rec(src_child, dest)
    elif os.path.isfile(source):
        logging.info(' Copy "{}" to "{}"'.format(source, dest))
        shutil.copy(source, dest)
    else:
        logging.info(' Ignoring "{}"'.format(source))
| 911,439 |
Initialization function
Args:
sid (int): The state identifier
Returns:
None
|
def __init__(self, cur_fst, cur_node):
    """Initialization function.

    Args:
        cur_fst: The FST object this wrapper refers to
        cur_node: The current node within cur_fst
    Returns:
        None
    """
    self.cur_node = cur_node
    self.cur_fst = cur_fst
| 911,646 |
Adds a new Arc
Args:
src (int): The source state identifier
dst (int): The destination state identifier
char (str): The character for the transition
Returns:
None
|
def add_arc(self, src, dst, char):
    """Adds a new arc between two states.

    Args:
        src (int): The source state identifier
        dst (int): The destination state identifier
        char (str): The character for the transition
    Returns:
        None
    """
    # NOTE(review): only `src` is checked for existence (and a single new
    # state is added when missing) — presumably `dst` is expected to exist
    # already; confirm with callers.
    if src not in self.automaton.states():
        self.add_state()
    arc = fst.Arc(self.isyms[char], self.osyms[char], fst.Weight.One(self.automaton.weight_type()), dst)
    self.automaton.add_arc(src, arc)
| 911,651 |
After pyfst minimization,
all unused arcs are removed,
and all sink states are removed.
However this may break compatibility.
Args:
alphabet (list): The input alphabet
Returns:
None
|
def fixminimized(self, alphabet):
    """Complete the automaton after pyfst minimization.

    pyfst minimization removes all unused arcs and sink states, which may
    break compatibility; this rebuilds the symbol tables and re-adds a
    non-final sink state, routing every missing (state, char) transition
    to it.

    Args:
        alphabet (list): The input alphabet
    Returns:
        None
    """
    insymbols = fst.SymbolTable()
    outsymbols = fst.SymbolTable()
    num = 1
    # Rebuild the input/output symbol tables with dense consecutive ids.
    for char in self.alphabet:
        self.isyms.__setitem__(char, num)
        self.osyms.__setitem__(char, num)
        insymbols.add_symbol(char, num)
        outsymbols.add_symbol(char, num)
        num = num + 1
    self.automaton.set_input_symbols(insymbols)
    self.automaton.set_output_symbols(outsymbols)
    endstate = self.add_state()
    for state in self.states:
        for char in alphabet:
            found = 0
            for arc in state.arcs:
                if self.isyms.find(arc.ilabel) == char:
                    found = 1
                    break
            if found == 0:
                # Missing transition: route it to the sink state.
                self.add_arc(state.stateid, endstate, char)
    self[endstate].final = False
    # The sink loops to itself on every symbol.
    for char in alphabet:
        self.add_arc(endstate, endstate, char)
| 911,652 |
Returns the complement of DFA
Args:
alphabet (list): The input alphabet
Returns:
None
|
def complement(self, alphabet):
    """Complement the DFA by swapping final and non-final states.

    A sink state is added first (via _addsink) so that the automaton is
    complete over `alphabet` before the final weights are flipped.

    Args:
        alphabet (list): The input alphabet
    Returns:
        None
    """
    self._addsink(alphabet)
    for state in self.automaton.states():
        if self.automaton.final(state) == fst.Weight.One(self.automaton.weight_type()):
            self.automaton.set_final(state, fst.Weight.Zero(self.automaton.weight_type()))
        else:
            self.automaton.set_final(state, fst.Weight.One(self.automaton.weight_type()))
| 911,653 |
Initialize this machine by copying arcs and final states
from another acceptor.
Args:
    acceptor: The acceptor automaton to copy from
Returns:
    None
def init_from_acceptor_bycopying(self, acceptor):
    """Initialize this machine by copying arcs and final states from an
    acceptor.

    Args:
        acceptor: The acceptor automaton to copy from
    Returns:
        None
    """
    for state in acceptor.states:
        for arc in state.arcs:
            self.add_arc(state.stateid, arc.nextstate, acceptor.isyms.find(arc.ilabel))
        if state.final:
            # Python 2 print statement; emits debug output per final state.
            print state.stateid,' is final'
            self[state.stateid].final = True;
| 911,654 |
Constructs an unminimized DFA recognizing
the intersection of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the intersect operation
Returns:
    DFA: The resulting DFA
|
def intersect(self, other):
    """Construct an unminimized DFA recognizing the intersection of the
    languages of two given DFAs.

    Args:
        other (DFA): The other DFA used for the intersect operation
    Returns:
        DFA: self, now recognizing the intersection
    """
    self.automaton = fst.intersect(self.automaton, other.automaton)
    return self
| 911,655 |
Convert text values into boolean values.
True values are (case insensitive): 'yes', 'true', '1'. False values
are (case insensitive): 'no', 'false', '0'.
Args:
value (str or bool): The value to coerce.
Raises:
TypeError: If the value is not a bool or string.
ValueError: If the value is not bool or an acceptable value.
Returns:
bool: The True/False value represented.
|
def coerce(self, value):
    """Convert a text value into a boolean.

    True values (case insensitive): 'yes', 'true', '1'. False values
    (case insensitive): 'no', 'false', '0'. Booleans pass through.

    Args:
        value (str or bool): The value to coerce.
    Raises:
        TypeError: If the value is not a bool or string.
        ValueError: If the value is not bool or an acceptable value.
    Returns:
        bool: The True/False value represented.
    """
    if isinstance(value, bool):
        return value
    if not hasattr(value, 'lower'):
        raise TypeError('Value is not bool or string.')
    lookup = {'yes': True, 'true': True, '1': True,
              'no': False, 'false': False, '0': False}
    normalized = value.lower()
    if normalized in lookup:
        return lookup[normalized]
    raise ValueError('Could not coerce {0} to a bool.'.format(value))
| 911,721 |
Returns a string from the Diff result.
Depending on the method, either the string will
be generated directly from the PDA using the state
removal method, or the PDA will be first translated to
a CFG and then a string will be generated from the CFG
Args:
None
Returns:
A string from the Diff
|
def get_string(self):
    """Return a string from the Diff result.

    Depending on the (hard-coded) method, either the string is generated
    directly from the PDA using the state removal method ('PDASTRING'),
    or the PDA is first translated to a CFG and a string is generated
    from the CFG ('PDACFGSTRING').

    Args:
        None
    Returns:
        str: A string from the Diff ("" when there is no diff machine,
        None when generation fails)
    """
    return_string = None
    if not self.mmc:
        return ""
    # Method selection is hard-coded; only the 'PDASTRING' branch runs.
    method = 'PDASTRING'
    if method == 'PDASTRING':
        stringgen = PdaString()
        print '* Reduce PDA using DFA BFS (remove unreachable states):'
        newpda = self.mmc.s
        handle = IntersectionHandling()
        newpda = handle.get(newpda, self.mmc.accepted)
        reduce_b = ReducePDA()
        newpda = reduce_b.get(newpda)
        #simply = SimplifyStateIDs()
        #newpda, biggestid, newaccepted = simply.get(
        #    newpda, self.mmc.accepted)
        print "- Total PDA states after reduction are " + repr(len(newpda))
        return_string = stringgen.init(newpda, self.mmc.accepted)
        if return_string is not None:
            return_string = return_string[0]
    elif method == 'PDACFGSTRING':
        optimized = 1
        dt1 = datetime.datetime.fromtimestamp(time.time())
        print '* Initiating PDA simplification'
        print ' - Total PDA states are ' + repr(len(self.mmc.s))
        handle = IntersectionHandling()
        newpda = handle.get(self.mmc.s, self.mmc.accepted)
        newpda = self.mmc.s
        simply = SimplifyStateIDs()
        newpda, biggestid, newaccepted = simply.get(
            newpda, self.mmc.accepted)
        print ' - Total PDA states after id clearence are ' + repr(len(newpda))
        replace = ReadReplace(newpda, biggestid)
        newpda = replace.replace_read()
        print ' - Total PDA states after read elimination are ' + repr(len(newpda))
        maxstate = replace.nextstate() - 1
        print '* Reduce PDA using DFA BFS (remove unreachable states):'
        reduce_b = ReducePDA()
        newpda = reduce_b.get(newpda)
        print "- Total PDA states after reduction are " + repr(len(newpda))
        dt2 = datetime.datetime.fromtimestamp(time.time())
        rdelta = dateutil.relativedelta.relativedelta(dt2, dt1)
        print "* PDA was simplyfied in %d days, %d hours, %d minutes and %d seconds" % (
            rdelta.days, rdelta.hours, rdelta.minutes, rdelta.seconds)
        dt1 = datetime.datetime.fromtimestamp(time.time())
        print '* Initiating CNF from PDA generation'
        cnfgenerator = PdaCnf(newpda, newaccepted)
        dt2 = datetime.datetime.fromtimestamp(time.time())
        rdelta = dateutil.relativedelta.relativedelta(dt2, dt1)
        print "* CNF was generated in %d days, %d hours, %d minutes and %d seconds" % (
            rdelta.days, rdelta.hours, rdelta.minutes, rdelta.seconds)
        dt1 = datetime.datetime.fromtimestamp(time.time())
        print '* Initiating string from CFG generation'
        grammar = cnfgenerator.get_rules(optimized)
        print ' - Total grammar rules are ' + repr(len(grammar))
        gen = CFGGenerator(CNFGenerator(grammar),
                           optimized=optimized,
                           splitstring=0,
                           maxstate=maxstate)
        return_string = gen.generate()
        dt2 = datetime.datetime.fromtimestamp(time.time())
        rdelta = dateutil.relativedelta.relativedelta(dt2, dt1)
        print "* A string was generated in %d days, %d hours, %d minutes and %d seconds" % (
            rdelta.days, rdelta.hours, rdelta.minutes, rdelta.seconds)
        print return_string
    else:
        return_string = None
    return return_string
| 911,728 |
find an instance
Create a new instance and populate it with data stored if it exists.
Args:
binding_id (string): UUID of the binding
instance (AtlasServiceInstance.Instance): instance
Returns:
AtlasServiceBinding: A binding
|
def find(self, binding_id, instance):
    """Find a binding: create the object and populate it with stored data
    if it exists.

    Args:
        binding_id (string): UUID of the binding
        instance (AtlasServiceInstance.Instance): instance
    Returns:
        AtlasServiceBinding.Binding: The (possibly populated) binding
    """
    binding = AtlasServiceBinding.Binding(binding_id, instance)
    self.backend.storage.populate(binding)
    return binding
| 911,750 |
Create the binding
Args:
binding (AtlasServiceBinding.Binding): Existing or New binding
parameters (dict): Parameters for the binding
Returns:
Binding: Status
Raises:
ErrBindingAlreadyExists: If binding exists but with different parameters
|
def bind(self, binding, parameters):
    """Create the binding.

    Args:
        binding (AtlasServiceBinding.Binding): Existing or New binding
        parameters (dict): Parameters for the binding
    Returns:
        Binding: Status
    Raises:
        ErrBindingAlreadyExists: If the binding exists with different
            parameters, or when identical but credentials cannot be
            regenerated predictably.
    """
    if not binding.isProvisioned():
        # Update binding parameters
        binding.parameters = parameters
        # Credentials
        creds = self.backend.config.generate_binding_credentials(binding)
        # Binding
        p = self.backend.config.generate_binding_permissions(
            binding,
            DatabaseUsersPermissionsSpecs(creds["username"],creds["password"])
        )
        try:
            self.backend.atlas.DatabaseUsers.create_a_database_user(p)
        except ErrAtlasConflict:
            # The user already exists. This is not an issue because this is possible that we
            # created it in a previous call that failed later on the broker.
            pass
        self.backend.storage.store(binding)
        # Bind done
        return Binding(BindState.SUCCESSFUL_BOUND,
                       credentials = creds)
    elif binding.parameters == parameters:
        if self.backend.config.isGenerateBindingCredentialsPredictible():
            # Identical and credentials generation is predictible so we can return credentials again.
            creds = self.backend.config.generate_binding_credentials(binding)
            return Binding(BindState.IDENTICAL_ALREADY_EXISTS,
                           credentials = creds)
        # Identical but credentials generation is NOT predictible. So we are breaking the spec to avoid
        # wrong data injection. In this case we trigger a conflicting parameters for the existing binding despite
        # this is not the case.
        raise ErrBindingAlreadyExists()
    else:
        # Different parameters ...
        raise ErrBindingAlreadyExists()
| 911,751 |
Unbind the instance
Args:
binding (AtlasServiceBinding.Binding): Existing or New binding
|
def unbind(self, binding):
    """Unbind: delete the Atlas database user and remove the stored
    binding.

    Args:
        binding (AtlasServiceBinding.Binding): Existing or New binding
    """
    username = self.backend.config.generate_binding_username(binding)
    try:
        self.backend.atlas.DatabaseUsers.delete_a_database_user(username)
    except ErrAtlasNotFound:
        # The user does not exist. This is not an issue because this is possible that we
        # removed it in a previous call that failed later on the broker.
        # This cover a manually deleted user case too.
        pass
    self.backend.storage.remove(binding)
| 911,752 |
Find
Args:
_id (str): instance id or binding Id
Keyword Arguments:
instance (AtlasServiceInstance.Instance): Existing instance
Returns:
AtlasServiceInstance.Instance or AtlasServiceBinding.Binding: An instance or binding.
|
def find(self, _id, instance=None):
    """Look up either a service instance or a binding.

    Args:
        _id (str): instance id or binding id
    Keyword Arguments:
        instance (AtlasServiceInstance.Instance): Existing instance; when
            given, `_id` is treated as a binding id.
    Returns:
        AtlasServiceInstance.Instance or AtlasServiceBinding.Binding:
        An instance or binding.
    """
    if instance is None:
        # No instance supplied: `_id` names a service instance.
        return self.service_instance.find(_id)
    # Otherwise `_id` names a binding attached to `instance`.
    return self.service_binding.find(_id, instance)
| 911,862 |
Create an instance
Args:
instance (AtlasServiceInstance.Instance): Existing or New instance
parameters (dict): Parameters for the instance
Keyword Arguments:
existing (bool): True (use an existing cluster), False (create a new cluster)
Returns:
ProvisionedServiceSpec: Status
|
def create(self, instance, parameters, existing=True):
    """Create an instance by delegating to the service-instance manager.

    Args:
        instance (AtlasServiceInstance.Instance): Existing or New instance
        parameters (dict): Parameters for the instance
    Keyword Arguments:
        existing (bool): True (use an existing cluster), False (create a
            new cluster)
    Returns:
        ProvisionedServiceSpec: Status
    """
    return self.service_instance.create(instance, parameters, existing)
| 911,863 |
Constructor.
Args:
version_string (str): The string that gave too many types.
first_matched_type (str): The name of the first detected type.
second_matched_type (str): The name of the second detected type
|
def __init__(self, version_string, first_matched_type, second_matched_type):
    """Build the error message for an ambiguous release type.

    Args:
        version_string (str): The string that gave too many types.
        first_matched_type (str): The name of the first detected type.
        second_matched_type (str): The name of the second detected type.
    """
    super(TooManyTypesError, self).__init__(
        'Release "{}" cannot match types "{}" and "{}"'.format(
            version_string, first_matched_type, second_matched_type
        )
    )
| 911,958 |
Pad the left side of a bitarray with 0s to align its length with byte boundaries.
Args:
bits: A bitarray to be padded and aligned.
Returns:
A newly aligned bitarray.
|
def build_byte_align_buff(bits):
    """Left-pad a bitarray with 0s so its length is a multiple of 8.

    Args:
        bits: A bitarray to be padded and aligned.
    Returns:
        A newly aligned bitarray.
    """
    remainder = len(bits) % 8
    if remainder:
        # KEEP bitarray: allocate the missing bits and zero them.
        pad = bitarray(8 - remainder)
        pad.setall(False)
    else:
        # Already byte-aligned: prepend an empty bitarray.
        pad = bitarray()
    return pad + bits
| 911,977 |
Initialize the option with an option type.
Args:
option (option.Option): The option which is used to validate all
list options.
Raises:
TypeError: If the given option is not an instance of option.Option.
TypeError: If the default value is set but not an iterable.
|
def __init__(self, option=None, default=None, *args, **kwargs):
    """Initialize the list option with an element option type.

    Args:
        option (option.Option): The option which is used to validate all
            list options.
        default: Optional iterable of initial values; coerced on init.
    Raises:
        TypeError: If the given option is not an instance of option.Option.
        TypeError: If the default value is set but not an iterable.
    """
    super(ListOption, self).__init__(*args, **kwargs)
    if not isinstance(option, opt.Option):
        raise TypeError("Option must be an option type.")
    self._option = option
    self._default = default
    if default is not None:
        # Coercing the default validates it (raises on non-iterables).
        self._value = self.coerce(default)
| 911,987 |
Initialize the Namespace with options
Args:
description (str, optional): A human readable description of what
the Namespace contains.
**options: Each keyword should be an Option object which will be
added to the Namespace.
Raises:
TypeError: If an entry is not an Option object.
|
def __init__(self, description=None, **options):
    """Initialize the Namespace with options.

    Args:
        description (str, optional): A human readable description of what
            the Namespace contains.
        **options: Each keyword should be an Option object which will be
            added to the Namespace.
    Raises:
        TypeError: If an entry is not an Option object.
    """
    self.__doc__ = description
    self._options = {}
    # register() validates each entry's type.
    for name, option in compat.iteritems(options):
        self.register(name, option)
    super(Namespace, self).__init__()
| 911,997 |
Fetch an option from the dictionary.
Args:
name (str): The name of the option.
default: The value to return if the name is missing.
Returns:
any: The value stored by the option.
This method resolves the option to its value rather than returning
the option object itself. Use the 'options()' method or this object's
iter to get the raw options.
|
def get(self, name, default=None):
    """Fetch an option's value from the dictionary.

    The option is resolved to its value rather than returned as the raw
    option object; use the 'options()' method or this object's iter to
    get the raw options.

    Args:
        name (str): The name of the option.
        default: The value to return if the name is missing.
    Returns:
        any: The value stored by the option.
    """
    option = self._options.get(name)
    return default if option is None else option.__get__(self)
| 911,998 |
Set an option value.
Args:
name (str): The name of the option.
value: The value to set the option to.
Raises:
AttributeError: If the name is not registered.
TypeError: If the value is not a string or appropriate native type.
ValueError: If the value is a string but cannot be coerced.
|
def set(self, name, value):
    """Assign a value to a registered option.

    Args:
        name (str): The name of the option.
        value: The value to set the option to.
    Raises:
        AttributeError: If the name is not registered.
        TypeError: If the value is not a string or appropriate native type.
        ValueError: If the value is a string but cannot be coerced.
    """
    try:
        option = self._options[name]
    except KeyError:
        raise AttributeError("Option {0} does not exist.".format(name))
    return option.__set__(self, value)
| 911,999 |
Register a new option with the namespace.
Args:
name (str): The name to register the option under.
option (option.Option): The option object to register.
Raises:
TypeError: If the option is not an option.Option object.
ValueError: If the name is already registered.
|
def register(self, name, option):
    """Register a new option with the namespace.

    Args:
        name (str): The name to register the option under.
        option (option.Option): The option object to register.
    Raises:
        TypeError: If the option is not an option.Option object.
        ValueError: If the name is already registered.
    """
    # Duplicate-name check deliberately runs before the type check.
    if name in self._options:
        raise ValueError("Option {0} already exists.".format(name))
    if not isinstance(option, opt.Option):
        raise TypeError("Options must be of type Option.")
    self._options[name] = option
| 912,000 |
Set an option value.
Args:
name (str): The name of the option.
value: The value to set the option to.
Raises:
TypeError: If the value is not a string or appropriate native type.
ValueError: If the value is a string but cannot be coerced.
If the name is not registered a new option will be created using the
option generator.
|
def set(self, name, value):
    """Assign a value to an option, auto-registering unknown names.

    If the name is not registered, a new option is first created using
    the option generator.

    Args:
        name (str): The name of the option.
        value: The value to set the option to.
    Raises:
        TypeError: If the value is not a string or appropriate native type.
        ValueError: If the value is a string but cannot be coerced.
    """
    if name not in self._options:
        self.register(name, self._generator())
    target = self._options[name]
    return target.__set__(self, value)
| 912,003 |
Initialize general controller driver values with defaults.
Args:
dev (usb1.USBDevice) - Device entry the driver will control.
|
def __init__(self, dev):
    """Initialize general controller driver values with defaults.

    Args:
        dev (usb1.USBDevice): Device entry the driver will control.
    """
    self._dev = dev
    # Populated lazily; no USB handle is opened at construction time.
    self._dev_handle = None
    self._scanchain = None
    self._jtagon = False
    self._speed = None
| 912,135 |
Run a list of executable primitives on this controller, and distribute the returned data to the associated TDOPromises.
Args:
commands: A list of Executable Primitives to be run in order.
|
def _execute_primitives(self, commands):
    """Run a list of executable primitives on this controller in order.

    Each primitive's execute() distributes returned data to its
    associated TDOPromises.

    Args:
        commands: A list of Executable Primitives to be run in order.
    """
    for p in commands:
        if self._scanchain and self._scanchain._debug:
            print(" Executing", p)#pragma: no cover
        p.execute(self)
| 912,136 |
Add a promise to the promise collection at an optional offset.
Args:
promise: A TDOPromise to add to this collection.
bitoffset: An integer offset for this new promise in the collection.
_offsetideal: An integer offset for this new promise in the collection if the associated primitive supports arbitrary TDO control.
|
def add(self, promise, bitoffset, *, _offsetideal=None):
    """Add a promise to the promise collection at an optional offset.

    Args:
        promise: A TDOPromise or TDOPromiseCollection to add to this
            collection.
        bitoffset: An integer offset for this new promise in the
            collection.
        _offsetideal: An integer offset for this new promise if the
            associated primitive supports arbitrary TDO control;
            defaults to bitoffset.
    """
    #This Assumes that things are added in order.
    #Sorting or checking should likely be added.
    if _offsetideal is None:
        _offsetideal = bitoffset
    if isinstance(promise, TDOPromise):
        newpromise = promise.makesubatoffset(
            bitoffset, _offsetideal=_offsetideal)
        self._promises.append(newpromise)
    elif isinstance(promise, TDOPromiseCollection):
        # Flatten nested collections by re-adding each child promise.
        for p in promise._promises:
            self.add(p, bitoffset, _offsetideal=_offsetideal)
| 912,162 |
Finds the shortest string using BFS
Args:
graph (DFA): The DFA states
start (DFA state): The DFA initial state
Returns:
str: The shortest string
|
def bfs(graph, start):
    """Find the shortest accepted string using breadth-first search.

    Args:
        graph (DFA): The DFA states
        start (DFA state): The DFA initial state
    Returns:
        str: The shortest string reaching a final state (None if no
        final state is reachable)
    """
    # maintain a queue of paths; each path is a list of [char, state] pairs
    queue = []
    visited = []
    # maintain a queue of nodes
    # push the first path into the queue
    queue.append([['', start]])
    while queue:
        # get the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1][1]
        if node.stateid not in visited:
            visited.append(node.stateid)
            # path found: weight != +inf marks a final state
            if node.final != TropicalWeight(float('inf')):
                return "".join([mnode[0] for mnode in path])
            # enumerate all adjacent nodes, construct a new path and push
            # it into the queue
            for arc in node.arcs:
                char = graph.isyms.find(arc.ilabel)
                next_state = graph[arc.nextstate]
                # print next_state.stateid
                if next_state.stateid not in visited:
                    new_path = list(path)
                    new_path.append([char, next_state])
                    queue.append(new_path)
| 912,337 |
!DEMO!
Simple file parsing generator
Args:
filename: absolute or relative path to file on disk
encoding: encoding string that is passed to open function
|
def parse(filename, encoding=None):
    """!DEMO! Yield whitespace-separated words from a text file.

    Args:
        filename: absolute or relative path to file on disk
        encoding: encoding string that is passed to open function
    """
    with open(filename, encoding=encoding) as source:
        for line in source:
            yield from line.split()
| 912,394 |
Add chain to current shelve file
Args:
name: chain name
order: markov chain order
|
def add_chain(self, name, order):
    """Add a chain to the current shelve file.

    Args:
        name: chain name
        order: markov chain order
    Raises:
        ValueError: if a chain with this name already exists.
    """
    # NOTE(review): membership is tested with `in` but the chain is stored
    # via setattr — this assumes self.chains supports both containment and
    # attribute assignment; confirm with the container type.
    if name not in self.chains:
        setattr(self.chains, name, MarkovChain(order=order))
    else:
        raise ValueError("Chain with this name already exists")
| 912,403 |
Remove chain from current shelve file
Args:
name: chain name
|
def remove_chain(self, name):
    """Remove a chain from the current shelve file.

    Args:
        name: chain name
    Raises:
        ValueError: if no chain with this name exists.
    """
    # NOTE(review): membership via `in` but removal via delattr — assumes
    # self.chains supports both; confirm with the container type.
    if name in self.chains:
        delattr(self.chains, name)
    else:
        raise ValueError("Chain with this name not found")
| 912,404 |
Build markov chain from source on top of existing chain
Args:
source: iterable which will be used to build chain
chain: MarkovChain in currently loaded shelve file that
will be extended by source
|
def build_chain(self, source, chain):
    """Build a markov chain from source on top of an existing chain.

    Args:
        source: iterable which will be used to build chain
        chain: MarkovChain in currently loaded shelve file that
            will be extended by source
    """
    # Slide a window of (order + 1) items: the first `order` items form
    # the key, the last item is the observed successor whose count is
    # incremented.
    for group in WalkByGroup(source, chain.order+1):
        pre = group[:-1]
        res = group[-1]
        if pre not in chain.content:
            chain.content[pre] = {res: 1}
        else:
            if res not in chain.content[pre]:
                chain.content[pre][res] = 1
            else:
                chain.content[pre][res] += 1
    chain.decache()
| 912,405 |
!DEMO!
Demo function that shows how to generate a simple sentence starting with
uppercase letter without lenght limit.
Args:
chain: MarkovChain that will be used to generate sentence
|
def generate_sentence(self, chain):
    """!DEMO! Generate a simple sentence from the chain.

    Starts from a random recorded start-word tuple and extends the
    sentence by weighted random choice until a word ends with
    sentence-final punctuation ('.', '?' or '!').

    Args:
        chain: MarkovChain that will be used to generate sentence
    Returns:
        str: The generated sentence
    """
    def weighted_choice(choices):
        # Pick a value with probability proportional to its weight.
        total_weight = sum(weight for val, weight in choices)
        rand = random.uniform(0, total_weight)
        upto = 0
        for val, weight in choices:
            if upto + weight >= rand:
                return val
            upto += weight
    sentence = list(random.choice(chain.startwords))
    while not sentence[-1][-1] in ['.', '?', '!']:
        # NOTE(review): keys on the last two words — assumes a chain of
        # order 2; confirm.
        sentence.append(
            weighted_choice(
                chain.content[tuple(sentence[-2:])].items()
            )
        )
    return ' '.join(sentence)
| 912,406 |
Read a value from the configuration, with a default.
Args:
section_name (str): name of the section in the configuration from which
the option should be found.
option (str): name of the configuration option.
default_option (str): name of the default configuration option whose
value should be returned if the requested option is not found.
Returns:
str: the value from the ini file.
|
def get_config_value(self, section_name, option, default_option="default"):
    """Read a value from the configuration, with a default.

    The ini file is parsed lazily on first use and cached on self.config.

    Args:
        section_name (str): name of the section in the configuration from
            which the option should be found.
        option (str): name of the configuration option.
        default_option (str): name of the default configuration option
            whose value should be returned if the requested option is not
            found.
    Returns:
        str: the value from the ini file.
    """
    if self.config is None:
        parser = configparser.ConfigParser()
        parser.read(self.ini_file_name)
        self.config = parser
    if option:
        try:
            return self.config.get(section_name, option)
        except configparser.NoOptionError:
            log.debug(
                "Didn't find a configuration option for '%s' section and '%s' option",
                section_name, option,
            )
    # Falsy option, or option missing from the section: fall back.
    return self.config.get(section_name, default_option)
| 912,586 |
Consumes an input and validates if it is accepted
Args:
    mystr (str): the input string to be consumed
stack (list): the stack of symbols
state (int): the current state of the PDA
curchar (int): the index of the consumed character
depth (int): the depth of the function call in the stack
Returns:
bool: A value indicating the correct or erroneous execution
|
def consume_input(self, mystr, stack=None, state=1, curchar=0, depth=0):
    """Consumes an input and validates if it is accepted.

    Args:
        mystr (str): the input string to be consumed (space-separated
            tokens)
        stack (list): the stack of symbols (a fresh list when omitted)
        state (int): the current state of the PDA
        curchar (int): the index of the consumed character
        depth (int): the depth of the function call in the stack
    Returns:
        bool: A value indicating the correct or erroneous execution
    """
    # Fix: the original signature used a mutable default argument
    # (stack=[]), which is shared across calls and leaks symbols from one
    # invocation into the next. Use a None sentinel instead.
    if stack is None:
        stack = []
    mystrsplit = mystr.split(' ')
    # type 1: push state — push the state's symbol and follow the single
    # epsilon transition.
    if self.s[state].type == 1:
        stack.append(self.s[state].sym)
        if len(self.s[state].trans) > 0:
            state = self.s[state].trans[0]
            if self.parse(
                    mystr,
                    stack=stack,
                    state=state,
                    curchar=curchar,
                    depth=depth + 1) == 1:
                return True
        return False
    # type 2: pop state — pop a symbol and try every transition guarded
    # by it.
    if self.s[state].type == 2:
        if len(stack) == 0:
            return False
        sym = stack.pop()
        for key in self.s[state].trans:
            if sym in self.s[state].trans[key]:
                if self.parse(
                        mystr,
                        stack=stack,
                        state=key,
                        curchar=curchar,
                        depth=depth + 1) == 1:
                    return True
        return False
    # type 3: read state — consume the current input token.
    if self.s[state].type == 3:
        for key in self.s[state].trans:
            if mystrsplit[curchar] in self.s[state].trans[key]:
                # print 'found '
                if curchar + 1 == len(mystrsplit) \
                        and 'closing' in self.s[key].trans:
                    return True
                elif curchar + 1 == len(mystrsplit):
                    return False
                # print 'lets try as next state the state ' + repr(key)
                if self.parse(
                        mystr,
                        stack=stack,
                        state=key,
                        curchar=curchar + 1,
                        depth=depth + 1) == 1:
                    return True
        return False
| 912,830 |
After pyfst minimization,
all unused arcs are removed,
and all sink states are removed.
However this may break compatibility.
Args:
alphabet (list): The input alphabet
Returns:
None
|
def fixminimized(self, alphabet):
    """Complete the DFA after pyfst minimization by adding a sink state.

    pyfst minimization removes all unused arcs and sink states, which may
    break compatibility; every missing (state, char) transition is routed
    to a newly appended non-final sink state.

    Args:
        alphabet (list): The input alphabet
    Returns:
        None
    """
    endstate = len(list(self.states))
    for state in self.states:
        for char in alphabet:
            found = 0
            for arc in state.arcs:
                if self.isyms.find(arc.ilabel) == char:
                    found = 1
                    break
            if found == 0:
                # Missing transition: route it to the sink state.
                self.add_arc(state.stateid, endstate, char)
    # Weight +infinity marks a non-final state in the tropical semiring.
    self[endstate].final = TropicalWeight(float('inf'))
    for char in alphabet:
        self.add_arc(endstate, endstate, char)
| 912,943 |
Convert a path to the string representing the path
Args:
path (tuple): A tuple of arcs
Returns:
inp (str): The path concatenated as as string
|
def _path_to_str(self, path):
    """Convert a path to the string representing the path.

    Args:
        path (tuple): A tuple of arcs
    Returns:
        inp (str): The path's input labels concatenated as a string
    """
    inp = ''
    for arc in path:
        i = self.isyms.find(arc.ilabel)
        # Ignore \epsilon transitions both on input
        if i != fst.EPSILON:
            inp += i
    return inp
| 912,944 |
Initialize this DFA by copying arcs, initial and final states
from an acceptor.
Args:
    acceptor: The acceptor automaton to copy from
Returns:
    None
def init_from_acceptor(self, acceptor):
    """Initialize this DFA by copying arcs, final and initial states from
    an acceptor.

    Args:
        acceptor: The acceptor automaton to copy from
    Returns:
        None
    """
    # Sort so that the initial state is processed first.
    states = sorted(
        acceptor.states,
        key=attrgetter('initial'),
        reverse=True)
    for state in states:
        for arc in state.arcs:
            itext = acceptor.isyms.find(arc.ilabel)
            # Only copy transitions whose symbol is in our alphabet.
            if itext in self.alphabet:
                self.add_arc(state.stateid, arc.nextstate, itext)
        if state.final:
            self[state.stateid].final = True
        if state.initial:
            self[state.stateid].initial = True
| 912,945 |
Return True/False if the machine accepts/reject the input.
Args:
inp (str): input string to be consumed
Returns:
bool: A true or false value depending on if the DFA
accepts the provided input
|
def consume_input(self, inp):
    """Return True/False if the machine accepts/rejects the input.

    Args:
        inp (str): input string to be consumed
    Returns:
        bool: True iff the DFA accepts the provided input
    """
    # Start from the initial state (sorted first).
    cur_state = sorted(
        self.states,
        key=attrgetter('initial'),
        reverse=True)[0]
    while len(inp) > 0:
        found = False
        for arc in cur_state.arcs:
            if self.isyms.find(arc.ilabel) == inp[0]:
                cur_state = self[arc.nextstate]
                inp = inp[1:]
                found = True
                break
        if not found:
            # No transition for the current character: reject.
            return False
    # Weight +infinity marks a non-final state in the tropical semiring.
    return cur_state.final != TropicalWeight(float('inf'))
| 912,946 |
Generate string_length random strings that belong to the automaton.
Args:
string_length (integer): The size of the random string
Returns:
str: The generated string
|
def random_strings(self, string_length=1):
    """Generate random strings that belong to the automaton.

    Args:
        string_length (integer): The size of each random string
    Returns:
        list: The generated strings, one per uniformly generated path
    """
    return [self._path_to_str(path)
            for path in self.uniform_generate(string_length)]
| 912,947 |
Save the machine in the openFST format in the file denoted by
txt_fst_filename.
Args:
txt_fst_filename (str): The name of the file
Returns:
None
|
def save(self, txt_fst_filename):
    """Save the machine in the openFST text format in the file denoted by
    txt_fst_filename.

    Arc lines are written as: src dest ilabel olabel (labels hex-encoded);
    a final state is written as a line containing just the state id.

    Args:
        txt_fst_filename (str): The name of the file
    Returns:
        None
    """
    txt_fst = open(txt_fst_filename, 'w+')
    # The initial state (sorted first) must be written first.
    states = sorted(self.states, key=attrgetter('initial'), reverse=True)
    for state in states:
        for arc in state.arcs:
            itext = self.isyms.find(arc.ilabel)
            otext = self.osyms.find(arc.ilabel)
            # NOTE: str.encode('hex') is Python 2 only.
            txt_fst.write(
                '{}\t{}\t{}\t{}\n'.format(
                    state.stateid,
                    arc.nextstate,
                    itext.encode('hex'),
                    otext.encode('hex')))
        if state.final:
            txt_fst.write('{}\n'.format(state.stateid))
    txt_fst.close()
| 912,948 |
Load the transducer from the text file format of OpenFST.
The format is specified as follows:
arc format: src dest ilabel olabel [weight]
final state format: state [weight]
lines may occur in any order except initial state must be first line
Args:
txt_fst_filename (string): The name of the file
Returns:
None
|
def load(self, txt_fst_filename):
    """Load the transducer from the text file format of OpenFST.

    The format is specified as follows:
        arc format: src dest ilabel olabel [weight]
        final state format: state [weight]
    Lines may occur in any order except the initial state must be first.

    Args:
        txt_fst_filename (string): The name of the file
    Returns:
        None
    """
    with open(txt_fst_filename, 'r') as txt_fst:
        for line in txt_fst:
            line = line.strip()
            splitted_line = line.split()
            if len(splitted_line) == 1:
                # A single field denotes a final state declaration.
                self[int(splitted_line[0])].final = True
            else:
                # NOTE: str.decode('hex') is Python 2 only.
                self.add_arc(int(splitted_line[0]), int(
                    splitted_line[1]), splitted_line[2].decode('hex'))
| 912,949 |
Initialization function for Arc's guardgen structure
Args:
src_state_id (int): The source state identifier
dst_state_id (int): The destination state identifier
guard_p: The input character
term: The input term
Returns:
None
|
def __init__(self, src_state_id, dst_state_id, guard_p, term=None):
    """Initialization function for Arc's guardgen structure.

    Args:
        src_state_id (int): The source state identifier
        dst_state_id (int): The destination state identifier
        guard_p: The guard for the transition
        term: The input term
    Returns:
        None
    """
    self.src_state = src_state_id
    self.dst_state = dst_state_id
    self.guard = guard_p
    # Fix: the `term` argument was previously discarded (self.term was
    # unconditionally set to None); store the provided value instead.
    self.term = term
| 912,955 |
Initialization of the SFA oject
Args:
alphabet (list): The input alphabet
Returns:
None
|
def __init__(self, alphabet=None):
    """Initialization of the SFA object.

    Args:
        alphabet (list): The input alphabet
    Returns:
        None
    """
    self.states = []
    self.arcs = []
    self.alphabet = alphabet
| 912,956 |
This function adds a new arc in a SFA state
Args:
src (int): The source state identifier
dst (int): The destination state identifier
char (str): The transition symbol
Returns:
None
|
def add_arc(self, src, dst, char):
    """Add a new arc to an SFA state, growing the state list so that both
    endpoints exist.

    Args:
        src (int): The source state identifier
        dst (int): The destination state identifier
        char: The transition guard/symbol
    Returns:
        None
    """
    # Idiom fix: use isinstance() rather than comparing exact types
    # (type(x) == type(int())); isinstance is the canonical type check.
    # NOTE: assert-based validation is stripped under `python -O`.
    assert isinstance(src, int) and isinstance(dst, int), \
        "State type should be integer."
    while src >= len(self.states) or dst >= len(self.states):
        self.add_state()
    self.states[src].arcs.append(SFAArc(src, dst, char))
| 912,958 |
Return True/False if the machine accepts/reject the input.
Args:
inp (str): input string to be consumed
Returns:
bool: A true or false value depending on if the DFA
accepts the provided input
|
def consume_input(self, inp):
    """Return True/False if the machine accepts/rejects the input.

    Args:
        inp (str): input string to be consumed
    Returns:
        bool: True iff the SFA accepts the provided input
    Raises:
        RuntimeError: if no guard matches a character (incomplete SFA)
    """
    current = self.states[0]
    for symbol in inp:
        for transition in current.arcs:
            if transition.guard.is_sat(symbol):
                current = self.states[transition.dst_state]
                break
        else:
            # No guard matched this character: the SFA is incomplete.
            raise RuntimeError('SFA not complete')
    return current.final
| 912,959 |
Transforms the SFA into a DFA
Args:
None
Returns:
DFA: The generated DFA
|
def concretize(self):
    """Transform the SFA into a DFA by expanding each guard into explicit
    per-character transitions.

    Args:
        None
    Returns:
        DFA: The generated DFA
    """
    dfa = DFA(self.alphabet)
    for state in self.states:
        for arc in state.arcs:
            # Expand the symbolic guard into one arc per satisfying char.
            for char in arc.guard:
                dfa.add_arc(arc.src_state, arc.dst_state, char)
    # NOTE: xrange is Python 2 only.
    for i in xrange(len(self.states)):
        if self.states[i].final:
            dfa[i].final = True
    return dfa
| 912,960 |
Read DFA transitions from flex compiled file
Args:
None
Returns:
list: The list of states and the destination for a character
|
def _read_transitions(self):
    """Read DFA transitions from a flex-compiled C file.

    Scans self.outfile for the yy_nxt table declaration and parses each
    brace-delimited row into a list of integer destination states.

    Args:
        None
    Returns:
        list: One list per state with the destination for each character
    """
    states = []
    i = 0
    regex = re.compile('[ \t\n\r:,]+')
    found = 0  # For maintaining the state of yy_nxt declaration
    state = 0  # For maintaining the state of opening and closing tag of yy_nxt
    substate = 0  # For maintaining the state of opening and closing tag of each set in yy_nxt
    mapping = []  # For writing each set of yy_next
    cur_line = None
    with open(self.outfile) as flex_file:
        for cur_line in flex_file:
            # Two spellings of the table declaration across flex versions.
            if cur_line[0:35] == "static yyconst flex_int16_t yy_nxt[" or cur_line[0:33] == "static const flex_int16_t yy_nxt[":
                found = 1
                # print 'Found yy_next declaration'
                continue
            if found == 1:
                if state == 0 and cur_line[0:5] == "    {":
                    state = 1
                    continue
                if state == 1 and cur_line[0:7] == "    } ;":
                    state = 0
                    break
                if substate == 0 and cur_line[0:5] == "    {":
                    mapping = []
                    substate = 1
                    continue
                if substate == 1:
                    if cur_line[0:6] != "    },":
                        # Strip all whitespace, then split on separators;
                        # drop a trailing comma first if present.
                        cur_line = "".join(cur_line.split())
                        if cur_line == '':
                            continue
                        if cur_line[cur_line.__len__() - 1] == ',':
                            splitted_line = regex.split(
                                cur_line[:cur_line.__len__() - 1])
                        else:
                            splitted_line = regex.split(cur_line)
                        mapping = mapping + splitted_line
                        continue
                    else:
                        # End of a row: convert accumulated tokens to ints.
                        cleared = []
                        for j in mapping:
                            cleared.append(int(j))
                        states.append(cleared)
                        mapping = []
                        substate = 0
    return states
| 913,101 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.