text_prompt
stringlengths 157
13.1k
| code_prompt
stringlengths 7
19.8k
⌀ |
---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _preprocess(self, filehandle, metadata):
"Runs all attached preprocessors on the provided filehandle."
for process in self._preprocessors:
filehandle = process(filehandle, metadata)
return filehandle
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _postprocess(self, filehandle, metadata):
"Runs all attached postprocessors on the provided filehandle."
for process in self._postprocessors:
filehandle = process(filehandle, metadata)
return filehandle
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, filehandle, destination=None, metadata=None, validate=True, catch_all_errors=False, *args, **kwargs):
"""Saves the filehandle to the provided destination or the attached default destination. Allows passing arbitrary positional and keyword arguments to the saving mechanism :param filehandle: werkzeug.FileStorage instance :param dest: String path, callable or writable destination to pass the filehandle off to. Transfer handles transforming a string or writable object into a callable automatically. :param metadata: Optional mapping of metadata to pass to validators, preprocessors, and postprocessors. :param validate boolean: Toggle validation, defaults to True :param catch_all_errors boolean: Toggles if validation should collect all UploadErrors and raise a collected error message or bail out on the first one. """
|
# NOTE(review): this fragment is the body of Transfer.save(); `self`,
# `filehandle`, `destination`, `metadata`, `validate` and
# `_make_destination_callable` come from the enclosing method, whose
# `def` line is not part of this fragment.
# Fall back to the object's default destination when the caller did not
# supply one.
destination = destination or self._destination
if destination is None:
    raise RuntimeError("Destination for filehandle must be provided.")
elif destination is not self._destination:
    # A caller-supplied destination may be a string path or a writable
    # object; normalize it into a callable.  The stored default is
    # presumably already normalized -- TODO confirm.
    destination = _make_destination_callable(destination)
if metadata is None:
    metadata = {}
# Validation runs before any transformation of the file.
if validate:
    self._validate(filehandle, metadata)
# Pipeline: preprocess -> deliver to destination -> postprocess.
filehandle = self._preprocess(filehandle, metadata)
destination(filehandle, metadata)
filehandle = self._postprocess(filehandle, metadata)
return filehandle
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def choose_palette(stream=sys.stdout, basic_palette=None):
    ''' Make a best effort to automatically determine whether to enable
        ANSI sequences, and if so, which color palettes are available.

        This is the main function of the module—meant to be used unless
        something more specific is needed.

        Takes the following factors into account:
        - Whether output stream is a TTY.
        - ``TERM``, ``ANSICON`` environment variables
        - ``CLICOLOR``, ``NO_COLOR`` environment variables

        Arguments:
            stream: Which output file to check: stdout, stderr
            basic_palette: Force the platform-dependent 16 color palette,
                           for testing.  List of 16 rgb-int tuples.
        Returns:
            None, str: 'basic', 'extended', or 'truecolor'
    '''
    result = None
    pal = basic_palette
    log.debug('console version: %s', __version__)
    log.debug('X11_RGB_PATHS: %r', X11_RGB_PATHS)

    if color_is_forced():
        # Fix: detect_palette_support() returns a (result, palette) tuple,
        # which is always truthy, so the previous ``... or 'basic'`` never
        # fired -- and had it fired, unpacking the string 'basic' into two
        # names would have raised.  Apply the 'basic' floor to the result
        # element instead, so forcing color guarantees at least 'basic'.
        result, pal = detect_palette_support(basic_palette=pal)
        result = result or 'basic'
    elif is_a_tty(stream=stream) and color_is_allowed():
        result, pal = detect_palette_support(basic_palette=pal)

    # build the nearest-color lookup tables from the palette we settled on
    proximity.build_color_tables(pal)
    log.debug('Basic palette: %r', pal)
    log.debug('%r', result)
    return result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def detect_palette_support(basic_palette=None):
    ''' Returns whether we think the terminal supports basic, extended, or
        truecolor.  None if not able to tell.

        Returns:
            tuple: (None or str: 'basic'/'extended'/'truecolor',
                    the basic palette found, or the one passed in)
    '''
    result = col_init = win_enabled = None
    TERM = env.TERM or ''
    if os_name == 'nt':
        # Windows-only helpers, imported lazily so other platforms never
        # load the module.
        from .windows import (is_ansi_capable, enable_vt_processing,
                              is_colorama_initialized)
        if is_ansi_capable():
            # enable_vt_processing() presumably returns a status flag per
            # stream; all() requires every stream enabled -- TODO confirm.
            win_enabled = all(enable_vt_processing())
            col_init = is_colorama_initialized()

    # The checks run from lowest to highest capability, so a later match
    # upgrades `result`.
    # linux, older Windows + colorama
    if TERM.startswith('xterm') or (TERM == 'linux') or col_init:
        result = 'basic'
    # xterm, fbterm, older Windows + ansicon
    if ('256color' in TERM) or (TERM == 'fbterm') or env.ANSICON:
        result = 'extended'
    # https://bugzilla.redhat.com/show_bug.cgi?id=1173688 - obsolete?
    if env.COLORTERM in ('truecolor', '24bit') or win_enabled:
        result = 'truecolor'

    # find the platform-dependent 16-color basic palette, unless the
    # caller forced one
    pal_name = 'Unknown'
    if result and not basic_palette:
        result, pal_name, basic_palette = _find_basic_palette(result)

    # webcolors is optional; its availability is only logged here.
    try:
        import webcolors
    except ImportError:
        webcolors = None
    log.debug(
        f'{result!r} ({os_name}, TERM={env.TERM or ""}, '
        f'COLORTERM={env.COLORTERM or ""}, ANSICON={env.ANSICON}, '
        f'webcolors={bool(webcolors)}, basic_palette={pal_name})'
    )
    return (result, basic_palette)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _find_basic_palette(result):
    ''' Find the platform-dependent 16-color basic palette.

        This is used for "downgrading to the nearest color" support.

        Arguments:
            result: the support level detected so far; may be overridden
                    (e.g. WSL reports xterm but renders like cmd).
        Returns:
            tuple: (result, pal_name, basic_palette)
    '''
    pal_name = 'default (xterm)'
    basic_palette = color_tables.xterm_palette4
    if env.SSH_CLIENT:  # fall back to xterm over ssh, info often wrong
        pal_name = 'ssh (xterm)'
    else:
        if os_name == 'nt':
            if sys.getwindowsversion()[2] > 16299:  # Win10 FCU, new palette
                pal_name = 'cmd_1709'
                basic_palette = color_tables.cmd1709_palette4
            else:
                pal_name = 'cmd_legacy'
                basic_palette = color_tables.cmd_palette4
        elif sys.platform == 'darwin':
            if env.TERM_PROGRAM == 'Apple_Terminal':
                pal_name = 'termapp'
                basic_palette = color_tables.termapp_palette4
            elif env.TERM_PROGRAM == 'iTerm.app':
                pal_name = 'iterm'
                basic_palette = color_tables.iterm_palette4
        elif os_name == 'posix':
            if env.TERM in ('linux', 'fbterm'):
                pal_name = 'vtrgb'
                basic_palette = parse_vtrgb()
            elif env.TERM.startswith('xterm'):
                # fix: LOW64 - Python on Linux on Windows!
                if 'Microsoft' in os.uname().release:
                    pal_name = 'cmd_1709'
                    basic_palette = color_tables.cmd1709_palette4
                    result = 'truecolor'  # override
                elif sys.platform.startswith('freebsd'):  # vga console :-/
                    pal_name = 'vga'
                    basic_palette = color_tables.vga_palette4
                else:
                    try:  # TODO: check green to identify palette, others?
                        if get_color('index', 2)[0][:2] == '4e':
                            pal_name = 'tango'
                            basic_palette = color_tables.tango_palette4
                        else:
                            raise RuntimeError('not the color scheme.')
                    except (IndexError, RuntimeError):
                        # query unsupported or scheme unknown: keep xterm
                        pass
        else:  # Amiga/Atari :-P
            # Fix: Logger.warn() is a deprecated alias; use warning().
            log.warning('Unexpected OS: os.name: %s', os_name)
    return result, pal_name, basic_palette
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def get_available_palettes(chosen_palette):
    ''' Given a chosen palette, returns tuple of those available,
        or None when not found.

        Because palette support of a particular level is almost always a
        superset of lower levels, the result holds every palette up to
        and including the chosen one.

        Returns:
            tuple, None: the available palettes, or None if chosen_palette
            is not recognized.
    '''
    try:
        stop = ALL_PALETTES.index(chosen_palette) + 1
    except ValueError:
        return None
    return ALL_PALETTES[:stop]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def is_a_tty(stream=sys.stdout):
    ''' Detect terminal or something else, such as output redirection.

        Returns:
            Boolean, None: is tty or None if not found.
    '''
    if hasattr(stream, 'isatty'):
        answer = stream.isatty()
    else:
        answer = None  # stream has no isatty() at all
    log.debug(answer)
    return answer
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def load_x11_color_map(paths=X11_RGB_PATHS):
    ''' Load and parse X11's rgb.txt.

        Loads:
            x11_color_map: { name_lower: ('R', 'G', 'B') }
    '''
    if type(paths) is str:
        paths = (paths,)
    color_map = color_tables.x11_color_map
    for candidate in paths:
        try:
            with open(candidate) as rgb_file:
                for raw_line in rgb_file:
                    # comments and blank lines
                    if raw_line.startswith('!') or raw_line.isspace():
                        continue
                    fields = raw_line.rstrip().split(maxsplit=3)
                    name = fields[3]
                    if ' ' in name:  # skip names with spaces to match webcolors
                        continue
                    color_map[name.lower()] = tuple(fields[:3])
            log.debug('X11 palette found at %r.', candidate)
            break  # first readable file wins
        except FileNotFoundError:
            log.debug('X11 palette file not found: %r', candidate)
        except IOError as err:
            log.debug('X11 palette file not read: %s', err)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def parse_vtrgb(path='/etc/vtrgb'):
    ''' Parse the color table for the Linux console. '''
    rows = []
    try:
        with open(path) as table_file:
            for row_number, text in enumerate(table_file):
                rows.append(tuple(int(field) for field in text.split(',')))
                if row_number == 2:  # failsafe: only R, G, B rows expected
                    break
        # transpose rows into per-color columns
        palette = tuple(zip(*rows))
    except IOError:
        # unreadable/missing file: fall back to the VGA palette
        palette = color_tables.vga_palette4
    return palette
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _read_until(infile=sys.stdin, maxchars=20, end=RS):
    ''' Read a terminal response of up to a few characters from stdin. '''
    if not isinstance(end, tuple):
        end = (end,)
    read_one = infile.read
    collected = []
    remaining = maxchars
    # count down, stopping at 0
    while remaining:
        char = read_one(1)
        if char in end:  # terminator reached, not included in result
            break
        collected.append(char)
        remaining -= 1
    return ''.join(collected)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def get_color(name, number=None):
    ''' Query the default terminal, for colors, etc.

        Direct queries supported on xterm, iTerm, perhaps others.

        Arguments:
            str: name, one of ('foreground', 'fg', 'background', 'bg',
                 or 'index')  # index grabs a palette index
            int: or a "dynamic color number of (4, 10-19)," see links below.
            str: number - if name is index, number should be an int from 0…255

        Queries terminal using ``OSC # ? BEL`` sequence,
        call responds with a color in this X Window format syntax:
        - ``rgb:DEAD/BEEF/CAFE``
        - `Control sequences
          <http://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Operating-System-Commands>`_
        - `X11 colors
          <https://www.x.org/releases/X11R7.7/doc/libX11/libX11/libX11.html#RGB_Device_String_Specification>`_

        Returns:
            tuple[int]:
                A tuple of four-digit hex strings after parsing,
                the last two digits are the least significant and can be
                chopped if needed:  ``('DEAD', 'BEEF', 'CAFE')``
                If an error occurs during retrieval or parsing,
                the tuple will be empty.

        Examples:
            >>> get_color('bg')
            ('0000', '0000', '0000')
            >>> get_color('index', 2)        # second color in indexed
            ('4e4d', '9a9a', '0605')         # palette, 2 aka 32 in basic

        Note:
            Blocks if terminal does not support the function.
            Checks is_a_tty() first, since function would also block if i/o
            were redirected through a pipe.
            On Windows, only able to find palette defaults,
            which may be different if they were customized.
            To find the palette index instead, see ``windows.get_color``.
    '''
    colors = ()
    # Only query a real terminal; over SSH the response would describe
    # the wrong (remote) end.
    if is_a_tty() and not env.SSH_CLIENT:
        # Build the OSC code for a palette-index query on first use.
        # NOTE(review): cached without the number, so a later call with a
        # different index reuses the first code -- appears harmless since
        # `number` is also passed to _get_color_xterm below; confirm.
        if not 'index' in _color_code_map:
            _color_code_map['index'] = '4;' + str(number or '')
        if os_name == 'nt':
            from .windows import get_color
            color_id = get_color(name)
            if sys.getwindowsversion()[2] > 16299:  # Win10 FCU, new palette
                basic_palette = color_tables.cmd1709_palette4
            else:
                basic_palette = color_tables.cmd_palette4
            # Default palette values rendered as two-digit hex strings,
            # for compatibility with the xterm-style return shape.
            colors = (f'{i:02x}' for i in basic_palette[color_id])  # compat
        elif sys.platform == 'darwin':
            if env.TERM_PROGRAM == 'iTerm.app':
                # supports, though returns two chars per
                colors = _get_color_xterm(name, number)
        elif os_name == 'posix':
            if sys.platform.startswith('freebsd'):
                # vga console: query not supported
                pass
            elif env.TERM and env.TERM.startswith('xterm'):
                colors = _get_color_xterm(name, number)
    return tuple(colors)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def get_position(fallback=CURSOR_POS_FALLBACK):
    ''' Return the current column number of the terminal cursor.
        Used to figure out if we need to print an extra newline.

        Returns:
            tuple(int): (x, y), (,) - empty, if an error occurred.

        TODO: needs non-ansi mode for Windows

        Note:
            Checks is_a_tty() first, since function would block if i/o were
            redirected through a pipe.
    '''
    values = fallback
    if is_a_tty():
        import tty, termios
        try:
            with TermStack() as fd:
                tty.setcbreak(fd, termios.TCSANOW)  # shut off echo
                # DSR: ask the terminal to report the cursor position
                sys.stdout.write(CSI + '6n')  # screen.dsr, avoid import
                sys.stdout.flush()
                # terminal answers with e.g. ESC [ row ; col R
                resp = _read_until(maxchars=10, end='R')
        except AttributeError:  # no .fileno()
            return values

        # parse response
        # NOTE(review): lstrip() strips a character *set*, not a prefix;
        # harmless here since the response begins with ESC [.
        resp = resp.lstrip(CSI)
        try:  # reverse
            # partition(';') gives (row, ';', col); [::-2] yields (col, row)
            # so the result is (x, y) as documented.
            values = tuple( int(token) for token in resp.partition(';')[::-2] )
        except Exception as err:
            log.error('parse error: %s on %r', err, resp)
    return values
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def get_theme():
    ''' Checks system for theme information.

        First checks for the environment variable COLORFGBG.
        Next, queries terminal, supported on Windows and xterm, perhaps
        others.  See notes on get_color().

        Returns:
            str, None: 'dark', 'light', None if no information.
    '''
    theme = None
    log.debug('COLORFGBG: %s', env.COLORFGBG)
    if env.COLORFGBG:
        # environment hint wins; background value below 8 means dark
        bg_value = env.COLORFGBG.partition(';')[2]
        theme = 'dark' if bg_value < '8' else 'light'  # background wins
    elif os_name == 'nt':
        from .windows import get_color as _get_color  # avoid Unbound Local
        theme = 'dark' if _get_color('background') < 8 else 'light'
    elif os_name == 'posix':
        if env.TERM in ('linux', 'fbterm') or \
                sys.platform.startswith('freebsd'):
            theme = 'dark'  # console defaults
        else:
            # try xterm - find average across rgb
            rgb = get_color('background')  # bg wins
            if rgb:
                channels = tuple(int(part[:2], 16) for part in rgb)
                mean = sum(channels) / len(channels)
                theme = 'dark' if mean < 128 else 'light'
    log.debug('%r', theme)
    return theme
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def really_bad_du(path):
    """Sum the sizes (bytes) of every file under *path*.

    Don't actually use this, it's just an example: it stats every file
    on every call.
    """
    # Feed the generator straight into sum(); the intermediate list in
    # the original was a needless full materialization.
    return sum(os.path.getsize(fp) for fp in list_files(path))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_disk_usage(filehandle, meta):
"""Checks the upload directory to see if the uploaded file would exceed the total disk allotment. Meant as a quick and dirty example. """
|
# NOTE(review): body of check_disk_usage(filehandle, meta); relies on
# flask-style `current_app`, the really_bad_du() helper, and UploadError
# from the enclosing module.
# limit it at twenty kilobytes if no default is provided
MAX_DISK_USAGE = current_app.config.get('MAX_DISK_USAGE', 20 * 1024)
CURRENT_USAGE = really_bad_du(current_app.config['UPLOAD_PATH'])
# Seek to the end so tell() reports the upload's total size.
filehandle.seek(0, os.SEEK_END)
if CURRENT_USAGE + filehandle.tell() > MAX_DISK_USAGE:
    filehandle.close()
    raise UploadError("Upload exceeds allotment.")
# Rewind so downstream consumers read from the start.
filehandle.seek(0)
return filehandle
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def get_version(filename, version='1.00'):
    ''' Read version as text to avoid machinations at import time. '''
    with open(filename) as source:
        for text_line in source:
            if not text_line.startswith('__version__'):
                continue
            # expects e.g. __version__ = '1.23' (single quotes)
            pieces = text_line.split("'")
            if len(pieces) > 1:
                version = pieces[1]
            break  # stop at the first __version__ line either way
    return version
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def find_nearest_color_index(r, g, b, color_table=None, method='euclid'):
    ''' Given three integers representing R, G, and B,
        return the nearest color index.

        Arguments:
            r: int - of range 0…255
            g: int - of range 0…255
            b: int - of range 0…255
        Returns:
            int, None: index, or None on error.
    '''
    if not color_table:
        # lazily build the module-level tables on first use
        if not color_table8:
            build_color_tables()
        color_table = color_table8
    best_index = 0  # default to black
    # start above the maximum possible squared euclidean distance
    best_distance = 257 * 257 * 3
    for candidate, rgb in enumerate(color_table):
        dr = r - rgb[0]
        dg = g - rgb[1]
        db = b - rgb[2]
        distance = dr * dr + dg * dg + db * db
        if distance < best_distance:  # closer
            best_index = candidate
            best_distance = distance
    return best_index
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def find_nearest_color_hexstr(hexdigits, color_table=None, method='euclid'):
    ''' Given a three or six-character hex digit string, return the nearest
        color index.

        Arguments:
            hexdigits: a three/6 digit hex string, e.g. 'b0b', '123456'
        Returns:
            int, None: index, or None on error.
    '''
    length = len(hexdigits)
    try:
        if length == 3:
            # expand shorthand: each digit doubles, e.g. 'b' -> 0xbb
            channels = [int(digit, 16) * 17 for digit in hexdigits]
        elif length == 6:
            channels = [int(hexdigits[pos:pos + 2], 16) for pos in (0, 2, 4)]
        else:
            raise ValueError('wrong length: %r' % hexdigits)
    except ValueError:
        # bad length or non-hex characters
        return None
    return find_nearest_color_index(*channels,
                                    color_table=color_table,
                                    method=method)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def add_interval(self, start, end, data=None):
    '''
    Inserts an interval to the tree.

    Note that when inserting we do not maintain appropriate sorting of
    the "mid" data structure; this should be done after all intervals
    are inserted.
    '''
    if (end - start) <= 0:
        return  # intervals of zero or negative length are ignored
    if self.single_interval is None:
        # Empty tree: record the very first interval directly in a field.
        self.single_interval = (start, end, data)
        return
    if self.single_interval == 0:
        # Already a regular tree: use the standard insertion method.
        self._add_interval(start, end, data)
        return
    # Tree currently holds exactly one interval: promote it to a regular
    # tree, then insert the new interval.
    self._add_interval(*self.single_interval)
    self.single_interval = 0
    self._add_interval(start, end, data)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _query(self, x, result):
'''
Same as self.query, but uses a provided list to accumulate results into.
'''
if self.single_interval is None: # Empty
return
elif self.single_interval != 0: # Single interval, just check whether x is in it
if self.single_interval[0] <= x < self.single_interval[1]:
result.append(self.single_interval)
elif x < self.center: # Normal tree, query point to the left of center
if self.left_subtree is not None:
self.left_subtree._query(x, result)
for int in self.mid_sorted_by_start:
if int[0] <= x:
result.append(int)
else:
break
else: # Normal tree, query point to the right of center
for int in self.mid_sorted_by_end:
if int[1] > x:
result.append(int)
else:
break
if self.right_subtree is not None:
self.right_subtree._query(x, result)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dfa_complementation(dfa: dict) -> dict: """ Returns a DFA that accepts any word but he ones accepted by the input DFA. Let A be a completed DFA, :math:`Ā = (Σ, S, s_0 , ρ, S − F )` is the DFA that runs A but accepts whatever word A does not. :param dict dfa: input DFA. :return: *(dict)* representing the complement of the input DFA. """
|
# NOTE(review): body of dfa_complementation(dfa).  The DFA must be
# completed first so that flipping the accepting set flips the accepted
# language exactly; deepcopy keeps the input untouched.
dfa_complement = dfa_completion(deepcopy(dfa))
# Accepting states become S - F of the completed automaton.
dfa_complement['accepting_states'] = \
    dfa_complement['states'].difference(dfa_complement['accepting_states'])
return dfa_complement
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dfa_intersection(dfa_1: dict, dfa_2: dict) -> dict: """ Returns a DFA accepting the intersection of the DFAs in input. Let :math:`A_1 = (Σ, S_1 , s_{01} , ρ_1 , F_1 )` and :math:`A_2 = (Σ, S_2 , s_{02} , ρ_2 , F_2 )` be two DFAs. Then there is a DFA :math:`A_∧` that runs simultaneously both :math:`A_1` and :math:`A_2` on the input word and accepts when both accept. It is defined as: :math:`A_∧ = (Σ, S_1 × S_2 , (s_{01} , s_{02} ), ρ, F_1 × F_2 )` where :math:`ρ((s_1 , s_2 ), a) = (s_{X1} , s_{X2} )` iff :math:`s_{X1} = ρ_1 (s_1 , a)` and :math:`s_{X2}= ρ_2 (s_2 , a)` Implementation proposed guarantees the resulting DFA has only **reachable** states. :param dict dfa_1: first input DFA; :param dict dfa_2: second input DFA. :return: *(dict)* representing the intersected DFA. """
|
# NOTE(review): body of dfa_intersection(dfa_1, dfa_2): a product
# construction that only materializes product states reachable from the
# initial pair.
intersection = {
    'alphabet': dfa_1['alphabet'].intersection(dfa_2['alphabet']),
    'states': {(dfa_1['initial_state'], dfa_2['initial_state'])},
    'initial_state': (dfa_1['initial_state'], dfa_2['initial_state']),
    'accepting_states': set(),
    'transitions': dict()
}
boundary = set()  # frontier of product states awaiting expansion
boundary.add(intersection['initial_state'])
while boundary:
    (state_dfa_1, state_dfa_2) = boundary.pop()
    # A product state accepts iff both component states accept.
    if state_dfa_1 in dfa_1['accepting_states'] \
            and state_dfa_2 in dfa_2['accepting_states']:
        intersection['accepting_states'].add((state_dfa_1, state_dfa_2))
    for a in intersection['alphabet']:
        # The product transition exists only when both DFAs move on `a`.
        if (state_dfa_1, a) in dfa_1['transitions'] \
                and (state_dfa_2, a) in dfa_2['transitions']:
            next_state_1 = dfa_1['transitions'][state_dfa_1, a]
            next_state_2 = dfa_2['transitions'][state_dfa_2, a]
            if (next_state_1, next_state_2) not in intersection['states']:
                intersection['states'].add((next_state_1, next_state_2))
                boundary.add((next_state_1, next_state_2))
            intersection['transitions'][(state_dfa_1, state_dfa_2), a] = \
                (next_state_1, next_state_2)
return intersection
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dfa_union(dfa_1: dict, dfa_2: dict) -> dict: """ Returns a DFA accepting the union of the input DFAs. Let :math:`A_1 = (Σ, S_1 , s_{01} , ρ_1 , F_1 )` and :math:`A_2 = (Σ, S_2 , s_{02} , ρ_2 , F_2 )` be two completed DFAs. Then there is a DFA :math:`A_∨` that runs simultaneously both :math:`A_1` and :math:`A_2` on the input word and accepts when one of them accepts. It is defined as: :math:`A_∨ = (Σ, S_1 × S_2 , (s_{01} , s_{02} ), ρ, (F_1 × S_2 ) ∪ (S_1 × F_2 ))` where :math:`ρ((s_1 , s_2 ), a) = (s_{X1} , s_{X2} )` iff :math:`s_{X1} = ρ_1 (s_1 , a)` and :math:`s_{X2} = ρ(s_2 , a)` Proposed implementation guarantees resulting DFA has only **reachable** states. :param dict dfa_1: first input DFA; :param dict dfa_2: second input DFA. :return: *(dict)* representing the united DFA. """
|
# NOTE(review): body of dfa_union(dfa_1, dfa_2).  Inputs are deep-copied
# since completion mutates them.
dfa_1 = deepcopy(dfa_1)
dfa_2 = deepcopy(dfa_2)
dfa_1['alphabet'] = dfa_2['alphabet'] = dfa_1['alphabet'].union(
    dfa_2['alphabet'])  # to complete the DFAs over all possible transition
dfa_1 = dfa_completion(dfa_1)
dfa_2 = dfa_completion(dfa_2)
union = {
    'alphabet': dfa_1['alphabet'].copy(),
    'states': {(dfa_1['initial_state'], dfa_2['initial_state'])},
    'initial_state': (dfa_1['initial_state'], dfa_2['initial_state']),
    'accepting_states': set(),
    'transitions': dict()
}
boundary = set()  # frontier of product states awaiting expansion
boundary.add(union['initial_state'])
while boundary:
    (state_dfa_1, state_dfa_2) = boundary.pop()
    # A product state accepts iff either component state accepts.
    if state_dfa_1 in dfa_1['accepting_states'] \
            or state_dfa_2 in dfa_2['accepting_states']:
        union['accepting_states'].add((state_dfa_1, state_dfa_2))
    for a in union['alphabet']:
        # as DFAs are completed they surely have the transition
        next_state_1 = dfa_1['transitions'][state_dfa_1, a]
        next_state_2 = dfa_2['transitions'][state_dfa_2, a]
        if (next_state_1, next_state_2) not in union['states']:
            union['states'].add((next_state_1, next_state_2))
            boundary.add((next_state_1, next_state_2))
        union['transitions'][(state_dfa_1, state_dfa_2), a] = \
            (next_state_1, next_state_2)
return union
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dfa_minimization(dfa: dict) -> dict: """ Returns the minimization of the DFA in input through a greatest fix-point method. Given a completed DFA :math:`A = (Σ, S, s_0 , ρ, F )` there exists a single minimal DFA :math:`A_m` which is equivalent to A, i.e. reads the same language :math:`L(A) = L(A_m)` and with a minimal number of states. To construct such a DFA we exploit bisimulation as a suitable equivalence relation between states. A bisimulation relation :math:`E ∈ S × S` is a relation between states that satisfies the following condition: if :math:`(s, t) ∈ E` then: • s ∈ F iff t ∈ F; • For all :math:`(s_X,a)` such that :math:`ρ(s, a) = s_X`, there exists :math:`t_X` such that :math:`ρ(t, a) = t_X` and :math:`(s_X , t_X ) ∈ E`; • For all :math:`(t_X,a)` such that :math:`ρ(t, a) = t_X` , there exists :math:`s_X` such that :math:`ρ(s, a) = s_X` and :math:`(s_X , t_X ) ∈ E`. :param dict dfa: input DFA. :return: *(dict)* representing the minimized DFA. """
|
# NOTE(review): body of dfa_minimization(dfa): greatest-fixpoint
# bisimulation computation followed by a quotient construction.
dfa = dfa_completion(deepcopy(dfa))
################################################################
### Greatest-fixpoint
z_current = set()
z_next = set()
# First bisimulation condition check (can be done just once)
# s ∈ F iff t ∈ F
for state_s in dfa['states']:
    for state_t in dfa['states']:
        if (
            state_s in dfa['accepting_states']
            and state_t in dfa['accepting_states']
        ) or (
            state_s not in dfa['accepting_states']
            and state_t not in dfa['accepting_states']
        ):
            z_next.add((state_s, state_t))
# Second and third condition of bisimularity check
# Iterate until no pair is removed (greatest fixpoint reached).
while z_current != z_next:
    z_current = z_next
    z_next = z_current.copy()
    for (state_1, state_2) in z_current:
        # for all s0,a s.t. ρ(s, a) = s_0 , there exists t 0
        # s.t. ρ(t, a) = t 0 and (s_0 , t 0 ) ∈ Z i ;
        for a in dfa['alphabet']:
            if (state_1, a) in dfa['transitions'] \
                    and (state_2, a) in dfa['transitions']:
                if (
                    dfa['transitions'][state_1, a],
                    dfa['transitions'][state_2, a]
                ) not in z_current:
                    # successors not related: drop the pair
                    z_next.remove((state_1, state_2))
                    break
            else:
                # action a not possible in state element[0]
                # or element[1]
                # (should not happen on a completed DFA -- defensive)
                z_next.remove((state_1, state_2))
                break
################################################################
### Equivalence Sets
# Group each state with every state bisimilar to it.
equivalence = dict()
for (state_1, state_2) in z_current:
    equivalence.setdefault(state_1, set()).add(state_2)
################################################################
### Minimal DFA construction
dfa_min = {
    'alphabet': dfa['alphabet'].copy(),
    'states': set(),
    'initial_state': dfa['initial_state'],
    'accepting_states': set(),
    'transitions': dfa['transitions'].copy()
}
# select one element for each equivalence set
for equivalence_set in equivalence.values():
    if dfa_min['states'].isdisjoint(equivalence_set):
        # pop() then re-add: pick an arbitrary representative without
        # destroying the set
        e = equivalence_set.pop()
        dfa_min['states'].add(e)  # TODO highlight this instruction
        equivalence_set.add(e)
dfa_min['accepting_states'] = \
    dfa_min['states'].intersection(dfa['accepting_states'])
# Keep only transitions out of representatives; redirect targets that
# are not representatives onto the representative of their class.
for t in dfa['transitions']:
    if t[0] not in dfa_min['states']:
        dfa_min['transitions'].pop(t)
    elif dfa['transitions'][t] not in dfa_min['states']:
        dfa_min['transitions'][t] = \
            equivalence[dfa['transitions'][t]]. \
            intersection(dfa_min['states']).pop()
return dfa_min
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dfa_reachable(dfa: dict) -> dict: """ Side effects on input! Removes unreachable states from a DFA and returns the pruned DFA. It is possible to remove from a DFA A all unreachable states from the initial state without altering the language. The reachable DFA :math:`A_R` corresponding to A is defined as: :math:`A_R = (Σ, S_R , s_0 , ρ|S_R , F ∩ S_R )` where • :math:`S_R` set of reachable state from the initial one • :math:`ρ|S_R` is the restriction on :math:`S_R × Σ` of ρ. :param dict dfa: input DFA. :return: *(dict)* representing the pruned DFA. """
|
# NOTE(review): body of dfa_reachable(dfa) -- side effects on input!
# Breadth-style search from the initial state collecting every state
# reachable through the transition function.
reachable_states = set()  # set of reachable states from root
boundary = set()
reachable_states.add(dfa['initial_state'])
boundary.add(dfa['initial_state'])
while boundary:
    s = boundary.pop()
    for a in dfa['alphabet']:
        if (s, a) in dfa['transitions']:
            if dfa['transitions'][s, a] not in reachable_states:
                reachable_states.add(dfa['transitions'][s, a])
                boundary.add(dfa['transitions'][s, a])
dfa['states'] = reachable_states
dfa['accepting_states'] = \
    dfa['accepting_states'].intersection(dfa['states'])
# Iterate over a copy so entries can be popped from the live dict.
transitions = dfa[
    'transitions'].copy()  # TODO why copy? because for doesn't cycle
# mutable set....
for t in transitions:
    if t[0] not in dfa['states']:
        dfa['transitions'].pop(t)
    elif dfa['transitions'][t] not in dfa['states']:
        dfa['transitions'].pop(t)
return dfa
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dfa_co_reachable(dfa: dict) -> dict: """ Side effects on input! Removes from the DFA all states that do not reach a final state and returns the pruned DFA. It is possible to remove from a DFA A all states that do not reach a final state without altering the language. The co-reachable dfa :math:`A_F` corresponding to A is defined as: :math:`A_F = (Σ, S_F , s_0 , ρ|S_F , F )` where • :math:`S_F` is the set of states that reach a final state • :math:`ρ|S_F` is the restriction on :math:`S_F × Σ` of ρ. :param dict dfa: input DFA. :return: *(dict)* representing the pruned DFA. """
|
# NOTE(review): body of dfa_co_reachable(dfa) -- side effects on input!
# Walk the inverse transition relation backwards from the accepting
# states to find every state that can reach one.
co_reachable_states = dfa['accepting_states'].copy()
boundary = co_reachable_states.copy()
# inverse transition function: target -> {(source, action), ...}
inverse_transitions = dict()
for key, value in dfa['transitions'].items():
    inverse_transitions.setdefault(value, set()).add(key)
while boundary:
    s = boundary.pop()
    if s in inverse_transitions:
        for (state, action) in inverse_transitions[s]:
            if state not in co_reachable_states:
                boundary.add(state)
                co_reachable_states.add(state)
dfa['states'] = co_reachable_states
# If not s_0 ∈ S_F the resulting dfa is empty
if dfa['initial_state'] not in dfa['states']:
    dfa = {
        'alphabet': set(),
        'states': set(),
        'initial_state': None,
        'accepting_states': set(),
        'transitions': dict()
    }
    return dfa
# Restrict the transition function to the surviving states (iterate a
# copy so entries can be popped from the live dict).
transitions = dfa['transitions'].copy()
for t in transitions:
    if t[0] not in dfa['states']:
        dfa['transitions'].pop(t)
    elif dfa['transitions'][t] not in dfa['states']:
        dfa['transitions'].pop(t)
return dfa
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dfa_trimming(dfa: dict) -> dict: """ Side effects on input! Returns the DFA in input trimmed, so both reachable and co-reachable. Given a DFA A, the corresponding trimmed DFA contains only those states that are reachable from the initial state and that lead to a final state. The trimmed dfa :math:`A_{RF}` corresponding to A is defined as :math:`A_{RF} = (Σ, S_R ∩ S_F , s_0 , ρ|S_R∩S_F , F ∩ S_R )` where • :math:`S_R` set of reachable states from the initial state • :math:`S_F` set of states that reaches a final state • :math:`ρ|S_R∩S_F` is the restriction on :math:`(S_R ∩ S_F ) × Σ` of ρ. :param dict dfa: input DFA. :return: *(dict)* representing the trimmed input DFA. """
|
# NOTE(review): body of dfa_trimming(dfa) -- side effects on input!
# Composition of the two pruning passes above.
# Reachable DFA
dfa = dfa_reachable(dfa)
# Co-reachable DFA
dfa = dfa_co_reachable(dfa)
# trimmed DFA
return dfa
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _load_chains(f):
    '''
    Loads all LiftOverChain objects from a file into an array. Returns the result.
    '''
    chains = []
    while True:
        raw = f.readline()
        if not raw:
            # End of file.
            break
        if raw.startswith((b'#', b'\n', b'\r')):
            # Comment or blank line.
            continue
        if raw.startswith(b'chain'):
            # Chain header; the constructor consumes the following data
            # lines directly from the file handle.
            chains.append(LiftOverChain(raw, f))
    return chains
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def checksum(string):
    """ Compute the Luhn checksum for the provided string of digits. Note this assumes the check digit is in place. """
    total = 0
    # Walk digits right-to-left: every second digit (starting with the
    # second-from-last) is doubled and its decimal digits summed, which is
    # the same as sum(divmod(2 * d, 10)) since 2 * d <= 18.
    for position, char in enumerate(reversed(string)):
        digit = int(char)
        if position % 2 == 1:
            doubled = 2 * digit
            total += doubled // 10 + doubled % 10
        else:
            total += digit
    return total % 10
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_blocked(self, ip):
    """Determine if an IP address should be considered blocked.

    An address is allowed (not blocked) when it appears verbatim in
    ``self.allowed_admin_ips`` or lies inside one of the networks listed
    in ``self.allowed_admin_ip_ranges``; every other address is blocked.
    """
    # Exact-string allow list.
    if ip in self.allowed_admin_ips:
        return False
    # Preserve original behavior: with no ranges configured, the address
    # string is never parsed.
    if not self.allowed_admin_ip_ranges:
        return True
    # Parse once instead of re-parsing inside the loop, and stop at the
    # first matching network.
    address = ipaddress.ip_address(ip)
    return not any(
        address in ipaddress.ip_network(allowed_range)
        for allowed_range in self.allowed_admin_ip_ranges
    )
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serve(args):
"""Start a server which will watch .md and .rst files for changes. If a md file changes, the Home Documentation is rebuilt. If a .rst file changes, the updated sphinx project is rebuilt Args: args (ArgumentParser):
flags from the CLI """
|
# Server's parameters
port = args.serve_port or PORT
host = "0.0.0.0"
# Current working directory
dir_path = Path().absolute()
web_dir = dir_path / "site"
# Update routes
utils.set_routes()
# Offline mode: build the mkdocs site once up front and rewrite it for
# use without a network connection.
if args.offline:
    os.environ["MKINX_OFFLINE"] = "true"
    _ = subprocess.check_output("mkdocs build > /dev/null", shell=True)
    utils.make_offline()
class MkinxHTTPHandler(SimpleHTTPRequestHandler):
    """Class routing urls (paths) to projects (resources)
    """
    def translate_path(self, path):
        # default root -> cwd
        location = str(web_dir)
        route = location
        # Map the longest-known route prefix to its project location.
        if len(path) != 0 and path != "/":
            for key, loc in utils.get_routes():
                if path.startswith(key):
                    location = loc
                    path = path[len(key) :]
                    break
        if location[-1] == "/" or not path or path[0] == "/":
            route = location + path
        else:
            route = location + "/" + path
        # Drop any query string before mapping to the filesystem.
        return route.split("?")[0]
# Serve as daemon thread; retry the bind, offering the next port after
# ~20 failed attempts.
success = False
count = 0
print("Waiting for server port...")
try:
    while not success:
        try:
            httpd = socketserver.TCPServer((host, port), MkinxHTTPHandler)
            success = True
        except OSError:
            count += 1
        finally:
            if not success and count > 20:
                s = "port {} seems occupied. Try with {} ? (y/n)"
                if "y" in input(s.format(port, port + 1)):
                    port += 1
                    count = 0
                else:
                    print("You can specify a custom port with mkinx serve -s")
                    return
            time.sleep(0.5)
except KeyboardInterrupt:
    print("Aborting.")
    return
# NOTE(review): allow_reuse_address is set after the socket is already
# bound; to take effect it normally must be set on the class before
# TCPServer binds — confirm intent.
httpd.allow_reuse_address = True
print("\nServing at http://{}:{}\n".format(host, port))
thread = threading.Thread(target=httpd.serve_forever)
thread.daemon = True
thread.start()
# Watch for changes
event_handler = utils.MkinxFileHandler(
    patterns=["*.rst", "*.md", "*.yml", "*.yaml"]
)
observer = Observer()
observer.schedule(event_handler, path=str(dir_path), recursive=True)
observer.start()
# Block the main thread until Ctrl-C, then shut everything down.
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    observer.stop()
    httpd.server_close()
observer.join()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def train(self, X_train, Y_train, X_test, Y_test):
"""Train and validate the LR on a train and test dataset Args: X_train (np.array):
Training data Y_train (np.array):
Training labels X_test (np.array):
Test data Y_test (np.array):
Test labels """
|
# NOTE(review): placeholder implementation — despite the docstring, no
# training happens and none of the four dataset arguments are used.
# Prints a heartbeat once per second and stops after a random number of
# iterations (each pass has a 50% chance of being the last).
while True:
    print(1)
    time.sleep(1)
    if random.randint(0, 9) >= 5:
        break
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download(url, path, kind='file', progressbar=True, replace=False, timeout=10., verbose=True):
"""Download a URL. This will download a file and store it in a '~/data/` folder, creating directories if need be. It will also work for zip files, in which case it will unzip all of the files to the desired location. Parameters url : string The url of the file to download. This may be a dropbox or google drive "share link", or a regular URL. If it is a share link, then it should point to a single file and not a folder. To download folders, zip them first. path : string The path where the downloaded file will be stored. If ``zipfile`` is True, then this must be a folder into which files will be zipped. kind : one of ['file', 'zip', 'tar', 'tar.gz'] The kind of file to be downloaded. If not 'file', then the file contents will be unpackaged according to the kind specified. Package contents will be placed in ``root_destination/<name>``. progressbar : bool Whether to display a progress bar during file download. replace : bool If True and the URL points to a single file, overwrite the old file if possible. timeout : float The URL open timeout. verbose : bool Whether to print download status to the screen. Returns ------- out_path : string A path to the downloaded file (or folder, in the case of a zip file). """
|
# Validate the requested archive kind up front.
if kind not in ALLOWED_KINDS:
    raise ValueError('`kind` must be one of {}, got {}'.format(
        ALLOWED_KINDS, kind))
# Make sure we have directories to dump files
path = op.expanduser(path)
if len(path) == 0:
    raise ValueError('You must specify a path. For current directory use .')
download_url = _convert_url_to_downloadable(url)
# Three mutually exclusive outcomes: skip (exists and replace=False),
# download+extract (archive kinds), or plain download.
if replace is False and op.exists(path):
    msg = ('Replace is False and data exists, so doing nothing. '
           'Use replace==True to re-download the data.')
elif kind in ZIP_KINDS:
    # Create new folder for data if we need it
    if not op.isdir(path):
        if verbose:
            tqdm.write('Creating data folder...')
        os.makedirs(path)
    # Download the file to a temporary folder to unzip
    path_temp = _TempDir()
    path_temp_file = op.join(path_temp, "tmp.{}".format(kind))
    _fetch_file(download_url, path_temp_file, timeout=timeout,
                verbose=verbose)
    # Unzip the file to the out path
    if verbose:
        tqdm.write('Extracting {} file...'.format(kind))
    if kind == 'zip':
        zipper = ZipFile
    elif kind == 'tar':
        zipper = tarfile.open
    elif kind == 'tar.gz':
        zipper = partial(tarfile.open, mode='r:gz')
    with zipper(path_temp_file) as myobj:
        myobj.extractall(path)
    msg = 'Successfully downloaded / unzipped to {}'.format(path)
else:
    # Single file: ensure the parent directory exists, then fetch.
    if not op.isdir(op.dirname(path)):
        os.makedirs(op.dirname(path))
    _fetch_file(download_url, path, timeout=timeout, verbose=verbose)
    msg = 'Successfully downloaded file to {}'.format(path)
if verbose:
    tqdm.write(msg)
return path
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _convert_url_to_downloadable(url):
    """Convert a url to the proper style depending on its website."""
    if 'drive.google.com' in url:
        # Rewrite a Drive share link ('.../d/<id>/...') into a direct
        # download URL for that file id.
        file_id = url.split('d/')[1].split('/')[0]
        return 'https://drive.google.com/uc?export=download&id=' + file_id
    if 'dropbox.com' in url:
        # Force direct download (dl=1) instead of the preview page (dl=0).
        return url + '?dl=1' if url.endswith('.png') else url.replace('dl=0', 'dl=1')
    if 'github.com' in url:
        # Point at the raw file contents instead of the HTML blob view.
        return url.replace('github.com', 'raw.githubusercontent.com').replace('blob/', '')
    return url
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def md5sum(fname, block_size=1048576):  # 2 ** 20
    """Calculate the md5sum for a file.

    Parameters
    ----------
    fname : str
        Filename.
    block_size : int
        Block size to use when reading.

    Returns
    -------
    hash_ : str
        The hexadecimal digest of the hash.
    """
    digest = hashlib.md5()
    with open(fname, 'rb') as fid:
        # Feed the hash in fixed-size chunks so arbitrarily large files
        # can be hashed without loading them into memory; iter() stops at
        # the b'' sentinel returned at EOF.
        for chunk in iter(lambda: fid.read(block_size), b''):
            digest.update(chunk)
    return digest.hexdigest()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _chunk_write(chunk, local_file, progress):
    """Write a chunk to file and update the progress bar."""
    local_file.write(chunk)
    # A progress bar is optional; skip the update when none was supplied.
    if progress is None:
        return
    progress.update(len(chunk))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sizeof_fmt(num):
    """Turn number of bytes into human-readable str.

    Parameters
    ----------
    num : int
        The number of bytes.

    Returns
    -------
    size : str
        The size in human-readable format.

    Raises
    ------
    ValueError
        If ``num`` is negative or a fraction below one (previously these
        inputs fell through every branch and silently returned None).
    """
    units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB']
    decimals = [0, 0, 1, 2, 2, 2]
    # Handle the exact small values first so the generic branch below only
    # ever sees num > 1 (log() requires a positive argument).
    if num == 0:
        return '0 bytes'
    if num == 1:
        return '1 byte'
    if num > 1:
        exponent = min(int(log(num, 1024)), len(units) - 1)
        quotient = float(num) / 1024 ** exponent
        unit = units[exponent]
        num_decimals = decimals[exponent]
        format_string = '{0:.%sf} {1}' % (num_decimals)
        return format_string.format(quotient, unit)
    raise ValueError('Expected a non-negative number of bytes, got %r' % (num,))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def retry(f, exc_classes=DEFAULT_EXC_CLASSES, logger=None,
          retry_log_level=logging.INFO,
          retry_log_message="Connection broken in '{f}' (error: '{e}'); "
                            "retrying with new connection.",
          max_failures=None, interval=0,
          max_failure_log_level=logging.ERROR,
          max_failure_log_message="Max retries reached for '{f}'. Aborting."):
    """
    Decorator to automatically reexecute a function if the connection is
    broken for any reason.

    Retries forever unless ``max_failures`` is given, sleeping ``interval``
    seconds (via gevent) between attempts; any exception not listed in
    ``exc_classes`` propagates immediately.
    """
    exc_classes = tuple(exc_classes)

    @wraps(f)
    def deco(*args, **kwargs):
        failures = 0
        while True:
            try:
                return f(*args, **kwargs)
            except exc_classes as e:
                if logger is not None:
                    # Fix: f.func_name is Python 2 only; use f.__name__
                    # so logging does not raise AttributeError.
                    logger.log(retry_log_level,
                               retry_log_message.format(f=f.__name__, e=e))
                gevent.sleep(interval)
                failures += 1
                if max_failures is not None and failures > max_failures:
                    if logger is not None:
                        logger.log(max_failure_log_level,
                                   max_failure_log_message.format(
                                       f=f.__name__, e=e))
                    # Re-raise the last connection error to the caller.
                    raise
    return deco
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self):
""" Get a connection from the pool, to make and receive traffic. If the connection fails for any reason (socket.error), it is dropped and a new one is scheduled. Please use @retry as a way to automatically retry whatever operation you were performing. """
|
# Body of a generator-based context manager: take a connection from the
# pool, yield it to the caller, and decide on exit whether it may be
# returned to the pool.
self.lock.acquire()
try:
    c = self.conn.popleft()
    yield c
except self.exc_classes:
    # The current connection has failed, drop it and create a new one
    # NOTE(review): the lock is not released on this path — confirm a
    # failed connection is really meant to leave the lock held (looks
    # like a potential deadlock).
    gevent.spawn_later(1, self._addOne)
    raise
except:
    # Any other error: the connection itself is fine, so return it to
    # the pool before re-raising.
    # NOTE(review): if popleft() itself raised (empty pool), `c` is
    # unbound here and the append would raise NameError — confirm the
    # pool can never be empty at this point.
    self.conn.append(c)
    self.lock.release()
    raise
else:
    # NOTE: cannot use finally because MUST NOT reuse the connection
    # if it failed (socket.error)
    self.conn.append(c)
    self.lock.release()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def eff_default_transformer(fills=EFF_DEFAULT_FILLS):
""" Return a simple transformer function for parsing EFF annotations. N.B., ignores all but the first effect. """
|
def _transformer(vals):
    # No EFF values at all: fall back to the fill values.
    if len(vals) == 0:
        return fills
    else:
        # ignore all but first effect
        match_eff_main = _prog_eff_main.match(vals[0])
        if match_eff_main is None:
            logging.warning(
                'match_eff_main is None: vals={}'.format(str(vals[0]))
            )
            return fills
        # Effect name (group 1) followed by its pipe-separated
        # sub-fields (group 2).
        eff = [match_eff_main.group(1)] \
            + match_eff_main.group(2).split(b'|')
        # Convert the first 11 fields: empty -> fill value; fields 5 and
        # 10 are ints; field 8 is encoded as 1 for b'CODING', else 0;
        # everything else kept as raw bytes.
        result = tuple(
            fill if v == b''
            else int(v) if i == 5 or i == 10
            else (1 if v == b'CODING' else 0) if i == 8
            else v
            for i, (v, fill) in enumerate(list(zip(eff, fills))[:11])
        )
        return result
return _transformer
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ann_default_transformer(fills=ANN_DEFAULT_FILLS):
""" Return a simple transformer function for parsing ANN annotations. N.B., ignores all but the first effect. """
|
def _transformer(vals):
    # No ANN values at all: fall back to the fill values.
    if len(vals) == 0:
        return fills
    else:
        # ignore all but first effect
        ann = vals[0].split(b'|')
        # Fields 11-13 are pair-valued; split each into two entries so
        # the tuple below has one slot per scalar.
        ann = ann[:11] + _ann_split2(ann[11]) + _ann_split2(ann[12]) + \
            _ann_split2(ann[13]) + ann[14:]
        # Convert the first 18 fields: empty -> fill value; field 8
        # keeps only the numerator of an "n/m" value; fields 11-17 are
        # plain ints; everything else kept as raw bytes.
        result = tuple(
            fill if v == b''
            else int(v.partition(b'/')[0]) if i == 8
            else int(v) if 11 <= i < 18
            else v
            for i, (v, fill) in enumerate(list(zip(ann, fills))[:18])
        )
        return result
return _transformer
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def overloaded(func):
""" Introduces a new overloaded function and registers its first implementation. """
|
fn = unwrap(func)
ensure_function(fn)
def dispatcher(*args, **kwargs):
    # Resolve the implementation for this argument combination, caching
    # the result keyed on the argument types (and, for "complex"
    # container parameters, on their element types too).
    resolved = None
    if dispatcher.__complex_parameters:
        cache_key_pos = []
        cache_key_kw = []
        # argset 0 = positional arguments, argset 1 = keyword arguments.
        for argset in (0, 1) if kwargs else (0,):
            if argset == 0:
                arg_pairs = enumerate(args)
                complexity_mapping = dispatcher.__complex_positions
            else:
                arg_pairs = kwargs.items()
                complexity_mapping = dispatcher.__complex_parameters
            for id, arg in arg_pairs:
                type_ = type(arg)
                element_type = None
                if id in complexity_mapping:
                    # Sample the first element to refine the cache key.
                    try:
                        element = next(iter(arg))
                    except TypeError:
                        pass
                    except StopIteration:
                        element_type = _empty
                    else:
                        complexity = complexity_mapping[id]
                        if complexity & 8 and isinstance(arg, tuple):
                            element_type = tuple(type(el) for el in arg)
                        elif complexity & 4 and hasattr(arg, 'keys'):
                            element_type = (type(element), type(arg[element]))
                        else:
                            element_type = type(element)
                if argset == 0:
                    cache_key_pos.append((type_, element_type))
                else:
                    cache_key_kw.append((id, type_, element_type))
    else:
        # Fast path: plain argument types are enough.
        cache_key_pos = (type(arg) for arg in args)
        cache_key_kw = ((name, type(arg)) for (name, arg) in kwargs.items()) if kwargs else None
    cache_key = (tuple(cache_key_pos),
                 tuple(sorted(cache_key_kw)) if kwargs else None)
    try:
        resolved = dispatcher.__cache[cache_key]
    except KeyError:
        resolved = find(dispatcher, args, kwargs)
        if resolved:
            dispatcher.__cache[cache_key] = resolved
    if resolved:
        # Run optional before/after hooks around the resolved function.
        before = dispatcher.__hooks['before']
        after = dispatcher.__hooks['after']
        if before:
            before(*args, **kwargs)
        result = resolved(*args, **kwargs)
        if after:
            after(*args, **kwargs)
        return result
    else:
        return error(dispatcher.__name__)
# Per-dispatcher registration state.
dispatcher.__dict__.update(
    __functions = [],
    __hooks = {'before': None, 'after': None},
    __cache = {},
    __complex_positions = {},
    __complex_parameters = {},
    __maxlen = 0,
)
for attr in ('__module__', '__name__', '__qualname__', '__doc__'):
    setattr(dispatcher, attr, getattr(fn, attr, None))
if is_void(fn):
    # Void stub: introduces the overloaded name without registering an
    # implementation.
    update_docstring(dispatcher, fn)
    return dispatcher
else:
    update_docstring(dispatcher)
    return register(dispatcher, func)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register(dispatcher, func, *, hook=None):
""" Registers `func` as an implementation on `dispatcher`. """
|
# Unwrap classmethod/staticmethod so we register the raw function but can
# re-wrap the return value with the same descriptor type.
wrapper = None
if isinstance(func, (classmethod, staticmethod)):
    wrapper = type(func)
    func = func.__func__
ensure_function(func)
if isinstance(dispatcher, (classmethod, staticmethod)):
    wrapper = None
dp = unwrap(dispatcher)
try:
    dp.__functions
except AttributeError:
    raise OverloadingError("%r has not been set up as an overloaded function." % dispatcher)
fn = unwrap(func)
if hook:
    # Register a before/after hook instead of an implementation.
    dp.__hooks[hook] = func
else:
    signature = get_signature(fn)
    # Every annotation must be an actual type.
    for i, type_ in enumerate(signature.types):
        if not isinstance(type_, type):
            raise OverloadingError(
                "Failed to overload function '{0}': parameter '{1}' has "
                "an annotation that is not a type."
                .format(dp.__name__, signature.parameters[i]))
    # Reject signatures indistinguishable from an existing registration.
    for fninfo in dp.__functions:
        dup_sig = sig_cmp(signature, fninfo.signature)
        if dup_sig and signature.has_varargs == fninfo.signature.has_varargs:
            raise OverloadingError(
                "Failed to overload function '{0}': non-unique signature ({1})."
                .format(dp.__name__, str.join(', ', (_repr(t) for t in dup_sig))))
    # All clear; register the function.
    dp.__functions.append(FunctionInfo(func, signature))
    dp.__cache.clear()
    dp.__maxlen = max(dp.__maxlen, len(signature.parameters))
    if typing:
        # For each parameter position and name, compute a bitwise union of complexity
        # values over all registered signatures. Retain the result for parameters where
        # a nonzero value occurs at least twice and at least one of those values is >= 2.
        # Such parameters require deep type-checking during function resolution.
        position_values = defaultdict(lambda: 0)
        keyword_values = defaultdict(lambda: 0)
        position_counter = Counter()
        keyword_counter = Counter()
        for fninfo in dp.__functions:
            sig = fninfo.signature
            complex_positions = {i: v for i, v in enumerate(sig.complexity) if v}
            complex_keywords = {p: v for p, v in zip(sig.parameters, sig.complexity) if v}
            for i, v in complex_positions.items():
                position_values[i] |= v
            for p, v in complex_keywords.items():
                keyword_values[p] |= v
            position_counter.update(complex_positions.keys())
            keyword_counter.update(complex_keywords.keys())
        dp.__complex_positions = {
            i: v for i, v in position_values.items() if v >= 2 and position_counter[i] > 1}
        dp.__complex_parameters = {
            p: v for p, v in keyword_values.items() if v >= 2 and keyword_counter[p] > 1}
if wrapper is None:
    wrapper = lambda x: x
if func.__name__ == dp.__name__:
    # The returned function is going to be bound to the invocation name
    # in the calling scope, so keep returning the dispatcher.
    return wrapper(dispatcher)
else:
    return wrapper(func)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find(dispatcher, args, kwargs):
""" Given the arguments contained in `args` and `kwargs`, returns the best match from the list of implementations registered on `dispatcher`. """
|
# Score every registered implementation against the call's arguments and
# return the best-scoring candidate (or None when nothing is applicable).
matches = []
full_args = args
full_kwargs = kwargs
for func, sig in dispatcher.__functions:
    params = sig.parameters
    param_count = len(params)
    # Filter out arguments that will be consumed by catch-all parameters
    # or by keyword-only parameters.
    if sig.has_varargs:
        args = full_args[:param_count]
    else:
        args = full_args
    if sig.has_varkw or sig.has_kwonly:
        kwargs = {kw: full_kwargs[kw] for kw in params if kw in full_kwargs}
    else:
        kwargs = full_kwargs
    kwarg_set = set(kwargs)
    arg_count = len(args) + len(kwargs)
    optional_count = len(sig.defaults)
    required_count = param_count - optional_count
    # Consider candidate functions that satisfy basic conditions:
    # - argument count matches signature
    # - all keyword arguments are recognized.
    if not 0 <= param_count - arg_count <= optional_count:
        continue
    if kwargs and not kwarg_set <= set(params):
        continue
    if kwargs and args and kwarg_set & set(params[:len(args)]):
        raise TypeError("%s() got multiple values for the same parameter"
                        % dispatcher.__name__)
    # Score components (compared lexicographically, larger is better).
    arg_score = arg_count # >= 0
    type_score = 0
    specificity_score = [None] * dispatcher.__maxlen
    sig_score = required_count
    var_score = -sig.has_varargs
    indexed_kwargs = ((params.index(k), v) for k, v in kwargs.items()) if kwargs else ()
    for param_pos, value in chain(enumerate(args), indexed_kwargs):
        param_name = params[param_pos]
        # An explicit None matches a parameter whose default is None.
        if value is None and sig.defaults.get(param_name, _empty) is None:
            expected_type = type(None)
        else:
            expected_type = sig.types[param_pos]
        specificity = compare(value, expected_type)
        if specificity[0] == -1:
            # Type mismatch: this candidate is out.
            break
        specificity_score[param_pos] = specificity
        type_score += 1
    else:
        score = (arg_score, type_score, specificity_score, sig_score, var_score)
        matches.append(Match(score, func, sig))
if matches:
    if len(matches) > 1:
        matches.sort(key=lambda m: m.score, reverse=True)
        if DEBUG:
            assert matches[0].score > matches[1].score
    return matches[0].func
else:
    return None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_signature(func):
""" Gathers information about the call signature of `func`. """
|
# Introspect the code object directly rather than via inspect.signature.
code = func.__code__
# Names of regular parameters
parameters = tuple(code.co_varnames[:code.co_argcount])
# Flags
has_varargs = bool(code.co_flags & inspect.CO_VARARGS)
has_varkw = bool(code.co_flags & inspect.CO_VARKEYWORDS)
has_kwonly = bool(code.co_kwonlyargcount)
# A mapping of parameter names to default values
default_values = func.__defaults__ or ()
defaults = dict(zip(parameters[-len(default_values):], default_values))
# Type annotations for all parameters
type_hints = typing.get_type_hints(func) if typing else func.__annotations__
types = tuple(normalize_type(type_hints.get(param, AnyType)) for param in parameters)
# Type annotations for required parameters
required = types[:-len(defaults)] if defaults else types
# Complexity
complexity = tuple(map(type_complexity, types)) if typing else None
return Signature(parameters, types, complexity, defaults, required,
                 has_varargs, has_varkw, has_kwonly)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def normalize_type(type_, level=0):
""" Reduces an arbitrarily complex type declaration into something manageable. """
|
# NOTE(review): relies on pre-3.7 typing internals (TypingMeta,
# __union_params__, __tuple_params__, GenericMeta) — tied to the typing
# version this project pins.
# Plain classes (or typing unavailable) need no normalization.
if not typing or not isinstance(type_, typing.TypingMeta) or type_ is AnyType:
    return type_
if isinstance(type_, typing.TypeVar):
    # Unconstrained type variables behave like Any.
    if type_.__constraints__ or type_.__bound__:
        return type_
    else:
        return AnyType
if issubclass(type_, typing.Union):
    if not type_.__union_params__:
        raise OverloadingError("typing.Union must be parameterized")
    return typing.Union[tuple(normalize_type(t, level) for t in type_.__union_params__)]
if issubclass(type_, typing.Tuple):
    params = type_.__tuple_params__
    # Nested tuples are flattened to the bare Tuple type.
    if level > 0 or params is None:
        return typing.Tuple
    elif type_.__tuple_use_ellipsis__:
        return typing.Tuple[normalize_type(params[0], level + 1), ...]
    else:
        return typing.Tuple[tuple(normalize_type(t, level + 1) for t in params)]
if issubclass(type_, typing.Callable):
    return typing.Callable
if isinstance(type_, typing.GenericMeta):
    base = find_base_generic(type_)
    if base is typing.Generic:
        return type_
    else:
        return GenericWrapper(type_, base, level > 0)
raise OverloadingError("%r not supported yet" % type_)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def type_complexity(type_):
"""Computes an indicator for the complexity of `type_`. If the return value is 0, the supplied type is not parameterizable. Otherwise, set bits in the return value denote the following features: - bit 0: The type could be parameterized but is not. - bit 1: The type represents an iterable container with 1 constrained type parameter. - bit 2: The type represents a mapping with a constrained value type (2 parameters). - bit 3: The type represents an n-tuple (n parameters). Since these features are mutually exclusive, only a `Union` can have more than one bit set. """
|
# Non-parameterizable types contribute no complexity.
if (not typing
        or not isinstance(type_, (typing.TypingMeta, GenericWrapperMeta))
        or type_ is AnyType):
    return 0
if issubclass(type_, typing.Union):
    # A union's complexity is the union of its members' complexities.
    return reduce(operator.or_, map(type_complexity, type_.__union_params__))
if issubclass(type_, typing.Tuple):
    if type_.__tuple_params__ is None:
        return 1
    elif type_.__tuple_use_ellipsis__:
        return 2
    else:
        return 8
if isinstance(type_, GenericWrapperMeta):
    # Count constrained type parameters, scanning from the last one.
    # NOTE(review): once a constrained parameter is seen (type_count > 0),
    # every earlier parameter also increments the count regardless of its
    # own constraint — presumably because a constrained later parameter
    # forces checking of the preceding ones; confirm intent.
    type_count = 0
    for p in reversed(type_.parameters):
        if type_count > 0:
            type_count += 1
        if p is AnyType:
            continue
        if not isinstance(p, typing.TypeVar) or p.__constraints__ or p.__bound__:
            type_count += 1
    return 1 << min(type_count, 2)
return 0
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_base_generic(type_):
    """Locates the underlying generic whose structure and behavior are known.

    For example, the base generic of a type that inherits from
    `typing.Mapping[T, int]` is `typing.Mapping`.
    """
    # The first MRO entry that lives in the typing module is the known
    # generic; returns None implicitly when there is none.
    match = next(
        (t for t in type_.__mro__ if t.__module__ == typing.__name__),
        None,
    )
    if match is not None:
        return first_origin(match)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_generic_bases(type_):
    """Iterates over all generics `type_` derives from, including origins.

    This function is only necessary because, in typing 3.5.0, a generic
    doesn't get included in the list of bases when it constructs a
    parameterized version of itself. This was fixed in aab2c59; now it
    would be enough to just iterate over the MRO.
    """
    for base in type_.__mro__:
        if not isinstance(base, typing.GenericMeta):
            continue
        # Yield the base itself, then walk its chain of origins.
        current = base
        while current:
            yield current
            current = current.__origin__
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sig_cmp(sig1, sig2):
""" Compares two normalized type signatures for validation purposes. """
|
# Compare the required-parameter types of two signatures; returns a tuple
# of per-parameter matches when they collide, False otherwise.
types1 = sig1.required
types2 = sig2.required
if len(types1) != len(types2):
    return False
dup_pos = []
dup_kw = {}
# Positional comparison; the for/else returns only when every pair
# matched without hitting `break`.
for t1, t2 in zip(types1, types2):
    match = type_cmp(t1, t2)
    if match:
        dup_pos.append(match)
    else:
        break
else:
    return tuple(dup_pos)
# A positional mismatch may still collide by keyword: the remaining
# parameters must have the same names and matching types per name.
kw_range = slice(len(dup_pos), len(types1))
kwds1 = sig1.parameters[kw_range]
kwds2 = sig2.parameters[kw_range]
if set(kwds1) != set(kwds2):
    return False
kwtypes1 = dict(zip(sig1.parameters, types1))
kwtypes2 = dict(zip(sig2.parameters, types2))
for kw in kwds1:
    match = type_cmp(kwtypes1[kw], kwtypes2[kw])
    if match:
        dup_kw[kw] = match
    else:
        break
else:
    return tuple(dup_pos), dup_kw
return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_void(func):
    """
    Determines if a function is a void function, i.e., one whose body
    contains nothing but a docstring or an ellipsis. A void function can
    be used to introduce an overloaded function without actually
    registering an implementation.
    """
    try:
        source = dedent(inspect.getsource(func))
    except (OSError, IOError):
        # No source available (e.g. defined interactively).
        return False
    # The first child of the parsed module is the function definition.
    fdef = next(ast.iter_child_nodes(ast.parse(source)))
    if type(fdef) is not ast.FunctionDef:
        return False
    if len(fdef.body) != 1:
        return False
    only_stmt = fdef.body[0]
    return type(only_stmt) is ast.Expr and type(only_stmt.value) in {ast.Str, ast.Ellipsis}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def derive_configuration(cls):
""" Collect the nearest type variables and effective parameters from the type, its bases, and their origins as necessary. """
|
# Resolve the effective type variables / parameters of cls.type against
# its base generic, handling two vintages of the typing module.
base_params = cls.base.__parameters__
if hasattr(cls.type, '__args__'):
    # typing as of commit abefbe4
    # Walk the generic bases, mapping each base type variable to either
    # the nearest type variable (tvars) or a concrete type (types).
    tvars = {p: p for p in base_params}
    types = {}
    for t in iter_generic_bases(cls.type):
        if t is cls.base:
            type_vars = tuple(tvars[p] for p in base_params)
            parameters = (types.get(tvar, tvar) for tvar in type_vars)
            break
        if t.__args__:
            for arg, tvar in zip(t.__args__, t.__origin__.__parameters__):
                if isinstance(arg, typing.TypeVar):
                    tvars[tvar] = tvars.get(arg, arg)
                else:
                    types[tvar] = arg
else:
    # typing 3.5.0
    # Fill each parameter slot with the first TypeVar seen in the MRO.
    tvars = [None] * len(base_params)
    for t in iter_generic_bases(cls.type):
        for i, p in enumerate(t.__parameters__):
            if tvars[i] is None and isinstance(p, typing.TypeVar):
                tvars[i] = p
        if all(tvars):
            type_vars = tvars
            parameters = cls.type.__parameters__
            break
cls.type_vars = type_vars
cls.parameters = tuple(normalize_type(p, 1) for p in parameters)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nfa_intersection(nfa_1: dict, nfa_2: dict) -> dict:
    """Returns a NFA that reads the intersection of the NFAs in input.

    Standard product construction: states are pairs (s, t) of states of
    the two automata, a pair is initial/accepting iff both components
    are, and a transition exists iff both components move on the symbol.

    :param dict nfa_1: first input NFA;
    :param dict nfa_2: second input NFA;
    :return: *(dict)* representing the intersected NFA.
    """
    result = {
        'alphabet': nfa_1['alphabet'].intersection(nfa_2['alphabet']),
        'states': set(),
        'initial_states': {
            (init_1, init_2)
            for init_1 in nfa_1['initial_states']
            for init_2 in nfa_2['initial_states']
        },
        'accepting_states': set(),
        'transitions': dict(),
    }
    result['states'].update(result['initial_states'])
    # Explore product states reachable from the initial pairs.
    frontier = set(result['initial_states'])
    while frontier:
        current_1, current_2 = frontier.pop()
        if (current_1 in nfa_1['accepting_states']
                and current_2 in nfa_2['accepting_states']):
            result['accepting_states'].add((current_1, current_2))
        for symbol in result['alphabet']:
            targets_1 = nfa_1['transitions'].get((current_1, symbol))
            targets_2 = nfa_2['transitions'].get((current_2, symbol))
            if not targets_1 or not targets_2:
                # No joint move on this symbol.
                continue
            for dest_1 in targets_1:
                for dest_2 in targets_2:
                    pair = (dest_1, dest_2)
                    if pair not in result['states']:
                        result['states'].add(pair)
                        frontier.add(pair)
                    result['transitions'].setdefault(
                        ((current_1, current_2), symbol), set()).add(pair)
                    if (dest_1 in nfa_1['accepting_states']
                            and dest_2 in nfa_2['accepting_states']):
                        result['accepting_states'].add(pair)
    return result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nfa_union(nfa_1: dict, nfa_2: dict) -> dict:
    """Returns a NFA that reads the union of the NFAs in input.

    The united NFA nondeterministically chooses one of the two automata
    and runs it: alphabets, states, initial states, accepting states and
    transitions are all united. Pay attention to avoid having the NFAs
    with state names in common, in case use
    :mod:`PySimpleAutomata.NFA.rename_nfa_states` first.

    :param dict nfa_1: first input NFA;
    :param dict nfa_2: second input NFA.
    :return: *(dict)* representing the united NFA.
    """
    # Bug fix: the previous implementation shallow-copied nfa_1's
    # transition dict and then mutated the shared destination sets via
    # setdefault(...).add(...), corrupting nfa_1. Copy each set instead.
    transitions = {key: set(value)
                   for key, value in nfa_1['transitions'].items()}
    for key, value in nfa_2['transitions'].items():
        transitions.setdefault(key, set()).update(value)
    return {
        'alphabet': nfa_1['alphabet'].union(nfa_2['alphabet']),
        'states': nfa_1['states'].union(nfa_2['states']),
        'initial_states':
            nfa_1['initial_states'].union(nfa_2['initial_states']),
        'accepting_states':
            nfa_1['accepting_states'].union(nfa_2['accepting_states']),
        'transitions': transitions,
    }
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nfa_determinization(nfa: dict) -> dict: """ Returns a DFA that reads the same language of the input NFA. Let A be an NFA, then there exists a DFA :math:`A_d` such that :math:`L(A_d) = L(A)`. Intuitively, :math:`A_d` collapses all possible runs of A on a given input word into one run over a larger state set. :math:`A_d` is defined as: :math:`A_d = (Σ, 2^S , s_0 , ρ_d , F_d )` where: • :math:`2^S` , i.e., the state set of :math:`A_d` , consists of all sets of states S in A; • :math:`s_0 = S^0` , i.e., the single initial state of :math:`A_d` is the set :math:`S_0` of initial states of A; • :math:`F_d = \{Q | Q ∩ F ≠ ∅\}`, i.e., the collection of sets of states that intersect F nontrivially; • :math:`ρ_d(Q, a) = \{s' | (s,a, s' ) ∈ ρ\ for\ some\ s ∈ Q\}`. :param dict nfa: input NFA. :return: *(dict)* representing a DFA """
|
def state_name(s):
    # Canonical printable label for a subset of NFA states.
    # NOTE(review): sorted() returns a list whose order is immediately
    # discarded by set(); str() of a set depends on hash/iteration order,
    # which is only stable within one process run — confirm no caller
    # compares these names across runs.
    return str(set(sorted(s)))
dfa = {
    'alphabet': nfa['alphabet'].copy(),
    'initial_state': None,
    'states': set(),
    'accepting_states': set(),
    'transitions': dict()
}
# With no initial states the result keeps initial_state = None.
if len(nfa['initial_states']) > 0:
    dfa['initial_state'] = state_name(nfa['initial_states'])
    dfa['states'].add(state_name(nfa['initial_states']))
# Classic subset construction: BFS over subsets of NFA states.
sets_states = list()  # subsets discovered so far
sets_queue = list()   # subsets still to be expanded
sets_queue.append(nfa['initial_states'])
sets_states.append(nfa['initial_states'])
if len(sets_states[0].intersection(nfa['accepting_states'])) > 0:
    dfa['accepting_states'].add(state_name(sets_states[0]))
while sets_queue:
    current_set = sets_queue.pop(0)
    for a in dfa['alphabet']:
        # Union of all NFA successors of current_set under symbol a.
        next_set = set()
        for state in current_set:
            if (state, a) in nfa['transitions']:
                for next_state in nfa['transitions'][state, a]:
                    next_set.add(next_state)
        if len(next_set) == 0:
            # No successor: the DFA stays partial on (current_set, a).
            continue
        if next_set not in sets_states:
            sets_states.append(next_set)
            sets_queue.append(next_set)
            dfa['states'].add(state_name(next_set))
            # A subset intersecting F is accepting (F_d definition).
            if next_set.intersection(nfa['accepting_states']):
                dfa['accepting_states'].add(state_name(next_set))
        dfa['transitions'][state_name(current_set), a] = state_name(next_set)
return dfa
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nfa_complementation(nfa: dict) -> dict: """ Returns a DFA reading the complemented language read by input NFA. Complement a nondeterministic automaton is possible complementing the determinization of it. The construction is effective, but it involves an exponential blow-up, since determinization involves an unavoidable exponential blow-up (i.e., if NFA has n states, then the DFA has :math:`2^n` states). :param dict nfa: input NFA. :return: *(dict)* representing a completed DFA. """
|
# Subset-construct an equivalent DFA (exponential blow-up), then
# complement that DFA (delegated to the DFA module).
determinized_nfa = nfa_determinization(nfa)
return DFA.dfa_complementation(determinized_nfa)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nfa_word_acceptance(nfa: dict, word: list) -> bool: """ Checks if a given word is accepted by a NFA. The word w is accepted by a NFA if exists at least an accepting run on w. :param dict nfa: input NFA; :param list word: list of symbols ∈ nfa['alphabet']; :return: *(bool)*, True if the word is accepted, False otherwise. """
|
current_level = set()
current_level = current_level.union(nfa['initial_states'])
next_level = set()
for action in word:
for state in current_level:
if (state, action) in nfa['transitions']:
next_level.update(nfa['transitions'][state, action])
if len(next_level) < 1:
return False
current_level = next_level
next_level = set()
if current_level.intersection(nfa['accepting_states']):
return True
else:
return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def overwrite_view_source(project, dir_path):
"""In the project's index.html built file, replace the top "source" link with a link to the documentation's home, which is mkdoc's home Args: project (str):
project to update dir_path (pathlib.Path):
this file's path """
|
# Location of the project's built HTML; silently skip projects that
# have not been built yet.
project_html_location = dir_path / project / HTML_LOCATION
if not project_html_location.exists():
    return
# Only rewrite HTML files (suffix check also matches e.g. ".html.bak").
files_to_overwrite = [
    f for f in project_html_location.iterdir() if "html" in f.suffix
]
for html_file in files_to_overwrite:
    with open(html_file, "r") as f:
        html = f.readlines()
    # Replace only the first matching "source" link line per file.
    for i, l in enumerate(html):
        if TO_REPLACE_WITH_HOME in l:
            html[i] = NEW_HOME_LINK
            break
    with open(html_file, "w") as f:
        f.writelines(html)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_listed_projects():
"""Find the projects listed in the Home Documentation's index.md file Returns: set(str):
projects' names, with the '/' in their beginings """
|
# The Home Documentation's index is expected at ./docs/index.md
index_path = Path().resolve() / "docs" / "index.md"
with open(index_path, "r") as index_file:
    lines = index_file.readlines()
listed_projects = set()
project_section = False  # becomes True once PROJECT_KEY has been seen
for _, l in enumerate(lines):
    idx = l.find(PROJECT_KEY)
    if idx >= 0:
        project_section = True
    if project_section:
        # Find first parenthesis after the key
        start = l.find("](")
        if start > 0:
            # Take the target of the first markdown link on the line:
            # the text between "](" and the next ")".
            closing_parenthesis = sorted(
                [m.start() for m in re.finditer(r"\)", l) if m.start() > start]
            )[0]
            project = l[start + 2 : closing_parenthesis]
            listed_projects.add(project)
    # Once at least one project has been collected, the next heading
    # line (any line starting with "#") ends the Projects section.
    # This lets single "#" appear in the projects' descriptions before
    # any project link has been found, but not after.
    if len(listed_projects) > 0 and l.startswith("#"):
        return listed_projects
return listed_projects
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_offline():
"""Deletes references to the external google fonts in the Home Documentation's index.html file """
|
# The current working directory is assumed to be the Home Documentation
# root (the built site lives under ./site).
dir_path = Path(os.getcwd()).absolute()
css_path = dir_path / "site" / "assets" / "stylesheets"
material_css = css_path / "material-style.css"
if not material_css.exists():
    # First run: copy the offline stylesheet and icon font shipped next
    # to this module into the built site.
    file_path = Path(__file__).resolve().parent
    copyfile(file_path / "material-style.css", material_css)
    copyfile(file_path / "material-icons.woff2", css_path / "material-icons.woff2")
# Collect every index.html under site/ and strip its external font links.
indexes = []
for root, _, filenames in os.walk(dir_path / "site"):
    for filename in fnmatch.filter(filenames, "index.html"):
        indexes.append(os.path.join(root, filename))
for index_file in indexes:
    update_index_to_offline(index_file)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _filenames_from_arg(filename):
"""Utility function to deal with polymorphic filenames argument."""
|
if isinstance(filename, string_types):
filenames = [filename]
elif isinstance(filename, (list, tuple)):
filenames = filename
else:
raise Exception('filename argument must be string, list or tuple')
for fn in filenames:
if not os.path.exists(fn):
raise ValueError('file not found: %s' % fn)
if not os.path.isfile(fn):
raise ValueError('not a file: %s' % fn)
return filenames
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_cache(vcf_fn, array_type, region, cachedir, compress, log):
"""Utility function to obtain a cache file name and determine whether or not a fresh cache file is available."""
|
# guard condition: caching keys on a single file's path and mtime
if isinstance(vcf_fn, (list, tuple)):
    raise Exception(
        'caching only supported when loading from a single VCF file'
    )
# create cache file name
cache_fn = _mk_cache_fn(vcf_fn, array_type=array_type, region=region,
                        cachedir=cachedir, compress=compress)
# decide whether or not a fresh cache file is available
# (if not, we will parse the VCF and build array from scratch)
if not os.path.exists(cache_fn):
    log('no cache file found')
    is_cached = False
elif os.path.getmtime(vcf_fn) > os.path.getmtime(cache_fn):
    # VCF was modified after the cache was written: cache is stale
    is_cached = False
    log('cache file out of date')
else:
    is_cached = True
    log('cache file available')
return cache_fn, is_cached
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _variants_fields(fields, exclude_fields, info_ids):
"""Utility function to determine which fields to extract when loading variants."""
|
if fields is None:
# no fields specified by user
# by default extract all standard and INFO fields
fields = config.STANDARD_VARIANT_FIELDS + info_ids
else:
# fields have been specified
for f in fields:
# check for non-standard fields not declared in INFO header
if f not in config.STANDARD_VARIANT_FIELDS and f not in info_ids:
# support extracting INFO even if not declared in header,
# but warn...
print('WARNING: no INFO definition found for field %s' % f,
file=sys.stderr)
# process any exclusions
if exclude_fields is not None:
fields = [f for f in fields if f not in exclude_fields]
return tuple(f for f in fields)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _variants_fills(fields, fills, info_types):
"""Utility function to determine fill values for variants fields with missing values."""
|
if fills is None:
    # no fills specified by user
    fills = dict()
for f, vcf_type in zip(fields, info_types):
    if f == 'FILTER':
        # FILTER is represented with booleans, so a missing value is False.
        # NOTE(review): this also overrides any user-supplied fill for
        # FILTER — confirm that is intended.
        fills[f] = False
    elif f not in fills:
        # fall back to the configured default for this field/type
        if f in config.STANDARD_VARIANT_FIELDS:
            fills[f] = config.DEFAULT_VARIANT_FILL[f]
        else:
            fills[f] = config.DEFAULT_FILL_MAP[vcf_type]
# convert to tuple for zipping with fields
fills = tuple(fills[f] for f in fields)
return fills
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _info_transformers(fields, transformers):
"""Utility function to determine transformer functions for variants fields."""
|
if transformers is None:
# no transformers specified by user
transformers = dict()
for f in fields:
if f not in transformers:
transformers[f] = config.DEFAULT_TRANSFORMER.get(f, None)
return tuple(transformers[f] for f in fields)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _variants_dtype(fields, dtypes, arities, filter_ids, flatten_filter, info_types):
"""Utility function to build a numpy dtype for a variants array, given user arguments and information available from VCF header."""
|
dtype = list()
for f, n, vcf_type in zip(fields, arities, info_types):
    if f == 'FILTER' and flatten_filter:
        # split FILTER into multiple boolean fields, one per filter id
        for flt in filter_ids:
            nm = 'FILTER_' + flt
            dtype.append((nm, 'b1'))
    elif f == 'FILTER' and not flatten_filter:
        # represent FILTER as a structured field
        t = [(flt, 'b1') for flt in filter_ids]
        dtype.append((f, t))
    else:
        # dtype precedence: user override > standard field default
        # > known INFO default > mapping from the declared VCF type
        if dtypes is not None and f in dtypes:
            # user overrides default dtype
            t = dtypes[f]
        elif f in config.STANDARD_VARIANT_FIELDS:
            t = config.DEFAULT_VARIANT_DTYPE[f]
        elif f in config.DEFAULT_INFO_DTYPE:
            # known INFO field
            t = config.DEFAULT_INFO_DTYPE[f]
        else:
            t = config.DEFAULT_TYPE_MAP[vcf_type]
        # deal with arity: n > 1 becomes a fixed-length sub-array
        if n == 1:
            dtype.append((f, t))
        else:
            dtype.append((f, t, (n,)))
return dtype
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fromiter(it, dtype, count, progress, log):
"""Utility function to load an array from an iterator."""
|
if progress > 0:
it = _iter_withprogress(it, progress, log)
if count is not None:
a = np.fromiter(it, dtype=dtype, count=count)
else:
a = np.fromiter(it, dtype=dtype)
return a
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _iter_withprogress(iterable, progress, log):
"""Utility function to load an array from an iterator, reporting progress as we go."""
|
# timestamps: before_all for the whole run, before for the current batch
before_all = time.time()
before = before_all
n = 0
for i, o in enumerate(iterable):
    yield o
    n = i+1
    if n % progress == 0:
        # report cumulative and per-batch throughput
        after = time.time()
        log('%s rows in %.2fs; batch in %.2fs (%d rows/s)'
            % (n, after-before_all, after-before, progress/(after-before)))
        before = after
# final summary over the whole iteration
after_all = time.time()
log('%s rows in %.2fs (%d rows/s)'
    % (n, after_all-before_all, n/(after_all-before_all)))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calldata(vcf_fn, region=None, samples=None, ploidy=2, fields=None, exclude_fields=None, dtypes=None, arities=None, fills=None, vcf_types=None, count=None, progress=0, logstream=None, condition=None, slice_args=None, verbose=True, cache=False, cachedir=None, skip_cached=False, compress_cache=False, truncate=True):
""" Load a numpy 1-dimensional structured array with data from the sample columns of a VCF file. Parameters vcf_fn: string or list Name of the VCF file or list of file names. region: string Region to extract, e.g., 'chr1' or 'chr1:0-100000'. fields: list or array-like List of fields to extract from the VCF. exclude_fields: list or array-like Fields to exclude from extraction. dtypes: dict or dict-like Dictionary cotaining dtypes to use instead of the default inferred ones arities: dict or dict-like Override the amount of values to expect. fills: dict or dict-like Dictionary containing field:fillvalue mappings used to override the default fill in values in VCF fields. vcf_types: dict or dict-like Dictionary containing field:string mappings used to override any bogus type declarations in the VCF header. count: int Attempt to extract a specific number of records. progress: int If greater than 0, log parsing progress. logstream: file or file-like object Stream to use for logging progress. condition: array Boolean array defining which rows to load. slice_args: tuple or list Slice of the underlying iterator, e.g., (0, 1000, 10) takes every 10th row from the first 1000. verbose: bool Log more messages. cache: bool If True, save the resulting numpy array to disk, and load from the cache if present rather than rebuilding from the VCF. cachedir: string Manually specify the directory to use to store cache files. skip_cached: bool If True and cache file is fresh, do not load and return None. compress_cache: bool, optional If True, compress the cache file. truncate: bool, optional If True (default) only include variants whose start position is within the given region. If False, use default tabix behaviour. 
Examples -------- array([ ((True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, False, [0, 1], 0, 0, b'0/1', [3, 3])), ((True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, False, [0, 1], 0, 0, b'0/1', [3, 3])), ((True, True, [0, 0], 1, 48, b'0|0', [51, 51]), (True, True, [1, 0], 8, 48, b'1|0', [51, 51]), (True, False, [1, 1], 5, 43, b'1/1', [0, 0])), ((True, True, [0, 0], 3, 49, b'0|0', [58, 50]), (True, True, [0, 1], 5, 3, b'0|1', [65, 3]), (True, False, [0, 0], 3, 41, b'0/0', [0, 0])), ((True, True, [1, 2], 6, 21, b'1|2', [23, 27]), (True, True, [2, 1], 0, 2, b'2|1', [18, 2]), (True, False, [2, 2], 4, 35, b'2/2', [0, 0])), ((True, True, [0, 0], 0, 54, b'0|0', [56, 60]), (True, True, [0, 0], 4, 48, b'0|0', [51, 51]), (True, False, [0, 0], 2, 61, b'0/0', [0, 0])), ((True, False, [0, 1], 4, 0, b'0/1', [0, 0]), (True, False, [0, 2], 2, 17, b'0/2', [0, 0]), (False, False, [-1, -1], 3, 40, b'./.', [0, 0])), ((True, False, [0, 0], 0, 0, b'0/0', [0, 0]), (True, True, [0, 0], 0, 0, b'0|0', [0, 0]), (False, False, [-1, -1], 0, 0, b'./.', [0, 0])), ((True, False, [0, -1], 0, 0, b'0', [0, 0]), (True, False, [0, 1], 0, 0, b'0/1', [0, 0]), (True, True, [0, 2], 0, 0, b'0|2', [0, 0]))], dtype=[('NA00001', [('is_called', '?'), ('is_phased', '?'), ('genotype', 'i1', (2,)), ('DP', '<u2'), ('GQ', 'u1'), ('GT', 'S3'), ('HQ', '<i4', (2,))]), ('NA00002', [('is_called', '?'), ('is_phased', '?'), ('genotype', 'i1', (2,)), ('DP', '<u2'), ('GQ', 'u1'), ('GT', 'S3'), ('HQ', '<i4', (2,))]), ('NA00003', [('is_called', '?'), ('is_phased', '?'), ('genotype', 'i1', (2,)), ('DP', '<u2'), ('GQ', 'u1'), ('GT', 'S3'), ('HQ', '<i4', (2,))])]) array([(True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, True, [0, 0], 1, 48, b'0|0', [51, 51]), (True, True, [0, 0], 3, 49, b'0|0', [58, 50]), (True, True, [1, 2], 6, 21, b'1|2', [23, 27]), (True, True, [0, 0], 0, 54, 
b'0|0', [56, 60]), (True, False, [0, 1], 4, 0, b'0/1', [0, 0]), (True, False, [0, 0], 0, 0, b'0/0', [0, 0]), (True, False, [0, -1], 0, 0, b'0', [0, 0])], dtype=[('is_called', '?'), ('is_phased', '?'), ('genotype', 'i1', (2,)), ('DP', '<u2'), ('GQ', 'u1'), ('GT', 'S3'), ('HQ', '<i4', (2,))]) array([[(True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, False, [0, 1], 0, 0, b'0/1', [3, 3])], [(True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, True, [0, 0], 0, 0, b'0|0', [10, 10]), (True, False, [0, 1], 0, 0, b'0/1', [3, 3])], [(True, True, [0, 0], 1, 48, b'0|0', [51, 51]), (True, True, [1, 0], 8, 48, b'1|0', [51, 51]), (True, False, [1, 1], 5, 43, b'1/1', [0, 0])], [(True, True, [0, 0], 3, 49, b'0|0', [58, 50]), (True, True, [0, 1], 5, 3, b'0|1', [65, 3]), (True, False, [0, 0], 3, 41, b'0/0', [0, 0])], [(True, True, [1, 2], 6, 21, b'1|2', [23, 27]), (True, True, [2, 1], 0, 2, b'2|1', [18, 2]), (True, False, [2, 2], 4, 35, b'2/2', [0, 0])], [(True, True, [0, 0], 0, 54, b'0|0', [56, 60]), (True, True, [0, 0], 4, 48, b'0|0', [51, 51]), (True, False, [0, 0], 2, 61, b'0/0', [0, 0])], [(True, False, [0, 1], 4, 0, b'0/1', [0, 0]), (True, False, [0, 2], 2, 17, b'0/2', [0, 0]), (False, False, [-1, -1], 3, 40, b'./.', [0, 0])], [(True, False, [0, 0], 0, 0, b'0/0', [0, 0]), (True, True, [0, 0], 0, 0, b'0|0', [0, 0]), (False, False, [-1, -1], 0, 0, b'./.', [0, 0])], [(True, False, [0, -1], 0, 0, b'0', [0, 0]), (True, False, [0, 1], 0, 0, b'0/1', [0, 0]), (True, True, [0, 2], 0, 0, b'0|2', [0, 0])]], dtype=[('is_called', '?'), ('is_phased', '?'), ('genotype', 'i1', (2,)), ('DP', '<u2'), ('GQ', 'u1'), ('GT', 'S3'), ('HQ', '<i4', (2,))]) array([[[ 0, 0], [ 0, 0], [ 0, 1]], [[ 0, 0], [ 0, 0], [ 0, 1]], [[ 0, 0], [ 1, 0], [ 1, 1]], [[ 0, 0], [ 0, 1], [ 0, 0]], [[ 1, 2], [ 2, 1], [ 2, 2]], [[ 0, 0], [ 0, 0], [ 0, 0]], [[ 0, 1], [ 0, 2], [-1, -1]], [[ 0, 0], [ 0, 0], [-1, -1]], [[ 0, -1], [ 0, 1], [ 0, 2]]], dtype=int8) array([[0, 0], [0, 
1], [0, 0]], dtype=int8) """
|
# flake8: noqa
# Thin convenience wrapper: every option is forwarded unchanged to
# _CalldataLoader, which performs the parsing, caching and filtering.
loader = _CalldataLoader(vcf_fn, region=region, samples=samples,
                         ploidy=ploidy, fields=fields,
                         exclude_fields=exclude_fields, dtypes=dtypes,
                         arities=arities, fills=fills, vcf_types=vcf_types,
                         count=count, progress=progress,
                         logstream=logstream, condition=condition,
                         slice_args=slice_args, verbose=verbose,
                         cache=cache, cachedir=cachedir,
                         skip_cached=skip_cached,
                         compress_cache=compress_cache,
                         truncate=truncate)
arr = loader.load()
return arr
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_datetimenow(self):
""" get datetime now according to USE_TZ and default time """
|
# Naive UTC "now"; only made timezone-aware (and converted to the
# project's default timezone) when USE_TZ is enabled.
value = timezone.datetime.utcnow()
if settings.USE_TZ:
    value = timezone.localtime(
        timezone.make_aware(value, timezone.utc),
        timezone.get_default_timezone()
    )
return value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_default_timezone_datetime(self, value):
""" convert to default timezone datetime """
|
return timezone.localtime(self.to_utc_datetime(value), timezone.get_default_timezone())
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dfa_json_importer(input_file: str) -> dict: """ Imports a DFA from a JSON file. :param str input_file: path + filename to json file; :return: *(dict)* representing a DFA. """
|
file = open(input_file)
json_file = json.load(file)
transitions = {} # key [state ∈ states, action ∈ alphabet]
# value [arriving state ∈ states]
for (origin, action, destination) in json_file['transitions']:
transitions[origin, action] = destination
dfa = {
'alphabet': set(json_file['alphabet']),
'states': set(json_file['states']),
'initial_state': json_file['initial_state'],
'accepting_states': set(json_file['accepting_states']),
'transitions': transitions
}
return dfa
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dfa_to_json(dfa: dict, name: str, path: str = './'):
""" Exports a DFA to a JSON file. If *path* do not exists, it will be created. :param dict dfa: DFA to export; :param str name: name of the output file; :param str path: path where to save the JSON file (default: working directory) """
|
# Sets are not JSON-serialisable: convert them to lists.
out = {
    'alphabet': list(dfa['alphabet']),
    'states': list(dfa['states']),
    'initial_state': dfa['initial_state'],
    'accepting_states': list(dfa['accepting_states']),
    'transitions': list()
}
# Serialise the transition mapping as [origin, action, destination].
for t in dfa['transitions']:
    out['transitions'].append(
        [t[0], t[1], dfa['transitions'][t]])
if not os.path.exists(path):
    os.makedirs(path)
# Context manager closes the file even if json.dump raises
# (the original leaked the handle on error).
with open(os.path.join(path, name + '.json'), 'w') as file:
    json.dump(out, file, sort_keys=True, indent=4)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dfa_dot_importer(input_file: str) -> dict: """ Imports a DFA from a DOT file. Of DOT files are recognized the following attributes: • nodeX shape=doublecircle -> accepting node; • nodeX root=true -> initial node; • edgeX label="a" -> action in alphabet; • fake [style=invisible] -> dummy invisible node pointing to initial state (they will be skipped); • fake-> S [style=bold] -> dummy transition to draw the arrow pointing to initial state (it will be skipped). Forbidden names: • 'fake' used for graphical purpose to drawn the arrow of the initial state; • 'sink' used as additional state when completing a DFA; • 'None' used when no initial state is present. Forbidden characters: • " • ' • ( • ) • spaces :param str input_file: path to the DOT file; :return: *(dict)* representing a DFA. """
|
# pyDot Object
g = pydot.graph_from_dot_file(input_file)[0]
states = set()
initial_state = None
accepting_states = set()
# characters stripped from every node/edge name before use
replacements = {'"': '', "'": '', '(': '', ')': '', ' ': ''}
for node in g.get_nodes():
    # skip pydot bookkeeping entries and the dummy arrow source
    if node.get_name() == 'fake' \
            or node.get_name() == 'None' \
            or node.get_name() == 'graph' \
            or node.get_name() == 'node':
        continue
    if 'style' in node.get_attributes() \
            and node.get_attributes()['style'] == 'invisible':
        continue
    node_reference = __replace_all(replacements,
                                   node.get_name()).split(',')
    # comma-separated names become composite (tuple) states
    if len(node_reference) > 1:
        node_reference = tuple(node_reference)
    else:
        node_reference = node_reference[0]
    states.add(node_reference)
    for attribute in node.get_attributes():
        if attribute == 'root':
            # root=true marks the initial state
            initial_state = node_reference
        if attribute == 'shape' and node.get_attributes()[
                'shape'] == 'doublecircle':
            accepting_states.add(node_reference)
alphabet = set()
transitions = {}
for edge in g.get_edges():
    if edge.get_source() == 'fake':
        # dummy transition that only draws the initial-state arrow
        continue
    label = __replace_all(replacements, edge.get_label())
    alphabet.add(label)
    source = __replace_all(replacements,
                           edge.get_source()).split(',')
    if len(source) > 1:
        source = tuple(source)
    else:
        source = source[0]
    destination = __replace_all(replacements,
                                edge.get_destination()).split(',')
    if len(destination) > 1:
        destination = tuple(destination)
    else:
        destination = destination[0]
    # deterministic: one destination per (source, label) pair
    transitions[source, label] = destination
dfa = {
    'alphabet': alphabet,
    'states': states,
    'initial_state': initial_state,
    'accepting_states': accepting_states,
    'transitions': transitions}
return dfa
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nfa_json_importer(input_file: str) -> dict: """ Imports a NFA from a JSON file. :param str input_file: path+filename to JSON file; :return: *(dict)* representing a NFA. """
|
file = open(input_file)
json_file = json.load(file)
transitions = {} # key [state in states, action in alphabet]
# value [Set of arriving states in states]
for p in json_file['transitions']:
transitions.setdefault((p[0], p[1]), set()).add(p[2])
nfa = {
'alphabet': set(json_file['alphabet']),
'states': set(json_file['states']),
'initial_states': set(json_file['initial_states']),
'accepting_states': set(json_file['accepting_states']),
'transitions': transitions
}
return nfa
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nfa_to_json(nfa: dict, name: str, path: str = './'):
""" Exports a NFA to a JSON file. :param dict nfa: NFA to export; :param str name: name of the output file; :param str path: path where to save the JSON file (default: working directory). """
|
# Flatten the transition mapping to [origin, action, destination]
# triples, one per destination state.
transitions = list()  # key[state in states, action in alphabet]
# value [Set of arriving states in states]
for p in nfa['transitions']:
    for dest in nfa['transitions'][p]:
        transitions.append([p[0], p[1], dest])
# Sets are not JSON-serialisable: convert them to lists.
out = {
    'alphabet': list(nfa['alphabet']),
    'states': list(nfa['states']),
    'initial_states': list(nfa['initial_states']),
    'accepting_states': list(nfa['accepting_states']),
    'transitions': transitions
}
if not os.path.exists(path):
    os.makedirs(path)
# Context manager closes the file even if json.dump raises
# (the original leaked the handle on error).
with open(os.path.join(path, name + '.json'), 'w') as file:
    json.dump(out, file, sort_keys=True, indent=4)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nfa_dot_importer(input_file: str) -> dict: """ Imports a NFA from a DOT file. Of .dot files are recognized the following attributes • nodeX shape=doublecircle -> accepting node; • nodeX root=true -> initial node; • edgeX label="a" -> action in alphabet; • fakeX style=invisible -> dummy invisible nodes pointing to initial state (it will be skipped); • fakeX->S [style=bold] -> dummy transitions to draw arrows pointing to initial states (they will be skipped). All invisible nodes are skipped. Forbidden names: • 'fake' used for graphical purpose to drawn the arrow of the initial state • 'sink' used as additional state when completing a NFA Forbidden characters: • " • ' • ( • ) • spaces :param str input_file: Path to input DOT file; :return: *(dict)* representing a NFA. """
|
# pyDot Object
g = pydot.graph_from_dot_file(input_file)[0]
states = set()
initial_states = set()
accepting_states = set()
# characters stripped from every node/edge name before use
replacements = {'"': '', "'": '', '(': '', ')': '', ' ': ''}
for node in g.get_nodes():
    attributes = node.get_attributes()
    # skip pydot bookkeeping entries and the dummy arrow sources
    if node.get_name() == 'fake' \
            or node.get_name() == 'None' \
            or node.get_name() == 'graph' \
            or node.get_name() == 'node':
        continue
    if 'style' in attributes \
            and attributes['style'] == 'invisible':
        continue
    node_reference = __replace_all(replacements,
                                   node.get_name()).split(',')
    # comma-separated names become composite (tuple) states
    if len(node_reference) > 1:
        node_reference = tuple(node_reference)
    else:
        node_reference = node_reference[0]
    states.add(node_reference)
    for attribute in attributes:
        if attribute == 'root':
            # root=true marks an initial state (there may be several)
            initial_states.add(node_reference)
        if attribute == 'shape' \
                and attributes['shape'] == 'doublecircle':
            accepting_states.add(node_reference)
alphabet = set()
transitions = {}
for edge in g.get_edges():
    source = __replace_all(replacements,
                           edge.get_source()).split(',')
    if len(source) > 1:
        source = tuple(source)
    else:
        source = source[0]
    destination = __replace_all(replacements,
                                edge.get_destination()).split(',')
    if len(destination) > 1:
        destination = tuple(destination)
    else:
        destination = destination[0]
    # edges touching skipped nodes (fake/invisible) are ignored
    if source not in states or destination not in states:
        continue
    label = __replace_all(replacements, edge.get_label())
    alphabet.add(label)
    # nondeterministic: collect all destinations per (source, label)
    transitions.setdefault((source, label), set()).add(
        destination)
nfa = {
    'alphabet': alphabet,
    'states': states,
    'initial_states': initial_states,
    'accepting_states': accepting_states,
    'transitions': transitions
}
return nfa
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def afw_json_importer(input_file: str) -> dict: """ Imports a AFW from a JSON file. :param str input_file: path+filename to input JSON file; :return: *(dict)* representing a AFW. """
|
file = open(input_file)
json_file = json.load(file)
transitions = {} # key [state in states, action in alphabet]
# value [string representing boolean expression]
for p in json_file['transitions']:
transitions[p[0], p[1]] = p[2]
# return map
afw = {
'alphabet': set(json_file['alphabet']),
'states': set(json_file['states']),
'initial_state': json_file['initial_state'],
'accepting_states': set(json_file['accepting_states']),
'transitions': transitions
}
return afw
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __recursive_acceptance(afw, state, remaining_word):
""" Recursive call for word acceptance. :param dict afw: input AFW; :param str state: current state; :param list remaining_word: list containing the remaining words. :return: *(bool)*, True if the word is accepted, false otherwise. """
|
# Base case: the word is accepted only if all the final states are
# accepting states
if len(remaining_word) == 0:
    if state in afw['accepting_states']:
        return True
    else:
        return False
action = remaining_word[0]
if (state, action) not in afw['transitions']:
    # missing transition behaves like the constant formula 'False'
    return False
if afw['transitions'][state, action] == 'True':
    return True
elif afw['transitions'][state, action] == 'False':
    return False
transition = (state, action)
# extract from the boolean formula of the transition the
# states involved in it
involved_states = list(
    set(
        re.findall(r"[\w']+", afw['transitions'][transition])
    ).difference({'and', 'or', 'True', 'False'})
)
# every True/False assignment over the involved states
possible_assignments = set(
    itertools.product([True, False], repeat=len(involved_states)))
# For all possible assignments of the transition (a
# boolean formula over the states)
for assignment in possible_assignments:
    mapping = dict(zip(involved_states, assignment))
    # If the assignment evaluation is positive
    # NOTE(review): eval() on the stored formula string — AFW
    # definitions must come from trusted sources only.
    if eval(afw['transitions'][transition], mapping):
        ok = True
        mapping.pop('__builtins__')  # removes useless entry
        # added by the function eval()
        # Check if the word is accepted in ALL the states
        # mapped to True by the assignment
        for mapped_state in mapping:
            if mapping[mapped_state] == False:
                continue
            if not __recursive_acceptance(afw,
                                          mapped_state,
                                          remaining_word[1:]):
                # if one positive state of the assignment
                # doesn't accept the word, the whole
                # assignment is discarded
                ok = False
                break
        if ok:
            # If at least one assignment accepts the word,
            # the word is accepted by the afw
            return True
return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def afw_completion(afw):
""" Side effect on input! Complete the afw adding not present transitions and marking them as False. :param dict afw: input AFW. """
|
for state in afw['states']:
for a in afw['alphabet']:
if (state, a) not in afw['transitions']:
afw['transitions'][state, a] = 'False'
return afw
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nfa_to_afw_conversion(nfa: dict) -> dict: """ Returns a AFW reading the same language of input NFA. Let :math:`A = (Σ,S,S^0, ρ,F)` be an nfa. Then we define the afw AA such that :math:`L(AA) = L(A)` as follows :math:`AA = (Σ, S ∪ {s_0}, s_0 , ρ_A , F )` where :math:`s_0` is a new state and :math:`ρ_A` is defined as follows: • :math:`ρ_A(s, a)= ⋁_{(s,a,s')∈ρ}s'`, for all :math:`a ∈ Σ` and :math:`s ∈ S` • :math:`ρ_A(s^0, a)= ⋁_{s∈S^0,(s,a,s')∈ρ}s'`, for all :math:`a ∈ Σ` We take an empty disjunction in the definition of AA to be equivalent to false. Essentially, the transitions of A are viewed as disjunctions in AA . A special treatment is needed for the initial state, since we allow a set of initial states in nondeterministic automata, but only a single initial state in alternating automata. :param dict nfa: input NFA. :return: *(dict)* representing a AFW. """
|
afw = {
'alphabet': nfa['alphabet'].copy(),
'states': nfa['states'].copy(),
'initial_state': 'root',
'accepting_states': nfa['accepting_states'].copy(),
'transitions': dict()
}
# Make sure "root" node doesn't already exists, in case rename it
i = 0
while afw['initial_state'] in nfa['states']:
afw['initial_state'] = 'root' + str(i)
i += 1
afw['states'].add(afw['initial_state'])
for (state, action) in nfa['transitions']:
boolean_formula = str()
for destination in nfa['transitions'][state, action]:
boolean_formula += destination + ' or '
# strip last ' or ' from the formula string
boolean_formula = boolean_formula[0:-4]
afw['transitions'][state, action] = boolean_formula
if state in nfa['initial_states']:
afw['transitions'][afw['initial_state'], action] = boolean_formula
return afw
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def afw_to_nfa_conversion(afw: dict) -> dict: """ Returns a NFA reading the same language of input AFW. Let :math:`A = (Σ, S, s^0 , ρ, F )` be an afw. Then we define the nfa :math:`A_N` such that :math:`L(A_N) = L(A)` as follows :math:`AN = (Σ, S_N , S^0_N , ρ_N , F_N )` where: • :math:`S_N = 2^S` • :math:`S^0_N= \{\{s^0 \}\}` • :math:`F_N=2^F` • :math:`(Q,a,Q') ∈ ρ_N` iff :math:`Q'` satisfies :math:`⋀_{ s∈Q} ρ(s, a)` We take an empty conjunction in the definition of :math:`ρ_N` to be equivalent to true; thus, :math:`(∅, a, ∅) ∈ ρ_N`. :param dict afw: input AFW. :return: *(dict)* representing a NFA. """
|
    nfa = {
        'alphabet': afw['alphabet'].copy(),
        'initial_states': {(afw['initial_state'],)},
        'states': {(afw['initial_state'],)},
        'accepting_states': set(),
        'transitions': dict()
    }
    # State of the NFA are composed by the union of more states of the AFW
    # (each NFA state is a tuple of AFW states). 'boundary' is the worklist
    # of NFA states whose outgoing transitions still need to be computed.
    boundary = deepcopy(nfa['states'])
    possible_assignments = set(
        itertools.product([True, False], repeat=len(afw['states'])))
    while boundary:
        state = boundary.pop()
        # The state is accepting only if composed exclusively of final states
        if set(state).issubset(afw['accepting_states']):
            nfa['accepting_states'].add(state)
        for action in nfa['alphabet']:
            boolean_formula = 'True'
            # join the boolean formulas of the single states given the action
            # (conjunction over all AFW states in the tuple; a missing
            # transition contributes False, i.e. the empty conjunction rule)
            for s in state:
                if (s, action) not in afw['transitions']:
                    boolean_formula += ' and False'
                else:
                    boolean_formula += \
                        ' and (' + \
                        afw['transitions'][s, action] + \
                        ')'
            for assignment in possible_assignments:
                mapping = dict(zip(afw['states'], assignment))
                # If the formula is satisfied
                # NOTE: eval() also injects a '__builtins__' entry into
                # 'mapping'; the 'is True' filter below excludes it.
                if eval(boolean_formula, mapping):
                    # add the transition to the resulting NFA
                    evaluation = \
                        tuple(k for k in mapping if mapping[k] is True)
                    if evaluation not in nfa['states']:
                        nfa['states'].add(evaluation)
                        boundary.add(evaluation)
                    nfa['transitions'].setdefault(
                        (state, action), set()).add(evaluation)
    return nfa
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def formula_dual(input_formula: str) -> str: """ Returns the dual of the input formula. The dual operation on formulas in :math:`B^+(X)` is defined as: the dual :math:`\overline{θ}` of a formula :math:`θ` is obtained from θ by switching :math:`∧` and :math:`∨`, and by switching :math:`true` and :math:`false`. :param str input_formula: original string. :return: *(str)*, dual of input formula. """
|
conversion_dictionary = {
'and': 'or',
'or': 'and',
'True': 'False',
'False': 'True'
}
return re.sub(
'|'.join(re.escape(key) for key in conversion_dictionary.keys()),
lambda k: conversion_dictionary[k.group(0)], input_formula)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def afw_complementation(afw: dict) -> dict: """ Returns a AFW reading the complemented language read by input AFW. Let :math:`A = (Σ, S, s^0 , ρ, F )`. Define :math:`Ā = (Σ, S, s^0 , \overline{ρ}, S − F )`, where :math:`\overline{ρ}(s, a) = \overline{ρ(s, a)}` for all :math:`s ∈ S` and :math:`a ∈ Σ`. That is, :math:`\overline{ρ}` is the dualized transition function. It can be shown that :math:`L( Ā) = Σ^∗ − L(A)`. The input afw need to be completed i.e. each non existing transition must be added pointing to False. :param dict afw: input AFW. :return: *(dict)* representing a AFW. """
|
completed_input = afw_completion(deepcopy(afw))
complemented_afw = {
'alphabet': completed_input['alphabet'],
'states': completed_input['states'],
'initial_state': completed_input['initial_state'],
'accepting_states':
completed_input['states'].difference(afw['accepting_states']),
'transitions': dict()
}
for transition in completed_input['transitions']:
complemented_afw['transitions'][transition] = \
formula_dual(completed_input['transitions'][transition])
return complemented_afw
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def afw_union(afw_1: dict, afw_2: dict) -> dict: """ Returns a AFW that reads the union of the languages read by input AFWs. Let :math:`A_1 = (Σ, S_1 , s^0_1, ρ_1 , F_1 )` and :math:`A_2 = (Σ, S_2 , s^0_2, ρ_2 , F_2 )` be alternating automata accepting the languages :math:`L( A_1)` and :math:`L(A_2)`. Then, :math:`B_∪ = (Σ, S_1 ∪ S_2 ∪ {root}, ρ_∪ , root , F_1 ∪ F_2 )` with :math:`ρ_∪ = ρ_1 ∪ ρ_2 ∪ [(root, a):
ρ(s^0_1 , a) ∨ ρ(s^0_2 , a)]` accepts :math:`L(A_1) ∪ L(A_2)`. Pay attention to avoid having the AFWs with state names in common, in case use :mod:`PySimpleAutomata.AFW.rename_afw_states` function. :param dict afw_1: first input AFW; :param dict afw_2: second input AFW;. :return: *(dict)* representing the united AFW. """
|
    # make sure new root state is unique
    initial_state = 'root'
    i = 0
    while initial_state in afw_1['states'] or initial_state in afw_2['states']:
        initial_state = 'root' + str(i)
        i += 1
    union = {
        'alphabet': afw_1['alphabet'].union(afw_2['alphabet']),
        'states':
            afw_1['states'].union(afw_2['states']).union({initial_state}),
        'initial_state': initial_state,
        'accepting_states':
            afw_1['accepting_states'].union(afw_2['accepting_states']),
        'transitions': deepcopy(afw_1['transitions'])
    }
    # add also afw_2 transitions
    # NOTE: dict.update() overwrites afw_1's entry on a key clash, which
    # is why the docstring requires the two AFWs to have disjoint state
    # names (see rename_afw_states).
    union['transitions'].update(afw_2['transitions'])
    # if just one initial state is accepting, so the new one is
    if afw_1['initial_state'] in afw_1['accepting_states'] \
            or afw_2['initial_state'] in afw_2['accepting_states']:
        union['accepting_states'].add(union['initial_state'])
    # copy all transitions of initial states and eventually their conjunction
    # into the new initial state
    # (the new root's formula is the disjunction of both old roots'
    # formulas; a side missing the transition simply contributes nothing)
    for action in union['alphabet']:
        if (afw_1['initial_state'], action) in afw_1['transitions']:
            union['transitions'][initial_state, action] = \
                '(' + \
                afw_1['transitions'][afw_1['initial_state'], action] + \
                ')'
            if (afw_2['initial_state'], action) in afw_2['transitions']:
                union['transitions'][initial_state, action] += \
                    ' or (' + \
                    afw_2['transitions'][afw_2['initial_state'], action] + \
                    ')'
        elif (afw_2['initial_state'], action) in afw_2['transitions']:
            union['transitions'][initial_state, action] = \
                '(' + \
                afw_2['transitions'][afw_2['initial_state'], action] + \
                ')'
    return union
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def afw_intersection(afw_1: dict, afw_2: dict) -> dict: """ Returns a AFW that reads the intersection of the languages read by input AFWs. Let :math:`A_1 = (Σ, S_1 , s^0_1, ρ_1 , F_1 )` and :math:`A_2 = (Σ, S_2 , s^0_2, ρ_2 , F_2 )` be alternating automata accepting the languages :math:`L( A_1)` and :math:`L(A_2)`. Then, :math:`B_∩ = (Σ, S_1 ∪ S_2 ∪ {root}, root, ρ_∩ , F_1 ∪ F_2 )` with :math:`ρ_∩ = ρ_1 ∪ ρ_2 ∪ [(root, a):
ρ(s^0_1 , a) ∧ ρ(s^0_2 , a)]` accepts :math:`L(A_1) ∩ L(A_2)`. :param dict afw_1: first input AFW; :param dict afw_2: second input AFW. :return: *(dict)* representing a AFW. """
|
    # make sure new root state is unique
    initial_state = 'root'
    i = 0
    while initial_state in afw_1['states'] or initial_state in afw_2['states']:
        initial_state = 'root' + str(i)
        i += 1
    intersection = {
        'alphabet': afw_1['alphabet'].union(afw_2['alphabet']),
        'states':
            afw_1['states'].union(afw_2['states']).union({initial_state}),
        'initial_state': initial_state,
        'accepting_states':
            afw_1['accepting_states'].union(afw_2['accepting_states']),
        'transitions': deepcopy(afw_1['transitions'])
    }
    # add also afw_2 transitions
    # NOTE: dict.update() overwrites afw_1's entry on a key clash, so the
    # two AFWs are expected to have disjoint state names.
    intersection['transitions'].update(afw_2['transitions'])
    # if both initial states are accepting, so the new one is
    if afw_1['initial_state'] in afw_1['accepting_states'] \
            and afw_2['initial_state'] in afw_2['accepting_states']:
        intersection['accepting_states'].add(
            intersection['initial_state'])
    # New initial state transitions will be the conjunction of
    # precedent inital states ones
    # (a side with no transition on the action contributes False, making
    # the whole conjunction False, as required for intersection)
    for action in intersection['alphabet']:
        if (afw_1['initial_state'], action) in afw_1['transitions']:
            intersection['transitions'][initial_state, action] = \
                '(' + \
                afw_1['transitions'][afw_1['initial_state'], action] + \
                ')'
            if (afw_2['initial_state'], action) in afw_2['transitions']:
                intersection['transitions'][initial_state, action] += \
                    ' and (' + \
                    afw_2['transitions'][afw_2['initial_state'], action] + \
                    ')'
            else:
                intersection['transitions'][
                    initial_state, action] += ' and False'
        elif (afw_2['initial_state'], action) in afw_2['transitions']:
            intersection['transitions'][initial_state, action] = \
                'False and (' + \
                afw_2['transitions'][afw_2['initial_state'], action] + \
                ')'
    return intersection
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def translate_to_dbus_type(typeof, value):
""" Helper function to map values from their native Python types to Dbus types. :param type typeof: Target for type conversion e.g., 'dbus.Dictionary' :param value: Value to assign using type 'typeof' :return: 'value' converted to type 'typeof' :rtype: typeof """
|
    # NOTE(review): types.UnicodeType exists only on Python 2 (alias of
    # `unicode`); on Python 3 this attribute lookup would raise
    # AttributeError — confirm the supported interpreter version.
    if ((isinstance(value, types.UnicodeType) or
            isinstance(value, str)) and typeof is not dbus.String):
        # FIXME: This is potentially dangerous since it evaluates
        # a string in-situ
        # (string values destined for non-string dbus types are eval'd
        # to recover the underlying Python value before conversion)
        return typeof(eval(value))
    else:
        # Value already has a usable Python type: direct coercion.
        return typeof(value)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def signal_handler(self, *args):
""" Method to call in order to invoke the user callback. :param args: list of signal-dependent arguments :return: """
|
self.user_callback(self.signal, self.user_arg, *args)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_property(self, name=None):
""" Helper to get a property value by name or all properties as a dictionary. See also :py:meth:`set_property` :param str name: defaults to None which means all properties in the object's dictionary are returned as a dict. Otherwise, the property name key is used and its value is returned. :return: Property value by property key, or a dictionary of all properties :raises KeyError: if the property key is not found in the object's dictionary :raises dbus.Exception: org.bluez.Error.DoesNotExist :raises dbus.Exception: org.bluez.Error.InvalidArguments """
|
if (name):
return self._interface.GetProperties()[name]
else:
return self._interface.GetProperties()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_property(self, name, value):
""" Helper to set a property value by name, translating to correct dbus type See also :py:meth:`get_property` :param str name: The property name in the object's dictionary whose value shall be set. :param value: Properties new value to be assigned. :return: :raises KeyError: if the property key is not found in the object's dictionary :raises dbus.Exception: org.bluez.Error.DoesNotExist :raises dbus.Exception: org.bluez.Error.InvalidArguments """
|
typeof = type(self.get_property(name))
self._interface.SetProperty(name,
translate_to_dbus_type(typeof, value))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def default(self, o):
""" Encode JSON. :return str: A JSON encoded string """
|
if isinstance(o, (datetime.datetime, datetime.date, datetime.time)):
return o.isoformat()
if isinstance(o, decimal.Decimal):
return float(o)
return json.JSONEncoder.default(self, o)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump_document(cls, instance, fields_own=None, fields_to_many=None):
""" Get document for model_instance. redefine dump rule for field x: def dump_document_x :param django.db.models.Model instance: model instance :param list<Field> or None fields: model_instance field to dump :return dict: document Related documents are not included to current one. In case of to-many field serialization ensure that models_instance has been select_related so, no database calls would be executed. Method ensures that document has cls.Meta.fieldnames_include and does not have cls.Meta.fieldnames_exclude Steps: 1) fieldnames_include could be properties, but not related models. Add them to fields_own. """
|
        # Determine the set of own (non-relational) field names to dump.
        if fields_own is not None:
            fields_own = {f.name for f in fields_own}
        else:
            # NOTE(review): Field.rel was removed in Django 2.0 — this
            # code assumes an older Django; confirm the supported version.
            fields_own = {
                f.name for f in instance._meta.fields
                if f.rel is None and f.serialize
            }
            fields_own.add('id')
        # Enforce Meta-declared include/exclude lists.
        fields_own = (fields_own | set(cls.Meta.fieldnames_include))\
            - set(cls.Meta.fieldnames_exclude)
        document = {}
        # Include own fields
        for fieldname in fields_own:
            # A resource may override serialization of field x by
            # defining a dump_document_x method.
            field_serializer = getattr(
                cls, "dump_document_{}".format(fieldname), None)
            if field_serializer is not None:
                value = field_serializer(instance)
            else:
                value = getattr(instance, fieldname)
                try:
                    field = instance._meta.get_field(fieldname)
                except models.fields.FieldDoesNotExist:
                    # Field is property, value already calculated
                    pass
                else:
                    if isinstance(field, models.fields.files.FileField):
                        # TODO: Serializer depends on API here.
                        value = cls.Meta.api.base_url + value.url
                    elif isinstance(field, models.CommaSeparatedIntegerField):
                        value = [v for v in value]
            document[fieldname] = value
        # Include to-one fields. It does not require database calls
        for field in instance._meta.fields:
            fieldname = "{}_id".format(field.name)
            # NOTE: check field is not related to parent model to exclude
            # <class>_ptr fields. OneToOne relationship field.rel.multiple =
            # False. Here make sure relationship is to parent model.
            if field.rel and not field.rel.multiple \
                    and isinstance(instance, field.rel.to):
                continue
            if field.rel and fieldname not in cls.Meta.fieldnames_exclude:
                document["links"] = document.get("links") or {}
                document["links"][field.name] = getattr(instance, fieldname)
        # Include to-many fields. It requires database calls. At this point we
        # assume that model was prefetch_related with child objects, which would
        # be included into 'linked' attribute. Here we need to add ids of linked
        # objects. To avoid database calls, iterate over objects manually and
        # get ids.
        fields_to_many = fields_to_many or []
        for field in fields_to_many:
            document["links"] = document.get("links") or {}
            document["links"][field.related_resource_name] = [
                obj.id for obj in getattr(instance, field.name).all()]
        return document
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cached(f):
""" Decorator that makes a method cached."""
|
attr_name = '_cached_' + f.__name__
def wrapper(obj, *args, **kwargs):
if not hasattr(obj, attr_name):
setattr(obj, attr_name, f(obj, *args, **kwargs))
return getattr(obj, attr_name)
return wrapper
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _filter_child_model_fields(cls, fields):
""" Keep only related model fields. Example: Inherited models: A -> B -> C B has one-to-many relationship to BMany. after inspection BMany would have links to B and C. Keep only B. Parent model A could not be used (It would not be in fields) :param list fields: model fields. :return list fields: filtered fields. """
|
        # Pairwise scan: whenever two fields point at models related by
        # inheritance, drop the one pointing at the *more derived* model,
        # so only the least-derived related model present survives
        # (docstring example: keep B, drop C).
        indexes_to_remove = set([])
        for index1, field1 in enumerate(fields):
            for index2, field2 in enumerate(fields):
                if index1 < index2 and index1 not in indexes_to_remove and\
                        index2 not in indexes_to_remove:
                    if issubclass(field1.related_model, field2.related_model):
                        indexes_to_remove.add(index1)
                    if issubclass(field2.related_model, field1.related_model):
                        indexes_to_remove.add(index2)
        # Rebuild the list without the discarded indexes.
        fields = [field for index, field in enumerate(fields)
                  if index not in indexes_to_remove]
        return fields
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def post_session():
""" This endpoint appears to be required in order to keep pubnub updates flowing for some user. This just posts a random nonce to the /users/me/session endpoint and returns the result. """
|
url_string = "{}/users/me/session".format(WinkApiInterface.BASE_URL)
nonce = ''.join([str(random.randint(0, 9)) for i in range(9)])
_json = {"nonce": str(nonce)}
try:
arequest = requests.post(url_string,
data=json.dumps(_json),
headers=API_HEADERS)
response_json = arequest.json()
return response_json
except requests.exceptions.RequestException:
return None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_device_state(self, device, state, id_override=None, type_override=None):
""" Set device state via online API. Args: device (WinkDevice):
The device the change is being requested for. state (Dict):
The state being requested. id_override (String, optional):
A device ID used to override the passed in device's ID. Used to make changes on sub-devices. i.e. Outlet in a Powerstrip. The Parent device's ID. type_override (String, optional):
Used to override the device type when a device inherits from a device other than WinkDevice. Returns: response_json (Dict):
The API's response in dictionary format """
|
_LOGGER.info("Setting state via online API")
object_id = id_override or device.object_id()
object_type = type_override or device.object_type()
url_string = "{}/{}s/{}".format(self.BASE_URL,
object_type,
object_id)
if state is None or object_type == "group":
url_string += "/activate"
if state is None:
arequest = requests.post(url_string,
headers=API_HEADERS)
else:
arequest = requests.post(url_string,
data=json.dumps(state),
headers=API_HEADERS)
else:
arequest = requests.put(url_string,
data=json.dumps(state),
headers=API_HEADERS)
if arequest.status_code == 401:
new_token = refresh_access_token()
if new_token:
arequest = requests.put(url_string,
data=json.dumps(state),
headers=API_HEADERS)
else:
raise WinkAPIException("Failed to refresh access token.")
response_json = arequest.json()
_LOGGER.debug('%s', response_json)
return response_json
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def local_set_state(self, device, state, id_override=None, type_override=None):
""" Set device state via local API, and fall back to online API. Args: device (WinkDevice):
The device the change is being requested for. state (Dict):
The state being requested. id_override (String, optional):
A device ID used to override the passed in device's ID. Used to make changes on sub-devices. i.e. Outlet in a Powerstrip. The Parent device's ID. type_override (String, optional):
Used to override the device type when a device inherits from a device other than WinkDevice. Returns: response_json (Dict):
The API's response in dictionary format """
|
if ALLOW_LOCAL_CONTROL:
if device.local_id() is not None:
hub = HUBS.get(device.hub_id())
if hub is None or hub["token"] is None:
return self.set_device_state(device, state, id_override, type_override)
else:
return self.set_device_state(device, state, id_override, type_override)
_LOGGER.info("Setting local state")
local_id = id_override or device.local_id().split(".")[0]
object_type = type_override or device.object_type()
LOCAL_API_HEADERS['Authorization'] = "Bearer " + hub["token"]
url_string = "https://{}:8888/{}s/{}".format(hub["ip"],
object_type,
local_id)
try:
arequest = requests.put(url_string,
data=json.dumps(state),
headers=LOCAL_API_HEADERS,
verify=False, timeout=3)
except requests.exceptions.RequestException:
_LOGGER.error("Error sending local control request. Sending request online")
return self.set_device_state(device, state, id_override, type_override)
response_json = arequest.json()
_LOGGER.debug('%s', response_json)
temp_state = device.json_state
for key, value in response_json["data"]["last_reading"].items():
temp_state["last_reading"][key] = value
return temp_state
else:
return self.set_device_state(device, state, id_override, type_override)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_device_state(self, device, id_override=None, type_override=None):
""" Get device state via online API. Args: device (WinkDevice):
The device the change is being requested for. id_override (String, optional):
A device ID used to override the passed in device's ID. Used to make changes on sub-devices. i.e. Outlet in a Powerstrip. The Parent device's ID. type_override (String, optional):
Used to override the device type when a device inherits from a device other than WinkDevice. Returns: response_json (Dict):
The API's response in dictionary format """
|
_LOGGER.info("Getting state via online API")
object_id = id_override or device.object_id()
object_type = type_override or device.object_type()
url_string = "{}/{}s/{}".format(self.BASE_URL,
object_type, object_id)
arequest = requests.get(url_string, headers=API_HEADERS)
response_json = arequest.json()
_LOGGER.debug('%s', response_json)
return response_json
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def local_get_state(self, device, id_override=None, type_override=None):
""" Get device state via local API, and fall back to online API. Args: device (WinkDevice):
The device the change is being requested for. id_override (String, optional):
A device ID used to override the passed in device's ID. Used to make changes on sub-devices. i.e. Outlet in a Powerstrip. The Parent device's ID. type_override (String, optional):
Used to override the device type when a device inherits from a device other than WinkDevice. Returns: response_json (Dict):
The API's response in dictionary format """
|
if ALLOW_LOCAL_CONTROL:
if device.local_id() is not None:
hub = HUBS.get(device.hub_id())
if hub is not None and hub["token"] is not None:
ip = hub["ip"]
access_token = hub["token"]
else:
return self.get_device_state(device, id_override, type_override)
else:
return self.get_device_state(device, id_override, type_override)
_LOGGER.info("Getting local state")
local_id = id_override or device.local_id()
object_type = type_override or device.object_type()
LOCAL_API_HEADERS['Authorization'] = "Bearer " + access_token
url_string = "https://{}:8888/{}s/{}".format(ip,
object_type,
local_id)
try:
arequest = requests.get(url_string,
headers=LOCAL_API_HEADERS,
verify=False, timeout=3)
except requests.exceptions.RequestException:
_LOGGER.error("Error sending local control request. Sending request online")
return self.get_device_state(device, id_override, type_override)
response_json = arequest.json()
_LOGGER.debug('%s', response_json)
temp_state = device.json_state
for key, value in response_json["data"]["last_reading"].items():
temp_state["last_reading"][key] = value
return temp_state
else:
return self.get_device_state(device, id_override, type_override)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.