text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Returns True if taps aff for this location
<END_TASK>
<USER_TASK:>
Description:
def is_taps_aff(self):
    """Returns True if taps aff for this location"""
    response = requests.get('https://www.taps-aff.co.uk/api/%s' % self.location)
    if response.status_code != 200:
        raise IOError("Failure downloading from Api")
    try:
        status = response.json()['taps']['status']
    except ValueError:
        # The service replied with something that is not JSON.
        raise RuntimeError("Unexpected response from service")
    if status == 'aff':
        return True
    if status == 'oan':
        return False
    raise RuntimeError("Unexpected taps value: %s" % status)
<SYSTEM_TASK:>
Builds response headers and in process renders templates, if any.
<END_TASK>
<USER_TASK:>
Description:
def send_head(self):
    """Builds response headers and in process renders templates, if any.

    This method overrides SimpleHTTPRequestHandler.send_head().

    Returns a file-like object (or rendered stream) to be copied to the
    client, or None when the response has already been fully sent
    (redirect or error).
    """
    path = self.translate_path(self.path)
    f = None
    to_render = False
    if os.path.isdir(path):
        if not self.path.endswith('/'):
            # redirect browser - doing basically what apache does
            self.send_response(301)
            self.send_header("Location", self.path + "/")
            self.end_headers()
            return None
    else:
        # check if URL corresponds to a template to render: a request
        # for /page is satisfied by page.html or page.htm if present.
        if path.endswith("/"):
            index = path[:-1]
        else:
            index = path
        for ext in '.html', '.htm':
            if os.path.exists(index + ext):
                to_render = True
                realpath = index + ext
                break
    if os.path.isdir(path):
        # if dir, check for existence of index.htm*
        for index in "index.html", "index.htm":
            index = os.path.join(path, index)
            if os.path.exists(index):
                realpath = index
                to_render = True
                break
        else:
            # for-else: no index file found — fall back to a listing.
            return self.list_directory(path)
    # deny if URL directly requests for *.html file, allow if dir
    # (templates must be reached through their extension-less URL).
    file_extension = os.path.splitext(path)[1]
    if file_extension in ('.html', '.htm') and not os.path.isdir(path):
        self.send_error(404, "File not found")
        return None
    if to_render:
        path = realpath
    ctype = self.guess_type(path)
    try:
        f = open(path, 'rb')
    except IOError:
        self.send_error(404, "File not found")
        return None
    fs = os.fstat(f.fileno())
    if to_render:
        # Rendering may change the length, so the template pipeline
        # supplies both the stream and its final byte count.
        stream, length = self.gen_response_stream(f)
    else:
        length = fs[6]
    self.send_response(200)
    self.send_header("Content-type", ctype)
    self.send_header("Content-Length", str(length))
    self.send_header("Last-Modified", self.date_time_string(
        fs.st_mtime))
    self.end_headers()
    if to_render:
        return stream
    return f
<SYSTEM_TASK:>
Return a translated version of s where each character is mapped to a
<END_TASK>
<USER_TASK:>
Description:
def pct_encode(s, encdct):
    """
    Return a translated version of s where each character is mapped to a
    string thanks to the encdct dictionary.

    Use the encdct parameter to construct a string from parameter s where
    each character k from s is replaced by the value corresponding to key k
    in encdct. It happens that callers use dictionaries smartly
    constructed so that this function will perform percent-encoding quickly
    when called with such a dictionary.
    """
    # NOTE: Python 2 module (`unicode` type below).
    if s is None:
        return None
    elif not isinstance(s, unicode):
        # Coerce non-text values (ints, already-encoded bytes, ...) to str.
        s = str(s)
    else:
        # Unicode text is mapped over its UTF-8 byte sequence.
        s = s.encode('utf8')
    # Raises KeyError if a character is missing from encdct.
    return ''.join(map(encdct.__getitem__, s))
<SYSTEM_TASK:>
Split authority into component parts. This function supports
<END_TASK>
<USER_TASK:>
Description:
def split_authority(authority):
    """
    Split authority into component parts. This function supports
    IP-literal as defined in RFC 3986.

    >>> split_authority("user:passwd@host:port")
    ('user', 'passwd', 'host', 'port')
    >>> split_authority("user:@host:port")
    ('user', None, 'host', 'port')
    >>> split_authority("user@host:port")
    ('user', None, 'host', 'port')
    >>> split_authority("user@[host]:port")
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
      File "<stdin>", line 26, in split_authority
    InvalidIPLiteralError: Highly invalid IP-literal detected in URI authority 'user@[host]:port'
    >>> split_authority("user@[::dead:192.168.42.131]:port")
    ('user', None, '[::dead:192.168.42.131]', 'port')
    >>> split_authority("[::dead:192.168.42.131]:port")
    (None, None, '[::dead:192.168.42.131]', 'port')
    >>> split_authority(":port")
    (None, None, None, 'port')
    >>> split_authority("user@:port")
    ('user', None, None, 'port')

    Very basic validation is done if the host part of the authority starts
    with an '[' as when this is the case, the splitting is done in a quite
    different manner than the one used by most URI parsers. As a result an
    InvalidIPLiteralError exception is raised if IP-literal is patently
    wrong, so the risk of major clashes between two deviant implementations
    is highly reduced.
    """
    # NOTE: Python 2 module (old-style raise statement below).
    # userinfo is everything before the first '@'.
    if '@' in authority:
        userinfo, hostport = authority.split('@', 1)
        if userinfo and ':' in userinfo:
            user, passwd = userinfo.split(':', 1)
        else:
            user, passwd = userinfo, None
    else:
        user, passwd, hostport = None, None, authority
    if hostport:
        if hostport[0] == '[':
            # IP-literal (RFC 3986): require a matching ']' followed only
            # by an optional ':port' suffix.
            m = re.match(r"\[([\da-fA-F:\.]+|" + IPV_FUTURE_RE
                + r")\](\:.*|)$", hostport)
            if m:
                host = '[' + m.group(1) + ']'
                port = m.group(2)[1:]
            else:
                raise InvalidIPLiteralError, "Highly invalid IP-literal detected in URI authority %r" % (authority,)
        elif ':' in hostport:
            host, port = hostport.split(':', 1)
        else:
            host, port = hostport, None
    else:
        host, port = None, None
    # Empty strings collapse to None so absent and empty parts look alike.
    return (user or None, passwd or None, host or None, port or None)
<SYSTEM_TASK:>
Transform an URI tree so that adjacent all-empty fields are coalesced
<END_TASK>
<USER_TASK:>
Description:
def uri_tree_normalize(uri_tree):
    """
    Transform an URI tree so that adjacent all-empty fields are coalesced
    into a single None at parent level.

    The return value can be used for validation.
    As a result, no distinction is made between empty and absent fields.
    It is believed that this limitation is harmless because this is the
    behavior of most implementations, and even useful in the context of
    this Python module because empty strings are already not distinguished
    from None when converting to boolean, so we are only generalizing this
    concept in order to keep code small and minimize special cases.

    If the distinction is ever really needed, for example to support empty
    anchor special HTTP script related URI in a clean way, one will
    probably need to completely rewrite (or at least review and modify)
    this module, and special care would be needed to distinguish between '',
    (), None, and others everywhere implicit boolean conversion is
    performed. The behavior should then be checked in regards to its
    conformance with RFC 3986.
    """
    # NOTE: Python 2 only — `filter` returning a sequence and the tuple
    # parameter in the lambda below were both removed in Python 3.
    scheme, authority, path, query, fragment = uri_tree
    # An authority whose members are all falsy collapses to None.
    # NOTE(review): `filter` on a py2 tuple yields a tuple, so the `== ()`
    # test only fires for tuple authorities — confirm lists never reach here.
    if authority and (filter(bool, authority) == ()):
        authority = None
    if query:
        # Drop (key, value) pairs where both sides are empty.
        query = filter(lambda (x, y): bool(x) or bool(y), query)
    return (scheme or None, authority or None, path or None,
        query or None, fragment or None)
<SYSTEM_TASK:>
Call this function to validate a raw URI tree before trying to
<END_TASK>
<USER_TASK:>
Description:
def uri_tree_precode_check(uri_tree, type_host = HOST_REG_NAME):
    """
    Call this function to validate a raw URI tree before trying to
    encode it.

    @param uri_tree: a (scheme, authority, path, query, fragment) tuple.
    @param type_host: expected host flavour, selecting which host
        validator runs (reg-name by default, IP-literal, or IPv4address).
    @return: the unchanged uri_tree when every check passes.
    """
    # NOTE: Python 2 module (old-style raise statements below).
    scheme, authority, path, query, fragment = uri_tree # pylint: disable-msg=W0612
    if scheme:
        if not valid_scheme(scheme):
            raise InvalidSchemeError, "Invalid scheme %r" % (scheme,)
    if authority:
        user, passwd, host, port = authority # pylint: disable-msg=W0612
        # The port must be purely numeric.
        if port and not __all_in(port, DIGIT):
            raise InvalidPortError, "Invalid port %r" % (port,)
        if type_host == HOST_IP_LITERAL:
            if host and (not __valid_IPLiteral(host)):
                raise InvalidIPLiteralError, "Invalid IP-literal %r" % (host,)
        elif type_host == HOST_IPV4_ADDRESS:
            if host and (not __valid_IPv4address(host)):
                raise InvalidIPv4addressError, "Invalid IPv4address %r" % (host,)
    if path:
        # RFC 3986: with an authority present the path must be absolute.
        if authority and path[0] != '/':
            raise InvalidPathError, "Invalid path %r - non-absolute path can't be used with an authority" % (path,)
    return uri_tree
<SYSTEM_TASK:>
Confirm a valid configuration.
<END_TASK>
<USER_TASK:>
Description:
def check_configuration(ctx, base_key, needed_keys):
    """
    Confirm a valid configuration.

    Exits the process with an error message when the configuration is
    missing or incomplete; returns None on success.

    Args:
        ctx (invoke.context):
        base_key (str): the base configuration key everything is under.
        needed_keys (list): sub-keys of the base key that are checked to make
            sure they exist.
    """
    # check for valid configuration
    if base_key not in ctx.keys():
        exit("[{}ERROR{}] missing configuration for '{}'"
             .format(ERROR_COLOR, RESET_COLOR, base_key))
        # TODO: offer to create configuration file
    # BUG FIX: this previously tested the hard-coded ``ctx.releaser``
    # attribute, ignoring ``base_key``; check the requested key instead.
    if ctx[base_key] is None:
        exit("[{}ERROR{}] empty configuration for '{}' found"
             .format(ERROR_COLOR, RESET_COLOR, base_key))
        # TODO: offer to create configuration file
        # TODO: allow use of default values
    for my_key in needed_keys:
        if my_key not in ctx[base_key].keys():
            exit("[{}ERROR{}] missing configuration key '{}.{}'"
                 .format(ERROR_COLOR, RESET_COLOR, base_key, my_key))
<SYSTEM_TASK:>
Determine whether a file or folder actually exists.
<END_TASK>
<USER_TASK:>
Description:
def check_existence(to_check, name, config_key=None, relative_to=None,
                    allow_undefined=False, allow_not_existing=False,
                    base_key='releaser'):
    """Determine whether a file or folder actually exists."""
    # An explicitly undefined value is allowed through with a warning.
    if allow_undefined and (to_check is None or to_check.lower() == 'none'):
        print("{: <14} -> {}UNDEFINED{}".format(name, WARNING_COLOR,
                                                RESET_COLOR))
        return
    if config_key is None:
        config_key = "{}.{}".format(base_key, name)
    resolved = Path(to_check).resolve()
    found = resolved.exists()
    # Prefer a short, relative rendering of the path when possible.
    if found and relative_to is not None:
        shown = str(resolved.relative_to(relative_to))
        if shown != '.':
            shown = '.' + os.sep + shown
    else:
        shown = str(resolved)
    if not (found or allow_not_existing):
        raise FileNotFoundError("[{}ERROR{}] '{}', as given, doesn't "
                                "exist. For configuration key '{}', was "
                                "given: {}".format(ERROR_COLOR, RESET_COLOR,
                                                   name, config_key,
                                                   to_check))
    print("{: <14} -> {}".format(name, shown))
<SYSTEM_TASK:>
Given a number of pixels, determines the largest width and height that define a
<END_TASK>
<USER_TASK:>
Description:
def _determine_dimensions(num_of_pixels):
"""
Given a number of pixels, determines the largest width and height that define a
rectangle with such an area
""" |
for x in xrange(int(math.sqrt(num_of_pixels)) + 1, 1, -1):
if num_of_pixels % x == 0:
return num_of_pixels // x, x
return 1, num_of_pixels |
<SYSTEM_TASK:>
Cleans and parses text from the given HTML.
<END_TASK>
<USER_TASK:>
Description:
def textFromHTML(html):
    """
    Cleans and parses text from the given HTML.
    """
    # Strip <script> content before extracting the visible text.
    sanitized = lxml.html.clean.Cleaner(scripts=True).clean_html(html)
    return lxml.html.fromstring(sanitized).text_content()
<SYSTEM_TASK:>
Interpolates the HTML source with the context, then returns that HTML
<END_TASK>
<USER_TASK:>
Description:
def evaluate(self, context):
    """
    Interpolates the HTML source with the context, then returns that HTML
    and the text extracted from that html.
    """
    rendered = self._source.format(**context)
    body_parts = {
        "text/html": rendered,
        "text/plain": textFromHTML(rendered),
    }
    return {}, body_parts
<SYSTEM_TASK:>
List all files in a data directory.
<END_TASK>
<USER_TASK:>
Description:
def get_files(*bases):
    """
    List all files in a data directory.

    Each base is a dotted package path relative to this module's
    directory; yielded paths are rooted at the base's first component.
    """
    here = os.path.dirname(__file__)
    for dotted in bases:
        prefix, _rest = dotted.split(".", 1)
        target = os.path.join(here, *dotted.split("."))
        # Number of leading characters to strip so results start at prefix.
        strip = len(os.path.dirname(target)) + len(prefix) + 2
        for root, _dirs, filenames in os.walk(target):
            for filename in filenames:
                yield os.path.join(prefix, root, filename)[strip:]
<SYSTEM_TASK:>
Return the approximate equirectangular when the location is close to
<END_TASK>
<USER_TASK:>
Description:
def equirectangular_distance(self, other):
    """
    Return the approximate equirectangular distance when the location is
    close to the center of the cluster.

    For small distances, Pythagoras' theorem can be used on an
    equirectangular projection:

        x = Δλ ⋅ cos φm
        y = Δφ
        d = R ⋅ √(x² + y²)

    It will always over-estimate compared to the real Haversine distance.
    For example it will add no more than 0.05382 % to the real distance if
    the delta latitude or longitude between your two points does not
    exceed 4 decimal degrees.

    The standard formula (Haversine) is the exact one (that is, it works
    for any couple of longitude/latitude on earth) but is much slower as
    it needs 7 trigonometric and 2 square roots. If your couple of points
    are not too far apart, and absolute precision is not paramount, this
    approximate version is roughly twice as fast as it uses only one
    trigonometric call and one square root.

    @param other: a ``GeoPoint`` instance.
    @return: the great-circle distance, in meters, between this geographic
        coordinates to the specified other point.
    """
    # Mean latitude of the two points, in radians (the φm of the formula).
    mean_latitude = math.radians(other.latitude + self.latitude) / 2
    x = math.radians(other.longitude - self.longitude) * math.cos(mean_latitude)
    y = math.radians(other.latitude - self.latitude)
    # Stray C-style semicolons removed from the original statements.
    return math.sqrt(x * x + y * y) * GeoPoint.EARTH_RADIUS_METERS
<SYSTEM_TASK:>
Build a ``GeoPoint`` instance from the specified JSON object.
<END_TASK>
<USER_TASK:>
Description:
def from_json(payload):
    """
    Build a ``GeoPoint`` instance from the specified JSON object.

    @param payload: JSON representation of a geographic location::
        {
          "accuracy": decimal,   // accuracy of the position in meters
          "altitude": decimal,   // altitude in meters of the location
          "bearing": decimal,    // horizontal direction of travel [0.0, 360.0] or null
          "longitude": decimal,  // decimal degrees east/west of the Prime Meridian (WGS84)
          "latitude": decimal,   // decimal degrees north/south of the Equator (WGS84)
          "provider": string,    // "gps" or "network"
          "speed": decimal       // meters/second over the ground, or null
        }
        Only ``longitude`` and ``latitude`` are required; the other keys
        are optional.
    @return: a ``GeoPoint`` instance or ``None`` if the JSON payload is
        nil.
    """
    # BUG FIX: the previous `payload and GeoPoint(...)` expression
    # returned a falsy payload itself (e.g. ``{}``) instead of ``None``.
    if not payload:
        return None
    return GeoPoint(
        payload['longitude'],
        payload['latitude'],
        accuracy=payload.get('accuracy'),
        altitude=payload.get('altitude'),
        bearing=payload.get('bearing'),
        fix_time=payload.get('fix_time'),
        provider=payload.get('provider'),
        speed=payload.get('speed'))
<SYSTEM_TASK:>
Recursively goes through the dictionary obj and replaces keys with the convert function.
<END_TASK>
<USER_TASK:>
Description:
def change_keys(obj, convert):
    """
    Recursively goes through the dictionary obj and replaces keys with the convert function.
    """
    # Scalars pass through untouched.
    if isinstance(obj, (str, int, float)):
        return obj
    if isinstance(obj, dict):
        # Build an empty instance of the same mapping class, then fill it,
        # so subclasses with picky constructors (e.g. defaultdict) work.
        converted = obj.__class__()
        for key, value in obj.items():
            converted[convert(key)] = change_keys(value, convert)
        return converted
    if isinstance(obj, (list, set, tuple)):
        return obj.__class__(change_keys(item, convert) for item in obj)
    # Anything else (None, custom objects, ...) is returned as-is.
    return obj
<SYSTEM_TASK:>
Return the SQL string representation of the specified placeholder's
<END_TASK>
<USER_TASK:>
Description:
def _expand_placeholder_value(value):
    """
    Return the SQL string representation of the specified placeholder's
    value.

    @param value: the value of a placeholder such as a simple element, a
        list, or a tuple of one string.
    @note: by convention, a tuple of one string indicates that this string
        MUST not be quoted as it represents, for instance, a call to
        a stored procedure, and not a textual content to insert into a
        table.
    @return: a SQL string representation.
    """
    # NOTE: Python 2 module (`basestring` below).
    # A list/set, or a tuple of 2+ elements, expands to a comma-separated
    # series of SQL values; nested 1-tuples inside mark unquoted elements.
    if isinstance(value, (list, set)) or (isinstance(value, tuple) and len(value) != 1):
        sql_value = ','.join( [ RdbmsConnection._to_sql_value(
                element if not isinstance(element, tuple) else element[0],
                noquote=isinstance(element, tuple))
            for element in value ])
    elif isinstance(value, tuple):
        # A 1-tuple: its single string is emitted verbatim (not quoted).
        assert len(value) == 1
        value = value[0]
        assert value is None or isinstance(value, basestring), 'basestring expected instead of %s' % type(value)
        sql_value = RdbmsConnection._to_sql_value(value, True)
    else:
        # Scalar value: quoted/escaped as needed.
        sql_value = RdbmsConnection._to_sql_value(value)
    return sql_value
<SYSTEM_TASK:>
Retrieve the list of placeholders and their type defined in an SQL
<END_TASK>
<USER_TASK:>
Description:
def _get_placeholders(sql_statement, parameters):
    """
    Retrieve the list of placeholders and their type defined in an SQL
    statement.

    @param sql_statement: a parameterized statement.
    @param parameters: the list of parameters used in the SQL statement.
    @return: a dictionary of placeholders where the key represents the
        name of a placeholder, the value corresponds to a tuple
        (``type:PlaceholderType``, ``value``) where:
        * ``type``: type of the placeholder
        * ``value``: value to replace the placeholder.
    @raise ValueError: if a placeholder has no corresponding parameter,
        or if a parameter has no corresponding placeholder.
    """
    # NOTE: Python 2 module (`iterkeys` below).
    # Find the list of placeholders, and their type, defined in the SQL
    # statement.  Each regex match is a tuple with one capture group per
    # placeholder type; only one group is non-empty for a given match.
    placeholders = {}
    try:
        for match in REGEX_PATTERN_SQL_PLACEHOLDERS.findall(sql_statement):
            for (i, placeholder_type) in enumerate(PlaceholderType._values):
                placeholder_name = match[i]
                if placeholder_name:
                    placeholder_value = parameters[placeholder_name]
                    # NOTE(review): this guard looks unreachable — a value
                    # cannot both be a 1-tuple and not be a tuple.  Confirm
                    # the intended nested_list validation.
                    if placeholder_type == PlaceholderType.nested_list \
                            and (isinstance(placeholder_value, tuple) and len(placeholder_value) == 1) \
                            and not isinstance(placeholder_value, (list, set, tuple)):
                        raise ValueError('The value to replace the placeholder "%s" is not a list as expected' % placeholder_name)
                    placeholders[placeholder_name] = (placeholder_type, placeholder_value)
                    break
    except KeyError:
        # A placeholder name was found in the statement but not supplied.
        raise ValueError('The placeholder %s has no corresponding parameter' % placeholder_name)
    # Check whether all the specified parameters have their corresponding
    # placeholder in the SQL statement.
    undefined_placeholders = [ parameter for parameter in parameters.iterkeys()
        if parameter not in placeholders ]
    if undefined_placeholders:
        raise ValueError('The placeholders %s are missing from the extended pyformat SQL statement\n%s' \
            % (', '.join([ '"%s"' % _ for _ in undefined_placeholders ]), sql_statement))
    return placeholders
<SYSTEM_TASK:>
Prepare the specified SQL statement, replacing the placeholders by the
<END_TASK>
<USER_TASK:>
Description:
def _prepare_statement(sql_statement, parameters):
    """
    Prepare the specified SQL statement, replacing the placeholders by the
    value of the given parameters.

    @param sql_statement: the string expression of a SQL statement.
    @param parameters: a dictionary of parameters where the key represents
        the name of a parameter and the value represents the value of this
        parameter to replace in each placeholder of this parameter in the
        SQL statement.  Mutated in place: expanded parameters are removed.
    @return: a string representation of the SQL statement where the
        placeholders have been replaced by the value of the corresponding
        variables, depending on the type of these variables.
    """
    # NOTE: Python 2 module (`iteritems` below); the `placehoolders`
    # typo is pre-existing and kept as-is in this documentation-only pass.
    placehoolders = RdbmsConnection._get_placeholders(sql_statement, parameters)
    for (variable_name, (variable_type, variable_value)) in placehoolders.iteritems():
        # Only expand parameters whose value corresponds to a list;
        # scalars are left for the database driver to bind.
        if isinstance(variable_value, (list, set, tuple)):
            sql_statement = RdbmsConnection._replace_placeholder(sql_statement,
                (variable_name, variable_type, variable_value))
            # Remove this parameter as it has been expanded inline in the
            # SQL expression; the driver must not see it again.
            del parameters[variable_name]
    return sql_statement
<SYSTEM_TASK:>
Return the string obtained by replacing the specified placeholders by
<END_TASK>
<USER_TASK:>
Description:
def _replace_placeholder(sql_statement, variable):
    """
    Return the string obtained by replacing the specified placeholders by
    their corresponding value.

    @param variable: a (name, type, value) triple where ``type`` is an
        instance of ``PlaceholderType`` and ``value`` replaces the
        placeholder(s) of that name in the SQL statement.
    @return: the SQL statement with the variable's placeholder(s)
        substituted.
    """
    name, kind, value = variable
    if kind == PlaceholderType.simple_list:
        # e.g. "a,b,c"
        expansion = RdbmsConnection._expand_placeholder_value(value)
    else:
        # Nested list: each element becomes a parenthesised group,
        # e.g. "(a,b),(c,d)".
        expansion = ','.join(
            '(%s)' % RdbmsConnection._expand_placeholder_value(item)
            for item in value)
    pattern = PATTERN_SQL_PLACEHOLDER_EXPRESSIONS[kind] % name
    return re.sub(pattern, expansion, sql_statement)
<SYSTEM_TASK:>
Return the SQL string representation of the specified value.
<END_TASK>
<USER_TASK:>
Description:
def _to_sql_value(value, noquote=False):
    """
    Return the SQL string representation of the specified value.

    @param value: a value to convert into its SQL string representation.
    @param noquote: indicate whether to quote or not the specified value.
    @return: a SQL string representation of the specified value.
    """
    # NOTE: Python 2 module (types.NoneType, long, basestring, unicode).
    # Convert to string the values that the database adapter can't adapt
    # to a SQL type.
    # [http://initd.org/psycopg/docs/usage.html#query-parameters]
    if not isinstance(value, (types.NoneType, bool, int, long, float, basestring)):
        value = obj.stringify(value)
    if noquote:
        return value
    # @warning: do not use ``psycopg2.extensions.adapt(value).getquoted()``
    #     because it returns ``str`` object, which is expected as adaptation
    #     is taking a Python object and converting it into a SQL
    #     representation: this is always a bytes string, as it has to be
    #     sent to the socket. However the caller might not use the quoted
    #     value to immediately send it to the database server, but it can
    #     use it for rewriting an SQL statement, which will break the text
    #     encoding.
    # NOTE(review): quotes are doubled before backslashes and percents are
    # escaped — confirm this ordering against the server's E'' string rules.
    return 'NULL' if value is None \
        else '%s' % str(value) if isinstance(value, (bool, int, long, float)) \
        else "e'%s'" % unicode(value).replace("'", "''").replace('\\', '\\\\').replace('%', '%%')
<SYSTEM_TASK:>
Given a launchable resource, create a definition of a standalone
<END_TASK>
<USER_TASK:>
Description:
def standalone_from_launchable(cls, launch):
    """
    Given a launchable resource, create a definition of a standalone
    instance, which doesn't depend on or contain references to other
    elements.

    @param launch: launchable resource whose ``el_attrs`` dict is copied
        and stripped of launch-configuration-only properties.
    @return: an ``EC2Instance`` built from the cleaned attributes.
    @raise RuntimeError: if the launchable already carries an InstanceId.
    """
    attrs = copy.copy(launch.el_attrs)
    # Remove attributes we overwrite / don't need.
    # FIX: ``dict.has_key`` was replaced with ``in`` — has_key was removed
    # in Python 3 and ``in`` is equivalent on Python 2.
    del attrs["Type"]
    if "DependsOn" in attrs:
        del attrs["DependsOn"]
    properties = attrs["Properties"]
    # These only make sense for launch configurations, not instances.
    for prop in ("SpotPrice", "InstanceMonitoring", "SecurityGroups"):
        if prop in properties:
            del properties[prop]
    if "InstanceId" in properties:
        raise RuntimeError("Can't make instance from launchable containing InstanceId property")
    inst = EC2Instance(**attrs)
    # TODO: shallow copy?
    inst.iscm = launch.iscm
    return inst
<SYSTEM_TASK:>
Filter available variables
<END_TASK>
<USER_TASK:>
Description:
def filter_data(self, pattern=''):
    """
    Filter available variables.

    Scans the file at ``self.abspath`` up to the 'TIME SERIES' marker and
    returns a dict mapping profile index (line number relative to the
    catalog section) to the matching line.
    """
    matches = {}
    # Lines are indexed relative to the end of the CATALOG section.
    offset = self._attributes['CATALOG'] + 1
    with open(self.abspath) as handle:
        for line_no, line in enumerate(handle):
            if 'TIME SERIES' in line:
                break
            if pattern in line and line_no - offset > 0:
                matches[line_no - offset] = line
    return matches
<SYSTEM_TASK:>
Function to convert an email Message to standard format string
<END_TASK>
<USER_TASK:>
Description:
def canonicalize(message):
    """
    Function to convert an email Message to standard format string

    :param message: email.Message to be converted to standard string
    :return: the standard representation of the email message in bytes
    """
    is_binary_single_part = (not message.is_multipart()
                             and message.get('Content-Transfer-Encoding') == 'binary')
    if not is_binary_single_part:
        # Normalise every line ending to CRLF in three passes.
        flattened = mime_to_bytes(message, 0)
        unified = flattened.replace(b'\r\n', b'\n').replace(b'\r', b'\n')
        return unified.replace(b'\n', b'\r\n')
    # Binary payloads must not be re-encoded: emit headers manually and
    # append the decoded payload untouched.
    decoded_payload = message.get_payload(decode=True)
    header_block = ''
    for k, v in message.items():
        header_block += '{}: {}\r\n'.format(k, v)
    header_block += '\r\n'
    return header_block.encode('utf-8') + decoded_payload
<SYSTEM_TASK:>
Function to extract the first part of a multipart message
<END_TASK>
<USER_TASK:>
Description:
def extract_first_part(message, boundary):
    """ Function to extract the first part of a multipart message"""
    first = message.split(boundary)[1].lstrip()
    # Trim the trailing line ending: two bytes for CRLF, otherwise one.
    trailer = 2 if first.endswith(b'\r\n') else 1
    return first[:-trailer]
<SYSTEM_TASK:>
Converts a given certificate or list to PEM format
<END_TASK>
<USER_TASK:>
Description:
def pem_to_der(cert, return_multiple=True):
    """ Converts a given PEM certificate (or bundle) to DER bytes.

    @param cert: certificate bytes, either PEM-armoured or already DER.
    @param return_multiple: when True return the whole list of DER
        certificates found; otherwise return a single certificate
        (``list.pop`` yields the *last* parsed element).
    """
    # initialize the certificate array
    cert_list = []
    # If the certificate is PEM-armoured, strip the armour; otherwise
    # assume it is already DER and pass it through.
    if pem.detect(cert):
        for _, _, der_bytes in pem.unarmor(cert, multiple=True):
            cert_list.append(der_bytes)
    else:
        cert_list.append(cert)
    # return the list if return_multiple is set, else a single element
    # (NOTE: pop() returns the last certificate of a multi-cert bundle)
    if return_multiple:
        return cert_list
    else:
        return cert_list.pop()
<SYSTEM_TASK:>
Verify a given certificate against a trust store
<END_TASK>
<USER_TASK:>
Description:
def verify_certificate_chain(cert_str, trusted_certs, ignore_self_signed=True):
    """ Verify a given certificate against a trust store"""
    # Load the certificate under test (DER/ASN.1 encoded).
    certificate = crypto.load_certificate(crypto.FILETYPE_ASN1, cert_str)
    try:
        # Build a store holding the trusted certificates; optionally add
        # the certificate itself so self-signed chains validate.
        store = crypto.X509Store()
        if ignore_self_signed:
            store.add_cert(certificate)
        for trusted in trusted_certs:
            store.add_cert(
                crypto.load_certificate(crypto.FILETYPE_ASN1, trusted))
        # verify_certificate() returns None on success and raises on failure.
        crypto.X509StoreContext(store, certificate).verify_certificate()
        return True
    except crypto.X509StoreContextError as e:
        raise AS2Exception('Partner Certificate Invalid: %s' % e.args[-1][-1])
<SYSTEM_TASK:>
Context processor that provides a CSRF token, or the string 'NOTPROVIDED'
<END_TASK>
<USER_TASK:>
Description:
def csrf(request):
    """
    Context processor that provides a CSRF token, or the string 'NOTPROVIDED'
    if it has not been provided by either a view decorator or the middleware
    """
    def _get_val():
        token = get_token(request)
        if token is None:
            # In order to be able to provide debugging info in the
            # case of misconfiguration, we use a sentinel value
            # instead of returning an empty dict.
            return 'NOTPROVIDED'
        else:
            # Mask the token with a fresh random key (XOR, then base64),
            # joined as b'key$value' — presumably so the rendered token
            # differs on every request (BREACH-style masking);
            # NOTE(review): confirm `xor`/`b64_encode` semantics in the
            # enclosing module.
            token = force_bytes(token, encoding='latin-1')
            key = force_bytes(
                get_random_string(len(token)),
                encoding='latin-1'
            )
            value = b64_encode(xor(token, key))
            return force_text(b'$'.join((key, value)), encoding='latin-1')
    # lazy() defers token generation until {{ csrf_token }} is actually
    # rendered in a template.
    _get_val = lazy(_get_val, text_type)
    return {'csrf_token': _get_val()}
<SYSTEM_TASK:>
Generate URL for a Taskcluster api reference.
<END_TASK>
<USER_TASK:>
Description:
def api_reference(root_url, service, version):
    """Generate URL for a Taskcluster api reference."""
    root_url = root_url.rstrip('/')
    # The legacy deployment keeps its references on a dedicated host.
    if root_url != OLD_ROOT_URL:
        return '{}/references/{}/{}/api.json'.format(root_url, service, version)
    return 'https://references.taskcluster.net/{}/{}/api.json'.format(service, version)
<SYSTEM_TASK:>
Generate URL for a Taskcluster exchange reference.
<END_TASK>
<USER_TASK:>
Description:
def exchange_reference(root_url, service, version):
    """Generate URL for a Taskcluster exchange reference."""
    root_url = root_url.rstrip('/')
    # The legacy deployment keeps its references on a dedicated host.
    if root_url != OLD_ROOT_URL:
        return '{}/references/{}/{}/exchanges.json'.format(root_url, service, version)
    return 'https://references.taskcluster.net/{}/{}/exchanges.json'.format(service, version)
<SYSTEM_TASK:>
Generate URL for a schema in a Taskcluster service.
<END_TASK>
<USER_TASK:>
Description:
def schema(root_url, service, name):
    """Generate URL for a schema in a Taskcluster service."""
    root_url = root_url.rstrip('/')
    name = name.lstrip('/')
    # The legacy deployment serves schemas from a dedicated host.
    if root_url != OLD_ROOT_URL:
        return '{}/schemas/{}/{}'.format(root_url, service, name)
    return 'https://schemas.taskcluster.net/{}/{}'.format(service, name)
<SYSTEM_TASK:>
Example sealer that just does regex-based validation.
<END_TASK>
<USER_TASK:>
Description:
def regex_validation_sealer(fields, defaults, RegexType=type(re.compile(""))):
    """
    Example sealer that just does regex-based validation.

    @param fields: ordered field names of the container being sealed.
    @param defaults: mapping of field name to regex (pattern string or
        pre-compiled) used to validate that field's value.
    @return: a new class whose ``__init__`` validates every positional and
        keyword argument against its regex before delegating upward.
    """
    # Every field must have a default regex; required (default-less)
    # fields cannot be validated by this sealer.
    required = set(fields) - set(defaults)
    if required:
        raise TypeError(
            "regex_validation_sealer doesn't support required arguments. Fields that need fixing: %s" % required)
    # `klass` is reassigned after __init__ is defined; the closure below
    # captures the *variable* (late binding), so super(klass, ...) sees
    # the final class object.
    klass = None
    # Compile plain pattern strings once; keep compiled patterns as-is.
    kwarg_validators = dict(
        (key, val if isinstance(val, RegexType) else re.compile(val)) for key, val in defaults.items()
    )
    # Positional validators in field order.
    arg_validators = list(
        kwarg_validators[key] for key in fields
    )
    def __init__(self, *args, **kwargs):
        for pos, (value, validator) in enumerate(zip(args, arg_validators)):
            if not validator.match(value):
                raise ValidationError("Positional argument %s failed validation. %r doesn't match regex %r" % (
                    pos, value, validator.pattern
                ))
        for key, value in kwargs.items():
            if key in kwarg_validators:
                validator = kwarg_validators[key]
                if not validator.match(value):
                    raise ValidationError("Keyword argument %r failed validation. %r doesn't match regex %r" % (
                        key, value, validator.pattern
                    ))
        super(klass, self).__init__(*args, **kwargs)
    # NOTE(review): ``__base__`` is a free variable expected to be bound by
    # the enclosing sealer framework (the base container class) at
    # class-construction time — confirm against the caller.
    klass = type("RegexValidateBase", (__base__,), dict(
        __init__=__init__,
    ))
    return klass
<SYSTEM_TASK:>
Read function for PI server. It has to be executed by python 32 bit.
<END_TASK>
<USER_TASK:>
Description:
def PI_read(server, tag, start=None, end=None, frequency=None):
    """
    Read function for PI server. It has to be executed by python 32 bit.

    @param server: name of the PI server to connect to.
    @param tag: PI point (tag) name to read.
    @param start: optional datetime, beginning of the averaging window.
    @param end: optional datetime, end of the averaging window.
    @param frequency: sampling interval handed to ``Summaries2``.
    @return: tuple ``(description, uom, data)`` — tag description,
        engineering unit, and a numpy array of averages / a single
        average / the current snapshot, depending on start/end.
    """
    # COM automation objects — requires the 32-bit PI SDK installed.
    pisdk = Dispatch('PISDK.PISDK')
    my_server = pisdk.Servers(server)
    # Not sure if/when the login is necessary
    #con = Dispatch('PISDKDlg.Connections')
    #con.Login(my_server, '', '', 1, 0)
    time_start = Dispatch('PITimeServer.PITimeFormat')
    time_end = Dispatch('PITimeServer.PITimeFormat')
    sample_point = my_server.PIPoints[tag]
    # Engineering unit and human-readable description of the tag.
    uom = sample_point.PointAttributes.Item("EngUnits").Value
    description = sample_point.PointAttributes.Item('Descriptor').Value
    if start != None and end != None:
        # returns a range of values (average)
        time_start.InputString = start.strftime('%m-%d-%Y %H:%M:%S')
        time_end.InputString = end.strftime('%m-%d-%Y %H:%M:%S')
        # NOTE(review): the magic arguments 5 and 0 are PI SDK summary-type
        # and calculation-basis codes — confirm against the SDK reference.
        sample_values = sample_point.Data.Summaries2(time_start, time_end,
                                                     frequency, 5, 0, None)
        values = sample_values('Average').Value
        data = np.array([x.Value for x in values])
    elif start != None and end == None:
        # returns a single value at the start time (1-second window)
        end = start + timedelta(seconds=1)
        time_start.InputString = start.strftime('%m-%d-%Y %H:%M:%S')
        time_end.InputString = end.strftime('%m-%d-%Y %H:%M:%S')
        sample_values = sample_point.Data.Summaries2(time_start, time_end,
                                                     frequency, 5, 0, None)
        values = sample_values('Average').Value
        data = [x.Value for x in values][0]
    else:
        # returns the actual value
        data = sample_point.data.Snapshot.Value
    return description, uom, np.array(data)
<SYSTEM_TASK:>
Remove some useless stuff from the head of a csv file generated by unisim
<END_TASK>
<USER_TASK:>
Description:
def unisim_csv_formatting(path, fname):
    """
    Strip the UniSim-generated preamble from an exported csv file and
    return its contents as a pandas dataframe (with a ``unit`` attribute
    carrying the unit-of-measure row).
    """
    fullpath = path + fname
    with open(fullpath, 'r') as fobj:
        raw_lines = fobj.readlines()
    # Row 10 holds the column names, row 11 the units of measure; both
    # end with a trailing comma, hence the [:-1] trim.
    header = raw_lines[9].split(",")[:-1]
    unit_of_measures = raw_lines[10].split(",")[:-1]
    frame = pd.read_csv(fullpath,
                        skiprows=10,
                        index_col=0,
                        usecols=(range(0, len(header))),
                        na_values=('Shutdown', 'Bad',
                                   'I/O Timeout', 'Scan Timeout'))
    frame.columns = header[1:]
    frame.unit = unit_of_measures[1:]
    return frame
<SYSTEM_TASK:>
Extract a specific stripchart and expose the data in the namespace
<END_TASK>
<USER_TASK:>
Description:
def extract_stripchart(self, stripchart='overall', expose_data=True):
    """
    Extract a specific stripchart and expose the data in the namespace.

    :param stripchart: name of the stripchart defined in the UniSim case.
    :param expose_data: when True, also load the exported csv into
        ``self.stripcharts[stripchart]`` as a dataframe.

    Side effects: writes and deletes a temporary .SCP script beside the
    case file, toggles the UniSim UI visibility while the script plays,
    and archives the exported csv under ``<path>/trends/<stripchart>.csv``.
    """
    # Derive the export csv and script names from the case file name.
    csv_fname = self.fname.split(os.sep)[-1].replace(".usc", ".csv")
    scp_fname = self.fname.split(os.sep)[-1].replace(".usc", ".SCP")
    # repr()[1:-1] yields escaped path strings for the script template --
    # presumably needed by the UniSim scripting engine; TODO confirm.
    case_details = {'case': self.fname.__repr__()[1:-1],
                    'stripchart': stripchart,
                    'target': self.path.__repr__()[1:-1] + csv_fname}
    script = STRIPCHART_EXTRACTION_TEMPLATE.substitute(case_details)
    with open(self.path + scp_fname, 'w') as fobj:
        fobj.write(script)
    # The case must be visible while the extraction script plays.
    self.case.visible = True
    self.case.application.playscript(self.path + scp_fname)
    self.case.visible = False
    os.remove(self.path + scp_fname)
    if expose_data is True:
        self.stripcharts[stripchart] = unisim_csv_formatting(self.path,
                                                             csv_fname)
    # Archive the raw csv under trends/ and remove the working copy.
    if os.path.isdir(self.path+'trends') is not True:
        os.mkdir(self.path + 'trends')
    shutil.copy(self.path + csv_fname,
                self.path + 'trends\\{}.csv'.format(stripchart))
    os.remove(self.path + csv_fname)
<SYSTEM_TASK:>
Extract all the profiles of a specific pipeline and exposes the data
<END_TASK>
<USER_TASK:>
Description:
def extract_profiles(self, pipeline_name, expose_data=True):
    """
    Extract all the profiles of a specific pipeline and expose the data
    in the namespace.

    :param pipeline_name: name of the pipeline whose profiles to export.
    :param expose_data: when True, also store each profile dataframe in
        ``self.profiles[pipeline_name][key]``.

    Each profile is written to ``<path>/profiles/<pipeline>-<key>.csv``.
    """
    compas_pipe = self.__profile_definition(pipeline_name)
    # COM method names are case-insensitive, hence the odd spelling.
    get_variable = compas_pipe.GEtUserVariable
    if os.path.isdir(self.path+'profiles') is not True:
        os.mkdir(self.path + 'profiles')
    target_dir = self.path + 'profiles'
    if expose_data is True:
        self.profiles[pipeline_name] = {}
    for key in PROFILE_KEYS:
        pipe = self.pipes[pipeline_name]
        pipe['data'][key] = get_variable(PROFILE_KEYS[key]).Variable()
        # Index the raw values by position (comprehension scope does not
        # clobber the outer `key` loop variable in Python 3).
        temp = {key: val for (key, val) in enumerate(pipe['data'][key])}
        # Fall back to the non-standard grid when the profile length does
        # not match the standard one.
        try:
            data = pd.DataFrame(temp, index=pipe['grid'])
        except ValueError:
            data = pd.DataFrame(temp, index=pipe['non_st_grid'])
        data.columns = self.pipes[pipeline_name]['timesteps']
        data.to_csv('{}/{}-{}.csv'.format(target_dir, pipeline_name, key))
        if expose_data is True:
            self.profiles[pipeline_name][key] = data
<SYSTEM_TASK:>
Run a case until the specified endtime
<END_TASK>
<USER_TASK:>
Description:
def run_until(self, endtime, timeunit='minutes', save=True):
    """
    Advance the integrator until the specified end time.

    :param endtime: simulation time to run up to.
    :param timeunit: unit of ``endtime`` (default ``'minutes'``).
    :param save: when True, save the case once the run completes.
    """
    self.case.solver.Integrator.rununtil(endtime, timeunit)
    if save is True:
        self.case.save()
<SYSTEM_TASK:>
Save the current case
<END_TASK>
<USER_TASK:>
Description:
def save(self, fname=''):
    """
    Save the current case.

    :param fname: when empty, the case is saved in place; otherwise it
        is saved as ``<path><sep><fname>``.
    """
    # The original `fname is ''` relied on small-string interning (and
    # raises a SyntaxWarning on modern CPython); test emptiness instead.
    if not fname:
        self.case.save()
    else:
        self.case.SaveAs(self.path + os.sep + fname)
<SYSTEM_TASK:>
This sealer returns an equivalent of a ``namedtuple``.
<END_TASK>
<USER_TASK:>
Description:
def tuple_sealer(fields, defaults):
    """
    This sealer returns an equivalent of a ``namedtuple``.

    :param fields: sequence of field names, in positional order.
    :param defaults: default values forwarded to ``make_init_func``.
    :return: a new ``tuple`` subclass with one read-only property per
        field, a generated ``__new__`` and a namedtuple-style ``__repr__``.
    """
    baseclass_name = 'FieldsBase_for__{0}'.format('__'.join(fields))
    # make_init_func generates the __new__ body via exec; only the local
    # namespace it returns is used here.
    global_namespace, local_namespace = make_init_func(
        fields, defaults, baseclass_name,
        header_name='__new__',
        header_start='def {func_name}(cls',
        header_end='):\n',
        super_call_start='return tuple.__new__(cls, (',
        super_call_end='))\n',
        super_call_pass_kwargs=False, set_attributes=False,
    )

    def __getnewargs__(self):
        # Support copy/pickle by replaying the positional values.
        return tuple(self)

    def __repr__(self):
        return "{0}({1})".format(
            self.__class__.__name__,
            ", ".join(a + "=" + repr(getattr(self, a)) for a in fields)
        )

    # Each field becomes a property reading its tuple slot; __slots__ is
    # empty so instances stay as lean as plain tuples.
    return type(baseclass_name, (tuple,), dict(
        [(name, property(itemgetter(i))) for i, name in enumerate(fields)],
        __new__=local_namespace['__new__'],
        __getnewargs__=__getnewargs__,
        __repr__=__repr__,
        __slots__=(),
    ))
<SYSTEM_TASK:>
Private method to define the tab type
<END_TASK>
<USER_TASK:>
Description:
def _tab_type(self):
"""
Private method to define the tab type
""" |
with open(self.abspath) as fobj:
contents = fobj.readlines()
for line in contents:
if 'COMPONENTS' in line:
return 'keyword'
else:
return 'fixed' |
<SYSTEM_TASK:>
Private method for a single extraction on a fixed-type tab file
<END_TASK>
<USER_TASK:>
Description:
def _partial_extraction_fixed(self, idx, extra_idx=0):
"""
Private method for a single extraction on a fixed-type tab file
""" |
myarray = np.array([])
with open(self.abspath) as fobj:
contents = fobj.readlines()[idx+extra_idx:]
for line in contents:
try:
vals = re.findall(r' *[\w\-\+\.]*', line)
temp = np.array([float(val) for val in vals
if val not in ('', ' ')])
myarray = np.hstack((myarray, temp))
except ValueError:
break
return myarray |
<SYSTEM_TASK:>
Exports all the properties for a fixed-type tab file
<END_TASK>
<USER_TASK:>
Description:
def _export_all_fixed(self):
    """
    Exports all the properties for a fixed-type tab file.

    Builds the flattened (temperature, pressure) grid from the metadata
    arrays and attaches, for every property row in ``self.data``, the
    grid plus the extracted values.
    """
    array_ts = []
    array_ps = []
    # Cartesian product flattens the T/P grid in row-major order; note
    # that only element [0] of each metadata array is used -- presumably
    # the arrays are wrapped in an outer dimension; TODO confirm.
    for array_t, array_p in it.product(self.metadata["t_array"][0],
                                       self.metadata["p_array"][0]):
        array_ts.append(array_t)
        array_ps.append(array_p/1e5)  # /1e5: Pa -> bar, presumably; verify
    # One identical grid copy per property row.
    array_ts_tot = [array_ts for t in self.data.index]
    array_ps_tot = [array_ps for t in self.data.index]
    values = []
    for idx in self.data.index:
        values.append(self._partial_extraction_fixed(idx+1))
    self.data["Temperature"] = array_ts_tot
    self.data["Pressure"] = array_ps_tot
    self.data["values"] = values
<SYSTEM_TASK:>
Define the most important tab parameters for a keyword-type tab file
<END_TASK>
<USER_TASK:>
Description:
def _metadata_keyword(self):
    """
    Define the most important tab parameters for a keyword-type tab file.

    Populates ``self.metadata`` (fluid labels, pressure/temperature
    arrays, property column names and their counts) and initializes
    ``self.data`` with one row per property.
    """
    with open(self.abspath) as fobj:
        for line in fobj:
            if 'PVTTABLE LABEL' in line:
                # Capture the label text after '=' up to the next comma.
                label = re.findall(r"\=[\w\ \"]*\,", line)[0][1:-1]
                self.metadata["fluids"].append(label)
            if 'PRESSURE = (' in line:
                line = line.split('=')[1]
                # Numbers may be in scientific notation, hence eE+- chars.
                vals = re.findall(r'[\d\-\.eE+]+', line)
                self.metadata['p_array'] = np.array([float(val) for val in
                                                     vals])
            if 'TEMPERATURE = (' in line:
                line = line.split('=')[1]
                vals = re.findall(r'[\d\-\.eE+]+', line)
                self.metadata['t_array'] = np.array([float(val) for val in
                                                     vals])
            if 'COLUMNS = (' in line:
                # Strip spaces and the surrounding parentheses, then split
                # the comma-separated property names.
                line = line.split('=')[1].replace(" ", "").\
                    replace('(', '').replace(')\n', '')
                self.metadata['properties'] = line.split(',')
    self.metadata["t_points"] = len(self.metadata["t_array"])
    self.metadata["p_points"] = len(self.metadata["p_array"])
    self.metadata["nfluids"] = len(self.metadata["fluids"])
    self.data = pd.DataFrame(self.metadata["properties"])
<SYSTEM_TASK:>
It makes all the properties available as data attribute
<END_TASK>
<USER_TASK:>
Description:
def export_all(self):
    """
    Make every property available through the ``data`` attribute by
    dispatching to the exporter matching the detected tab type.
    """
    exporters = {
        'fixed': self._export_all_fixed,
        'keyword': self._export_all_keyword,
    }
    exporter = exporters.get(self.tab_type)
    if exporter is not None:
        exporter()
<SYSTEM_TASK:>
Read packets from the socket and parse them
<END_TASK>
<USER_TASK:>
Description:
def read_packets(self):
    """Read packets from the socket and parse them.

    Loops until ``self.running`` is cleared.  Each packet is prefixed by
    a little-endian signed 16-bit length that includes the two prefix
    bytes themselves (hence the ``- 2``).
    """
    while self.running:
        packet_length = self.client.recv(2)
        if len(packet_length) < 2:
            # Short read: the connection was closed by the peer.
            self.stop()
            continue
        packet_length = struct.unpack("<h", packet_length)[0] - 2
        data = self.client.recv(packet_length)
        packno = data[0]
        try:
            # Parser classes are looked up by hex packet id, e.g.
            # packet 0x1B -> packets.Packet1BParser.
            parser = "Packet" + format(packno, 'x').upper() + "Parser"
            packet_class = getattr(packets, parser)
            packet_class().parse(self.world, self.player, data, self._evman)
        except AttributeError as e:
            # No parser implemented for this packet id -- skip it.
            pass
        if packno == 2:
            # Packet 2 ends the session -- presumably a disconnect;
            # TODO confirm against the protocol definition.
            self.stop()
            continue
<SYSTEM_TASK:>
Open sockets to the server and start threads
<END_TASK>
<USER_TASK:>
Description:
def start(self):
    """Open a socket to the server and start the worker threads.

    Does nothing when either the read or the write thread is already
    running.
    """
    # Thread.isAlive() was removed in Python 3.9; is_alive() is the
    # long-standing replacement.
    if not self.writeThread.is_alive() and not self.readThread.is_alive():
        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.client.connect(self.ADDR)
        self.running = True
        self.writeThread.start()
        self.readThread.start()
<SYSTEM_TASK:>
Add or update an existing parameter.
<END_TASK>
<USER_TASK:>
Description:
def set_param(self, name: str, v) -> None:
    """Store *v* under the CRC32 hash of *name*, replacing any previous value."""
    key = zlib.crc32(name.encode())
    self.params[key] = v
<SYSTEM_TASK:>
Add or update an existing object.
<END_TASK>
<USER_TASK:>
Description:
def set_object(self, name: str, pobj: ParameterObject) -> None:
    """Store *pobj* under the CRC32 hash of *name*, replacing any previous entry."""
    key = zlib.crc32(name.encode())
    self.objects[key] = pobj
<SYSTEM_TASK:>
Add or update an existing list.
<END_TASK>
<USER_TASK:>
Description:
def set_list(self, name: str, plist) -> None:
    """Store *plist* under the CRC32 hash of *name*, replacing any previous entry."""
    key = zlib.crc32(name.encode())
    self.lists[key] = plist
<SYSTEM_TASK:>
Sets up the queue runners for data input
<END_TASK>
<USER_TASK:>
Description:
def queue_setup(filename, mode, batch_size, num_readers, min_examples):
    """ Sets up the queue runners for data input.

    :param filename: TFRecord file to read from.
    :param mode: "train" uses a shuffling queue; anything else uses FIFO.
    :param batch_size: used only to size the examples queue capacity.
    :param num_readers: number of parallel TFRecordReader enqueue ops.
    :param min_examples: minimum number of buffered examples (shuffle
        quality for the RandomShuffleQueue).
    :return: a dequeued serialized example tensor.
    """
    filename_queue = tf.train.string_input_producer([filename], shuffle=True, capacity=16)
    # Training shuffles examples; evaluation keeps deterministic order.
    if mode == "train":
        examples_queue = tf.RandomShuffleQueue(capacity=min_examples + 3 * batch_size,
                                               min_after_dequeue=min_examples, dtypes=[tf.string])
    else:
        examples_queue = tf.FIFOQueue(capacity=min_examples + 3 * batch_size, dtypes=[tf.string])
    # One reader per enqueue op, all feeding the shared examples queue.
    enqueue_ops = list()
    for _ in range(num_readers):
        reader = tf.TFRecordReader()
        _, value = reader.read(filename_queue)
        enqueue_ops.append(examples_queue.enqueue([value]))
    tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))
    example_serialized = examples_queue.dequeue()
    return example_serialized
<SYSTEM_TASK:>
Sets up the threads within each reader
<END_TASK>
<USER_TASK:>
Description:
def thread_setup(read_and_decode_fn, example_serialized, num_threads):
    """ Sets up the threads within each reader.

    :param read_and_decode_fn: callable applied once per thread to the
        serialized example.
    :param example_serialized: serialized example (passed unchanged to
        every call).
    :param num_threads: number of decode ops to create.
    :return: list with one decode result per thread.
    """
    # A comprehension replaces the manual append loop (same behavior).
    return [read_and_decode_fn(example_serialized) for _ in range(num_threads)]
<SYSTEM_TASK:>
Load config dict and yaml dict and then override both with flags dict.
<END_TASK>
<USER_TASK:>
Description:
def load_config_yaml(self, flags, config_dict):
    """ Combine the flags, config and yaml dictionaries.

    Flags always win; the yaml file (named by ``flags['YAML_FILE']``) is
    merged into the config dict first.  Falls back gracefully when the
    config dict or the yaml file name is missing.
    """
    if config_dict is None:
        print('Config File not specified. Using only input flags.')
        return flags
    try:
        merged_yaml = self.cfg_from_file(flags['YAML_FILE'], config_dict)
    except KeyError:
        print('Yaml File not specified. Using only input flags and config file.')
        return config_dict
    print('Using input flags, config file, and yaml file.')
    return self._merge_a_into_b_simple(flags, merged_yaml)
<SYSTEM_TASK:>
Fill in all optional keys with None. Exit if a crucial key is not defined.
<END_TASK>
<USER_TASK:>
Description:
def check_dict_keys(self, config_yaml_flags_dict):
    """ Fill in all optional keys with defaults; exit if a crucial key
    is not defined.

    Crucial keys abort the program when absent; optional keys default to
    None; RUN_NUM and NUM_EPOCHS default to 0 and 1 respectively.
    """
    for crucial in ('MODEL_DIRECTORY', 'SAVE_DIRECTORY'):
        if crucial not in config_yaml_flags_dict:
            print('You must define %s. Now exiting...' % crucial)
            exit()
    for optional in ('RESTORE_SLIM_FILE', 'RESTORE_META', 'RESTORE_SLIM',
                     'SEED', 'GPU'):
        if optional not in config_yaml_flags_dict:
            config_yaml_flags_dict[optional] = None
            print('%s in flags, yaml or config dictionary was not found.' % optional)
    config_yaml_flags_dict.setdefault('RUN_NUM', 0)
    config_yaml_flags_dict.setdefault('NUM_EPOCHS', 1)
    return config_yaml_flags_dict
<SYSTEM_TASK:>
Create and define logging directory
<END_TASK>
<USER_TASK:>
Description:
def _check_file_io(self):
    """ Create and define logging directory.

    Builds RESTORE_DIRECTORY and LOGGING_DIRECTORY from the save/model
    directories and run numbers, creates the logging directory, and
    redirects ``sys.stdout`` to a Logger writing ModelInformation.log
    (side effect: all subsequent prints are captured there).
    """
    folder = 'Model' + str(self.flags['RUN_NUM']) + '/'
    folder_restore = 'Model' + str(self.flags['MODEL_RESTORE']) + '/'
    self.flags['RESTORE_DIRECTORY'] = self.flags['SAVE_DIRECTORY'] + self.flags[
        'MODEL_DIRECTORY'] + folder_restore
    self.flags['LOGGING_DIRECTORY'] = self.flags['SAVE_DIRECTORY'] + self.flags[
        'MODEL_DIRECTORY'] + folder
    self.make_directory(self.flags['LOGGING_DIRECTORY'])
    # NOTE(review): stdout is globally redirected from here on.
    sys.stdout = Logger(self.flags['LOGGING_DIRECTORY'] + 'ModelInformation.log')
    print(self.flags)
<SYSTEM_TASK:>
Sets up summary writer, saver, and session, with configurable gpu visibility
<END_TASK>
<USER_TASK:>
Description:
def _set_tf_functions(self):
    """ Sets up summary writer, saver, and session, with configurable gpu
    visibility.

    :return: tuple ``(merged_summaries, saver, session, summary_writer)``.
    """
    merged = tf.summary.merge_all()
    saver = tf.train.Saver()
    # Restrict TF to a single GPU when flags['GPU'] is an int id.
    if type(self.flags['GPU']) is int:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(self.flags['GPU'])
        print('Using GPU %d' % self.flags['GPU'])
    # allow_growth avoids grabbing all GPU memory up front.
    gpu_options = tf.GPUOptions(allow_growth=True)
    config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options)
    sess = tf.Session(config=config)
    writer = tf.summary.FileWriter(self.flags['LOGGING_DIRECTORY'], sess.graph)
    return merged, saver, sess, writer
<SYSTEM_TASK:>
Restore from meta file. 'RESTORE_META_FILE' is expected to have .meta at the end.
<END_TASK>
<USER_TASK:>
Description:
def _restore_meta(self):
    """ Restore from meta file. 'RESTORE_META_FILE' is expected to have
    .meta at the end.

    Imports the graph from the .meta file and restores the matching
    checkpoint (same path without the 5-char '.meta' suffix).
    """
    restore_meta_file = self._get_restore_meta_file()
    # NOTE(review): _get_restore_meta_file() is called twice; presumably
    # it is a pure getter -- confirm before deduplicating.
    filename = self.flags['RESTORE_DIRECTORY'] + self._get_restore_meta_file()
    new_saver = tf.train.import_meta_graph(filename)
    # [:-5] strips '.meta' to get the checkpoint prefix.
    new_saver.restore(self.sess, filename[:-5])
    print("Model restored from %s" % restore_meta_file)
<SYSTEM_TASK:>
Initialize the defined network and restore from files if so specified.
<END_TASK>
<USER_TASK:>
Description:
def _initialize_model(self):
    """ Initialize the defined network and restore from files if so
    specified.

    Restore precedence: .meta graph restore first, then TF-Slim weights,
    otherwise train from scratch.  Variables are always initialized
    before any restore overwrites them.
    """
    # Initialize all variables first
    self.sess.run(tf.local_variables_initializer())
    self.sess.run(tf.global_variables_initializer())
    if self.flags['RESTORE_META'] == 1:
        print('Restoring from .meta file')
        self._restore_meta()
    elif self.flags['RESTORE_SLIM'] == 1:
        print('Restoring TF-Slim Model.')
        all_model_variables = tf.global_variables()
        self._restore_slim(all_model_variables)
    else:
        print("Model training from scratch.")
<SYSTEM_TASK:>
Initialize all other trainable variables, i.e. those which are uninitialized
<END_TASK>
<USER_TASK:>
Description:
def _init_uninit_vars(self):
    """ Initialize all other trainable variables, i.e. those which are
    still uninitialized (e.g. after a partial restore).
    """
    uninit_vars = self.sess.run(tf.report_uninitialized_variables())
    # report_uninitialized_variables yields bytes names; decode once and
    # use a set for O(1) membership (was an append loop + list scan).
    uninit_names = {v.decode("utf-8") for v in uninit_vars}
    uninit_vars_tf = [v for v in tf.global_variables()
                      if v.name.split(':')[0] in uninit_names]
    self.sess.run(tf.variables_initializer(var_list=uninit_vars_tf))
<SYSTEM_TASK:>
Save model in the logging directory
<END_TASK>
<USER_TASK:>
Description:
def _save_model(self, section):
""" Save model in the logging directory """ |
checkpoint_name = self.flags['LOGGING_DIRECTORY'] + 'part_%d' % section + '.ckpt'
save_path = self.saver.save(self.sess, checkpoint_name)
print("Model saved in file: %s" % save_path) |
<SYSTEM_TASK:>
Adds summary to writer and increments the step.
<END_TASK>
<USER_TASK:>
Description:
def _record_training_step(self, summary):
""" Adds summary to writer and increments the step. """ |
self.writer.add_summary(summary=summary, global_step=self.step)
self.step += 1 |
<SYSTEM_TASK:>
Set random seed for numpy and tensorflow packages
<END_TASK>
<USER_TASK:>
Description:
def _set_seed(self):
    """ Seed numpy and tensorflow RNGs when a SEED flag is present. """
    seed = self.flags['SEED']
    if seed is None:
        return
    tf.set_random_seed(seed)
    np.random.seed(seed)
<SYSTEM_TASK:>
Print out summaries for every variable. Can be overriden in main function.
<END_TASK>
<USER_TASK:>
Description:
def _summaries(self):
    """ Register a histogram summary for each trainable variable and echo
    its name. Can be overridden in the main function. """
    for variable in tf.trainable_variables():
        tf.summary.histogram(variable.name, variable)
        print(variable.name)
<SYSTEM_TASK:>
Returns a string for various input types
<END_TASK>
<USER_TASK:>
Description:
def check_str(obj):
    """ Coerce *obj* to a string: strings pass through, floats are
    truncated to their integer part first, everything else uses str(). """
    if isinstance(obj, str):
        result = obj
    elif isinstance(obj, float):
        result = str(int(obj))
    else:
        result = str(obj)
    return result
<SYSTEM_TASK:>
Removes 'model' scoping if it is present in order to properly restore weights.
<END_TASK>
<USER_TASK:>
Description:
def name_in_checkpoint(var):
    """ Strip a leading 'model/' scope from *var*'s op name so checkpoint
    weights resolve; returns None when the scope is absent. """
    prefix = 'model/'
    full_name = var.op.name
    if full_name.startswith(prefix):
        return full_name[len(prefix):]
<SYSTEM_TASK:>
Merge config dictionary a into config dictionary b, clobbering the
<END_TASK>
<USER_TASK:>
Description:
def _merge_a_into_b(self, a, b):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.

    :param a: EasyDict of overriding options (silently ignored when it is
        not an EasyDict).
    :param b: base config dictionary, mutated in place.
    :raises KeyError: when a contains a key absent from b.
    :raises ValueError: when the value types differ and cannot be coerced.
    :return: b (or None when a is not an EasyDict).
    """
    from easydict import EasyDict as edict
    if type(a) is not edict:
        return
    for k, v in a.items():
        # a must specify keys that are in b
        if k not in b:
            raise KeyError('{} is not a valid config key'.format(k))
        # the types must match, too; ndarray values are coerced to the
        # existing dtype instead of failing.
        old_type = type(b[k])
        if old_type is not type(v):
            if isinstance(b[k], np.ndarray):
                v = np.array(v, dtype=b[k].dtype)
            else:
                raise ValueError(('Type mismatch ({} vs. {}) '
                                  'for config key: {}').format(type(b[k]),
                                                               type(v), k))
        # recursively merge dicts
        if type(v) is edict:
            try:
                self._merge_a_into_b(a[k], b[k])
            except:
                # bare except: annotate which key failed, then re-raise.
                print('Error under config key: {}'.format(k))
                raise
        else:
            b[k] = v
    return b
<SYSTEM_TASK:>
Merge config dictionary a into config dictionary b, clobbering the
<END_TASK>
<USER_TASK:>
Description:
def _merge_a_into_b_simple(self, a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a. Do not do any checking.
""" |
for k, v in a.items():
b[k] = v
return b |
<SYSTEM_TASK:>
Load a config file and merge it into the default options.
<END_TASK>
<USER_TASK:>
Description:
def cfg_from_file(self, yaml_filename, config_dict):
    """Load a config file and merge it into the default options.

    :param yaml_filename: path of the YAML overrides file.
    :param config_dict: base configuration to merge into.
    :return: the merged configuration (see ``_merge_a_into_b``).
    """
    import yaml
    from easydict import EasyDict as edict
    with open(yaml_filename, 'r') as f:
        # SECURITY: yaml.load without an explicit Loader can execute
        # arbitrary Python tags -- only use on trusted config files
        # (yaml.safe_load would be the hardened choice).
        yaml_cfg = edict(yaml.load(f))
    return self._merge_a_into_b(yaml_cfg, config_dict)
<SYSTEM_TASK:>
Function encrypts data and returns the generated ASN.1
<END_TASK>
<USER_TASK:>
Description:
def encrypt_message(data_to_encrypt, enc_alg, encryption_cert):
    """Function encrypts data and returns the generated ASN.1

    :param data_to_encrypt: A byte string of the data to be encrypted
    :param enc_alg: The algorithm to be used for encrypting the data,
        as a 'cipher_keylength_mode' string (e.g. 'tripledes_192_cbc')
    :param encryption_cert: The certificate to be used for encrypting the data
    :return: A CMS ASN.1 byte string of the encrypted data.
    """
    enc_alg_list = enc_alg.split('_')
    cipher, key_length, mode = enc_alg_list[0], enc_alg_list[1], enc_alg_list[2]
    enc_alg_asn1, key, encrypted_content = None, None, None
    # Generate the symmetric encryption key and encrypt the message
    # NOTE(review): only the 'tripledes' branch is implemented; any other
    # cipher leaves key/enc_alg_asn1 as None and fails further down --
    # confirm whether more branches exist outside this view.
    if cipher == 'tripledes':
        key = util.rand_bytes(int(key_length)//8)   # key length bits -> bytes
        iv, encrypted_content = symmetric.tripledes_cbc_pkcs5_encrypt(
            key, data_to_encrypt, None)
        enc_alg_asn1 = algos.EncryptionAlgorithm({
            'algorithm': algos.EncryptionAlgorithmId('tripledes_3key'),
            'parameters': cms.OctetString(iv)
        })
    # Encrypt the key and build the ASN.1 message: the symmetric key is
    # wrapped with the recipient's RSA public key (KeyTransRecipientInfo,
    # identified by issuer + serial number of the certificate).
    encrypted_key = asymmetric.rsa_pkcs1v15_encrypt(encryption_cert, key)
    return cms.ContentInfo({
        'content_type': cms.ContentType('enveloped_data'),
        'content': cms.EnvelopedData({
            'version': cms.CMSVersion('v0'),
            'recipient_infos': [
                cms.KeyTransRecipientInfo({
                    'version': cms.CMSVersion('v0'),
                    'rid': cms.RecipientIdentifier({
                        'issuer_and_serial_number': cms.IssuerAndSerialNumber({
                            'issuer': encryption_cert.asn1[
                                'tbs_certificate']['issuer'],
                            'serial_number': encryption_cert.asn1[
                                'tbs_certificate']['serial_number']
                        })
                    }),
                    'key_encryption_algorithm': cms.KeyEncryptionAlgorithm({
                        'algorithm': cms.KeyEncryptionAlgorithmId('rsa')
                    }),
                    'encrypted_key': cms.OctetString(encrypted_key)
                })
            ],
            'encrypted_content_info': cms.EncryptedContentInfo({
                'content_type': cms.ContentType('data'),
                'content_encryption_algorithm': enc_alg_asn1,
                'encrypted_content': encrypted_content
            })
        })
    }).dump()
<SYSTEM_TASK:>
Function to load password protected key file in p12 or pem format
<END_TASK>
<USER_TASK:>
Description:
def load_key(key_str, key_pass):
    """ Function to load password protected key file in p12 or pem format.

    :param key_str: byte string holding the key container contents.
    :param key_pass: password protecting the private key.
    :raises AS2Exception: when the password is wrong or the container is
        missing either the private key or the certificate.
    :return: tuple ``(private_key, certificate)``.
    """
    try:
        # First try to parse as a p12 file
        key, cert, _ = asymmetric.load_pkcs12(key_str, key_pass)
    except ValueError as e:
        # If it fails due to invalid password raise error here
        if e.args[0] == 'Password provided is invalid':
            raise AS2Exception('Password not valid for Private Key.')
        # if not try to parse as a pem file: the PEM container may hold
        # several blocks; pick up the certificate and the private key.
        key, cert = None, None
        for kc in split_pem(key_str):
            try:
                cert = asymmetric.load_certificate(kc)
            except (ValueError, TypeError):
                try:
                    key = asymmetric.load_private_key(kc, key_pass)
                except OSError:
                    # NOTE(review): OSError is what the oscrypto backend
                    # raises on a bad key/password here -- confirm.
                    raise AS2Exception(
                        'Invalid Private Key or password is not correct.')
    if not key or not cert:
        raise AS2Exception(
            'Invalid Private key file or Public key not included.')
    return key, cert
<SYSTEM_TASK:>
Function returns the body of the as2 payload as a bytes object
<END_TASK>
<USER_TASK:>
Description:
def content(self):
    """Function returns the body of the as2 payload as a bytes object.

    NOTE(review): when there is no payload an empty str ('') is returned
    instead of b'' -- confirm whether callers rely on that.
    """
    if not self.payload:
        return ''
    if self.payload.is_multipart():
        # Normalize to CRLF line endings as required for AS2 transport.
        message_bytes = mime_to_bytes(
            self.payload, 0).replace(b'\n', b'\r\n')
        boundary = b'--' + self.payload.get_boundary().encode('utf-8')
        # Drop everything before the first boundary (the MIME headers).
        temp = message_bytes.split(boundary)
        temp.pop(0)
        return boundary + boundary.join(temp)
    else:
        content = self.payload.get_payload()
        if isinstance(content, str_cls):
            content = content.encode('utf-8')
        return content
<SYSTEM_TASK:>
Function returns the body of the mdn message as a byte string
<END_TASK>
<USER_TASK:>
Description:
def content(self):
    """Function returns the body of the mdn message as a byte string.

    NOTE(review): returns an empty str ('') when no payload is present,
    bytes otherwise -- confirm whether callers rely on that.
    """
    if self.payload:
        # Normalize to CRLF line endings as required for AS2 transport.
        message_bytes = mime_to_bytes(
            self.payload, 0).replace(b'\n', b'\r\n')
        boundary = b'--' + self.payload.get_boundary().encode('utf-8')
        # Drop everything before the first boundary (the MIME headers).
        temp = message_bytes.split(boundary)
        temp.pop(0)
        return boundary + boundary.join(temp)
    else:
        return ''
<SYSTEM_TASK:>
Function parses the RAW AS2 MDN, verifies it and extracts the
<END_TASK>
<USER_TASK:>
Description:
def parse(self, raw_content, find_message_cb):
    """Function parses the RAW AS2 MDN, verifies it and extracts the
    processing status of the original AS2 message.

    :param raw_content:
        A byte string of the received HTTP headers followed by the body.

    :param find_message_cb:
        A callback the must returns the original Message Object. The
        original message-id and original recipient AS2 ID are passed
        as arguments to it.

    :returns:
        A two element tuple containing (status, detailed_status). The
        status is a string indicating the status of the transaction. The
        optional detailed_status gives additional information about the
        processing status.
    """
    status, detailed_status = None, None
    self.payload = parse_mime(raw_content)
    self.orig_message_id, orig_recipient = self.detect_mdn()

    # Call the find message callback which should return a Message instance
    orig_message = find_message_cb(self.orig_message_id, orig_recipient)

    # Extract the headers and save it
    mdn_headers = {}
    for k, v in self.payload.items():
        k = k.lower()
        if k == 'message-id':
            self.message_id = v.lstrip('<').rstrip('>')
        mdn_headers[k] = v

    # A signed MDN was requested but an unsigned one came back: fail fast.
    if orig_message.receiver.mdn_digest_alg \
            and self.payload.get_content_type() != 'multipart/signed':
        status = 'failed/Failure'
        detailed_status = 'Expected signed MDN but unsigned MDN returned'
        return status, detailed_status

    if self.payload.get_content_type() == 'multipart/signed':
        # Split the signature part from the report part.
        signature = None
        message_boundary = (
            '--' + self.payload.get_boundary()).encode('utf-8')
        for part in self.payload.walk():
            if part.get_content_type() == 'application/pkcs7-signature':
                signature = part.get_payload(decode=True)
            elif part.get_content_type() == 'multipart/report':
                self.payload = part

        # Verify the message, first using raw message and if it fails
        # then convert to canonical form and try again
        mic_content = extract_first_part(raw_content, message_boundary)
        verify_cert = orig_message.receiver.load_verify_cert()
        try:
            self.digest_alg = verify_message(
                mic_content, signature, verify_cert)
        except IntegrityError:
            mic_content = canonicalize(self.payload)
            self.digest_alg = verify_message(
                mic_content, signature, verify_cert)

    # Extract the disposition status (and the returned MIC when the
    # message was processed) from the machine-readable report part.
    for part in self.payload.walk():
        if part.get_content_type() == 'message/disposition-notification':
            # logger.debug('Found MDN report for message %s:\n%s' % (
            #     orig_message.message_id, part.as_string()))
            mdn = part.get_payload()[-1]
            # 'Disposition: automatic-action/MDN-sent-automatically;
            #  processed[/warning: ...]' -> take the part after ';'.
            mdn_status = mdn['Disposition'].split(
                ';').pop().strip().split(':')
            status = mdn_status[0]
            if status == 'processed':
                mdn_mic = mdn.get('Received-Content-MIC', '').split(',')[0]

                # TODO: Check MIC for all cases
                if mdn_mic and orig_message.mic \
                        and mdn_mic != orig_message.mic.decode():
                    status = 'processed/warning'
                    detailed_status = 'Message Integrity check failed.'
            else:
                detailed_status = ' '.join(mdn_status[1:]).strip()
    return status, detailed_status
<SYSTEM_TASK:>
Function checks if the received raw message is an AS2 MDN or not.
<END_TASK>
<USER_TASK:>
Description:
def detect_mdn(self):
    """ Function checks if the received raw message is an AS2 MDN or not.

    :raises MDNNotFound: If the received payload is not an MDN then this
        exception is raised.

    :return:
        A two element tuple containing (message_id, message_recipient). The
        message_id is the original AS2 message id and the message_recipient
        is the original AS2 message recipient.
    """
    mdn_message = None
    if self.payload.get_content_type() == 'multipart/report':
        mdn_message = self.payload
    elif self.payload.get_content_type() == 'multipart/signed':
        for part in self.payload.walk():
            if part.get_content_type() == 'multipart/report':
                # NOTE(review): assigns self.payload rather than `part` --
                # the report is processed via walk() below so this works,
                # but confirm the intent.
                mdn_message = self.payload

    if not mdn_message:
        raise MDNNotFound('No MDN found in the received message')

    message_id, message_recipient = None, None
    for part in mdn_message.walk():
        if part.get_content_type() == 'message/disposition-notification':
            mdn = part.get_payload()[0]
            message_id = mdn.get('Original-Message-ID').strip('<>')
            # 'Original-Recipient: rfc822; <as2-id>' -> take the id part.
            message_recipient = mdn.get(
                'Original-Recipient').split(';')[1].strip()
    return message_id, message_recipient
<SYSTEM_TASK:>
Setup the logging handlers, level and formatters.
<END_TASK>
<USER_TASK:>
Description:
def init_app(self, app):
    """Setup the logging handlers, level and formatters.

    Level (DEBUG, INFO, CRITICAL, etc) is determined by the
    app.config['FLASK_LOG_LEVEL'] setting, and defaults to
    ``None``/``logging.NOTSET``.

    Side effects: configures the root logger (stream handler via
    basicConfig plus a syslog handler) and applies the formatter to all
    handlers.

    :param app: the Flask application whose config is consulted.
    :return: the resolved numeric log level.
    """
    config_log_level = app.config.get('FLASK_LOG_LEVEL', None)

    # Set up format for default logging
    hostname = platform.node().split('.')[0]
    formatter = (
        '[%(asctime)s] %(levelname)s %(process)d [%(name)s] '
        '%(filename)s:%(lineno)d - '
        '[{hostname}] - %(message)s'
    ).format(hostname=hostname)

    config_log_int = None
    set_level = None

    if config_log_level:
        # Translate the level name to its numeric value, rejecting
        # anything that is not a known logging level.
        config_log_int = getattr(logging, config_log_level.upper(), None)
        if not isinstance(config_log_int, int):
            raise ValueError(
                'Invalid log level: {0}'.format(config_log_level)
            )
        set_level = config_log_int

    # Set to NotSet if we still aren't set yet
    if not set_level:
        set_level = config_log_int = logging.NOTSET
    self.log_level = set_level

    # Setup basic StreamHandler logging with format and level (do
    # setup in case we are main, or change root logger if we aren't.
    logging.basicConfig(format=formatter)
    root_logger = logging.getLogger()
    root_logger.setLevel(set_level)

    # Get everything ready to setup the syslog handlers: prefer the
    # local syslog socket (Linux then macOS), fall back to UDP 514.
    address = None
    if os.path.exists('/dev/log'):
        address = '/dev/log'
    elif os.path.exists('/var/run/syslog'):
        address = '/var/run/syslog'
    else:
        address = ('127.0.0.1', 514)

    # Add syslog handler before adding formatters
    root_logger.addHandler(
        SysLogHandler(address=address, facility=SysLogHandler.LOG_LOCAL0)
    )
    self.set_formatter(formatter)
    return config_log_int
<SYSTEM_TASK:>
Override the default log formatter with your own.
<END_TASK>
<USER_TASK:>
Description:
def set_formatter(log_formatter):
    """Apply *log_formatter* to every handler on the root logger,
    overriding whatever formatter each handler had."""
    root = logging.getLogger()
    for current_handler in root.handlers:
        current_handler.setFormatter(logging.Formatter(log_formatter))
<SYSTEM_TASK:>
Make an API request to the Hub
<END_TASK>
<USER_TASK:>
Description:
def api_request(path, **kwargs):
    """Make an API request to the Hub

    to reduce boilerplate repetition.

    :param path: path relative to JUPYTERHUB_API_URL.
    :param kwargs: forwarded to tornado's HTTPRequest (method, body, ...).
    :return: the Future returned by ``AsyncHTTPClient.fetch``.
    """
    url = url_path_join(os.environ['JUPYTERHUB_API_URL'], path)
    client = AsyncHTTPClient()
    # Authenticate with the API token provided by JupyterHub.
    headers = {
        'Authorization': 'token %s' % os.environ['JUPYTERHUB_API_TOKEN'],
    }
    if kwargs.get('method') == 'POST':
        # tornado requires a body (even empty) for POST requests.
        kwargs.setdefault('body', '')
    req = HTTPRequest(url, headers=headers, **kwargs)
    return client.fetch(req)
<SYSTEM_TASK:>
Stop a user's server and delete the user
<END_TASK>
<USER_TASK:>
Description:
async def delete_user(name):
    """Stop a user's server and delete the user.

    The server must be stopped before the user record can be removed,
    hence the two sequential DELETE calls.
    """
    app_log.info("Deleting user %s", name)
    await api_request('users/{}/server'.format(name), method='DELETE')
    await api_request('users/{}'.format(name), method='DELETE')
<SYSTEM_TASK:>
Convert class labels from scalars to one-hot vectors.
<END_TASK>
<USER_TASK:>
Description:
def dense_to_one_hot(labels_dense, num_classes):
    """Convert a vector of scalar class labels into a one-hot matrix of
    shape (len(labels), num_classes)."""
    count = labels_dense.shape[0]
    offsets = np.arange(count) * num_classes
    one_hot = np.zeros((count, num_classes))
    # Flat indexing: row offset + label picks the hot column per row.
    one_hot.flat[offsets + labels_dense.ravel()] = 1
    return one_hot
<SYSTEM_TASK:>
Serializes HTML element attributes in a name="value" pair form.
<END_TASK>
<USER_TASK:>
Description:
def _serialize_attributes(attributes):
    """Render HTML element attributes as a string of name="value" pairs,
    skipping attributes whose value is falsy."""
    parts = []
    for name, value in attributes.items():
        if value:
            parts.append(' ' + _unmangle_attribute_name(name)
                         + '="' + escape(value, True) + '"')
    return ''.join(parts)
<SYSTEM_TASK:>
Unmangles attribute names so that correct Python variable names are
<END_TASK>
<USER_TASK:>
Description:
def _unmangle_attribute_name(name):
    """Map a mangled Python identifier back to its HTML attribute name.

    Python keywords were suffixed per _PYTHON_KEYWORD_MAP; a double
    underscore encoded ':' (colon is illegal in identifiers) and a single
    underscore encoded '-'.
    """
    unmangled = _PYTHON_KEYWORD_MAP.get(name, name)
    unmangled = unmangled.replace('__', ':')
    return unmangled.replace('_', '-')
<SYSTEM_TASK:>
Standard HTML text escaping, but protecting against the aggressive
<END_TASK>
<USER_TASK:>
Description:
def escape(string, quote=False):
    """Standard HTML text escaping, but protecting against the aggressive
    behavior of Jinja 2 `Markup` and the like.

    :param string: text to escape; ``None`` yields ''.  Objects exposing
        ``__html__`` are returned as plain text without re-escaping.
    :param quote: also escape double quotes (for attribute values).
    :return: the escaped string.
    """
    if string is None:
        return ''
    elif hasattr(string, '__html__'):
        # `unicode` does not exist on Python 3; str() is the equivalent.
        return str(string)
    string = string.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
    if quote:
        string = string.replace('"', '&quot;')
    return string
<SYSTEM_TASK:>
A decorator that is used to define that the decorated block function
<END_TASK>
<USER_TASK:>
Description:
def root_block(template_name=DEFAULT_TEMPLATE_NAME):
    """A decorator that is used to define that the decorated block function
    will be at the root of the block template hierarchy. In the usual case
    this will be the HTML skeleton of the document, unless the template is used
    to serve partial HTML rendering for Ajax.

    The :func:`root_block` decorator accepts the following arguments:

    :param template_name: The name of the block template hierarchy which is
                          passed to the :func:`render_template` document
                          rendering function. Different templates are useful
                          for rendering documents with differing layouts
                          (e.g. admin back-end vs. site front-end), or for
                          partial HTML rendering for Ajax.
    """
    def decorator(block_func):
        # Instantiating RootBlock presumably registers block_func as the
        # hierarchy root (side effect); the function itself is returned
        # unchanged so it stays directly callable.
        block = RootBlock(block_func, template_name)
        return block_func
    return decorator
<SYSTEM_TASK:>
A decorator that is used for inserting the decorated block function in
<END_TASK>
<USER_TASK:>
Description:
def block(context_name, parent_block_func, view_func=None):
    """Insert the decorated block function into the block template hierarchy.

    :param context_name: Key in the `g.blocks` dictionary under which the
                         decorated block's result is stored for further
                         processing by `parent_block_func`.
    :param parent_block_func: Parent block function in the template
                              hierarchy that will use the stored result.
    :param view_func: The decorated block only takes effect in the
                      execution context of this view function; with the
                      default `None`, the block is the default for the
                      given `context_name`.  Internally converted to a
                      Werkzeug endpoint the same way Flask does in the
                      `Flask.route` decorator.
    """
    def decorator(block_func):
        child = Block(block_func, view_func)
        # Attach the new block to its parent under the given context key.
        parent = Block.block_mapping[parent_block_func]
        parent.append_context_block(context_name, child)
        return block_func
    return decorator
<SYSTEM_TASK:>
Calculate the number of entropy bits in a list or tuple of elements.
<END_TASK>
<USER_TASK:>
Description:
def entropy_bits(
        lst: Union[
            List[Union[int, str, float, complex]],
            Tuple[Union[int, str, float, complex]]
        ]
) -> float:
    """Calculate the number of entropy bits in a list or tuple of elements.

    Returns the Shannon entropy (in bits) of the distribution obtained by
    picking an element of ``lst`` uniformly at random.

    :raises TypeError: if ``lst`` is not a list/tuple or holds elements
                       other than int, str, float or complex.
    """
    # Based on https://stackoverflow.com/a/45091961
    if not isinstance(lst, (list, tuple)):
        raise TypeError('lst must be a list or a tuple')
    for num in lst:
        if not isinstance(num, (int, str, float, complex)):
            raise TypeError('lst can only be comprised of int, str, float, '
                            'complex')
    n_lst = len(lst)
    if n_lst <= 1:
        # Zero or one element: no uncertainty, hence no entropy.
        return 0.0
    # Shannon entropy sums -p*log2(p) once per *distinct* value, where p
    # is that value's relative frequency.  Iterating over the raw list
    # would add each duplicated value's term several times and
    # overestimate the entropy (e.g. [a, a, b, b] would give 2.0 bits
    # instead of the correct 1.0).
    probs = [lst.count(value) / n_lst for value in set(lst)]
    entropy = 0.0
    for prob in probs:
        entropy -= prob * log2(prob)
    return entropy
<SYSTEM_TASK:>
Calculate the number of entropy bits in a range of numbers.
<END_TASK>
<USER_TASK:>
Description:
def entropy_bits_nrange(
        minimum: Union[int, float], maximum: Union[int, float]
) -> float:
    """Calculate the number of entropy bits in a range of numbers.

    A uniform choice among ``d`` equally likely values carries ``log2(d)``
    bits, where ``d`` is the width of the range.

    :raises TypeError: if a bound is not int or float.
    :raises ValueError: if a bound is negative.
    """
    if not isinstance(minimum, (int, float)):
        raise TypeError('minimum can only be int or float')
    if not isinstance(maximum, (int, float)):
        raise TypeError('maximum can only be int or float')
    if minimum < 0:
        raise ValueError('minimum should be greater than 0')
    if maximum < 0:
        raise ValueError('maximum should be greater than 0')
    # Bound order does not matter: only the width of the range does.
    dif = fabs(maximum - minimum)
    if dif == 0:
        return 0.0
    # Use math.log2 directly instead of the former approximation
    # log10(dif) * 3.321928, whose truncated conversion factor lost
    # precision (log2(10) = 3.3219280948...).
    return log2(dif)
<SYSTEM_TASK:>
Calculate the length of a password for a given entropy and chars.
<END_TASK>
<USER_TASK:>
Description:
def password_length_needed(entropybits: Union[int, float], chars: str) -> int:
    """Calculate the length of a password for a given entropy and chars.

    :raises TypeError: if arguments have the wrong type.
    :raises ValueError: if ``entropybits`` is negative or ``chars`` empty.
    """
    if not isinstance(entropybits, (int, float)):
        raise TypeError('entropybits can only be int or float')
    if entropybits < 0:
        raise ValueError('entropybits should be greater than 0')
    if not isinstance(chars, str):
        raise TypeError('chars can only be string')
    if not chars:
        raise ValueError("chars can't be null")
    # Each character contributes entropy_bits(chars) bits, so the needed
    # length is the requested bits divided by the per-character entropy,
    # rounded up.  (e.g. entropy_bits(list(characters)) = 6.554588)
    bits_per_char = entropy_bits(list(chars))
    return ceil(entropybits / bits_per_char)
<SYSTEM_TASK:>
Calculate words needed for a passphrase based on entropy.
<END_TASK>
<USER_TASK:>
Description:
def words_amount_needed(entropybits: Union[int, float],
                        entropy_w: Union[int, float],
                        entropy_n: Union[int, float],
                        amount_n: int) -> int:
    """Calculate words needed for a passphrase based on entropy.

    :param entropybits: Total entropy bits required.
    :param entropy_w: Entropy bits contributed by each word.
    :param entropy_n: Entropy bits contributed by each number.
    :param amount_n: Amount of numbers in the passphrase.
    :raises TypeError: if arguments have the wrong type.
    :raises ValueError: if a value is out of range.
    """
    # Thanks to @julianor for this tip to calculate default amount of
    # entropy: minbitlen/log2(len(wordlist)).
    # I set the minimum entropy bits and calculate the amount of words
    # needed, cosidering the entropy of the wordlist.
    # Then: entropy_w * amount_w + entropy_n * amount_n >= ENTROPY_BITS_MIN
    if not isinstance(entropybits, (int, float)):
        raise TypeError('entropybits can only be int or float')
    if not isinstance(entropy_w, (int, float)):
        raise TypeError('entropy_w can only be int or float')
    if not isinstance(entropy_n, (int, float)):
        raise TypeError('entropy_n can only be int or float')
    if not isinstance(amount_n, int):
        raise TypeError('amount_n can only be int')
    if entropybits < 0:
        raise ValueError('entropybits should be greater than 0')
    if entropy_w <= 0:
        raise ValueError('entropy_w should be greater than 0')
    if entropy_n < 0:
        raise ValueError('entropy_n should be greater than 0')
    if amount_n < 0:
        raise ValueError('amount_n should be greater than 0')
    amount_w = (entropybits - entropy_n * amount_n) / entropy_w
    # The numbers alone may already satisfy the entropy requirement, in
    # which case no words are needed.  The previous ceil(fabs(amount_w))
    # wrongly turned a small negative remainder (e.g. -0.5) into 1 word.
    return max(ceil(amount_w), 0)
<SYSTEM_TASK:>
Calculate the entropy of a password with given length and chars.
<END_TASK>
<USER_TASK:>
Description:
def password_entropy(length: int, chars: str) -> float:
    """Calculate the entropy of a password with given length and chars.

    :raises TypeError: if arguments have the wrong type.
    :raises ValueError: if ``length`` is negative or ``chars`` empty.
    """
    if not isinstance(length, int):
        raise TypeError('length can only be int')
    if length < 0:
        raise ValueError('length should be greater than 0')
    if not isinstance(chars, str):
        raise TypeError('chars can only be string')
    if not chars:
        raise ValueError("chars can't be null")
    if length == 0:
        # An empty password carries no entropy.
        return 0.0
    # Total entropy grows linearly with the password length.
    return float(length * entropy_bits(list(chars)))
<SYSTEM_TASK:>
Calculate the entropy of a passphrase with given words and numbers.
<END_TASK>
<USER_TASK:>
Description:
def passphrase_entropy(amount_w: int,
                       entropy_w: Union[int, float],
                       entropy_n: Union[int, float],
                       amount_n: int) -> float:
    """Calculate the entropy of a passphrase with given words and numbers.

    :param amount_w: Amount of words in the passphrase.
    :param entropy_w: Entropy bits of each word.
    :param entropy_n: Entropy bits of each number.
    :param amount_n: Amount of numbers in the passphrase.
    :raises TypeError: if arguments have the wrong type.
    :raises ValueError: if a value is negative.
    """
    if not isinstance(amount_w, int):
        raise TypeError('amount_w can only be int')
    if not isinstance(entropy_w, (int, float)):
        raise TypeError('entropy_w can only be int or float')
    if not isinstance(entropy_n, (int, float)):
        raise TypeError('entropy_n can only be int or float')
    if not isinstance(amount_n, int):
        raise TypeError('amount_n can only be int')
    if amount_w < 0:
        raise ValueError('amount_w should be greater than 0')
    if entropy_w < 0:
        raise ValueError('entropy_w should be greater than 0')
    if entropy_n < 0:
        raise ValueError('entropy_n should be greater than 0')
    if amount_n < 0:
        raise ValueError('amount_n should be greater than 0')
    # Words and numbers are chosen independently, so entropies add up.
    total = amount_w * entropy_w + amount_n * entropy_n
    return float(total)
<SYSTEM_TASK:>
Calculate the entropy of a wordlist or a numerical range.
<END_TASK>
<USER_TASK:>
Description:
def entropy_bits(
        lst: Union[
            List[Union[int, str, float, complex]],
            Tuple[Union[int, str, float, complex]]
        ]
) -> float:
    """Calculate the entropy of a wordlist or a numerical range.

    Keyword arguments:
    lst -- A wordlist as list or tuple, or a numerical range as a list:
    (minimum, maximum)
    """
    if not isinstance(lst, (tuple, list)):
        raise TypeError('lst must be a list or a tuple')
    # A two-element all-numeric sequence is interpreted as a
    # (minimum, maximum) range rather than as a two-word list.
    is_numeric_pair = (
        len(lst) == 2
        and all(isinstance(bound, (int, float)) for bound in lst)
    )
    if is_numeric_pair:
        return calc_entropy_bits_nrange(lst[0], lst[1])
    return calc_entropy_bits(lst)
<SYSTEM_TASK:>
Import words for the wordlist from a given file.
<END_TASK>
<USER_TASK:>
Description:
def import_words_from_file(self,
                           inputfile: str,
                           is_diceware: bool) -> None:
    """Import words for the wordlist from a given file.

    The file can have a single column with words or be diceware-like
    (two columns).

    Keyword arguments:
    inputfile -- A string with the path to the wordlist file to load, or
    the value 'internal' to load the internal one.
    is_diceware -- True if the file is diceware-like.

    :raises FileNotFoundError: if the file is missing, invalid or empty.
    """
    if not Aux.isfile_notempty(inputfile):
        raise FileNotFoundError('Input file does not exists, is not valid '
                                'or is empty: {}'.format(inputfile))
    # A freshly imported wordlist invalidates any cached entropy value.
    self._wordlist_entropy_bits = None
    reader = (self._read_words_from_diceware if is_diceware
              else self._read_words_from_wordfile)
    self._wordlist = reader(inputfile)
<SYSTEM_TASK:>
Calculate the needed password length to satisfy the entropy number.
<END_TASK>
<USER_TASK:>
Description:
def password_length_needed(self) -> int:
    """Calculate the needed password length to satisfy the entropy number.

    This is for the given character set.

    :raises ValueError: if entropy_bits_req isn't set or the character
                        set is empty.
    """
    characters = self._get_password_characters()
    if self.entropy_bits_req is None or not characters:
        raise ValueError("Can't calculate the password length needed: "
                         "entropy_bits_req isn't set or the character "
                         "set is empty")
    return calc_password_length_needed(self.entropy_bits_req, characters)
<SYSTEM_TASK:>
Calculate the needed amount of words to satisfy the entropy number.
<END_TASK>
<USER_TASK:>
Description:
def words_amount_needed(self) -> int:
    """Calculate the needed amount of words to satisfy the entropy number.

    This is for the given wordlist.

    :raises ValueError: if the wordlist is empty or entropy_bits_req or
                        amount_n isn't set.
    """
    if (
            self.entropy_bits_req is None
            or self.amount_n is None
            or not self.wordlist
    ):
        raise ValueError("Can't calculate the words amount needed: "
                         "wordlist is empty or entropy_bits_req or "
                         "amount_n isn't set")
    # Thanks to @julianor for this tip to calculate default amount of
    # entropy: minbitlen/log2(len(wordlist)).
    # Solve: entropy_w * amount_w + entropy_n * amount_n >= bits required.
    entropy_n = self.entropy_bits((self.randnum_min, self.randnum_max))
    # Prefer the cached wordlist entropy when available (e.g. ~12.9 for
    # the EFF Large Wordlist); otherwise compute it from the wordlist.
    if self._wordlist_entropy_bits:
        entropy_w = self._wordlist_entropy_bits
    else:
        entropy_w = self.entropy_bits(self.wordlist)
    return calc_words_amount_needed(
        self.entropy_bits_req,
        entropy_w,
        entropy_n,
        self.amount_n
    )
<SYSTEM_TASK:>
Calculate the entropy of a password that would be generated.
<END_TASK>
<USER_TASK:>
Description:
def generated_password_entropy(self) -> float:
    """Calculate the entropy of a password that would be generated.

    :raises ValueError: if the character set is empty or passwordlen
                        isn't set.
    """
    characters = self._get_password_characters()
    if self.passwordlen is None or not characters:
        raise ValueError("Can't calculate the password entropy: character"
                         " set is empty or passwordlen isn't set")
    # A zero-length password carries no entropy at all.
    if not self.passwordlen:
        return 0.0
    return calc_password_entropy(self.passwordlen, characters)
<SYSTEM_TASK:>
Calculate the entropy of a passphrase that would be generated.
<END_TASK>
<USER_TASK:>
Description:
def generated_passphrase_entropy(self) -> float:
    """Calculate the entropy of a passphrase that would be generated.

    :raises ValueError: if the wordlist is empty or amount_n or amount_w
                        isn't set.
    """
    if (
            self.amount_w is None
            or self.amount_n is None
            or not self.wordlist
    ):
        raise ValueError("Can't calculate the passphrase entropy: "
                         "wordlist is empty or amount_n or "
                         "amount_w isn't set")
    if self.amount_n == 0 and self.amount_w == 0:
        # Nothing would be generated, hence zero entropy.
        return 0.0
    # Entropy contributed by each random number in the passphrase.
    entropy_n = self.entropy_bits((self.randnum_min, self.randnum_max))
    # Prefer the cached wordlist entropy when available (e.g. ~12.9 for
    # the EFF Large Wordlist); otherwise compute it from the wordlist.
    if self._wordlist_entropy_bits:
        entropy_w = self._wordlist_entropy_bits
    else:
        entropy_w = self.entropy_bits(self.wordlist)
    return calc_passphrase_entropy(self.amount_w, entropy_w, entropy_n,
                                   self.amount_n)
<SYSTEM_TASK:>
Generate a list of words randomly chosen from a wordlist.
<END_TASK>
<USER_TASK:>
Description:
def generate(self, uppercase: int = None) -> list:
    """Generate a list of words randomly chosen from a wordlist.

    Keyword arguments:
    uppercase -- An integer number indicating how many uppercase
    characters are wanted: bigger than zero means that many characters and
    lower than zero means all uppercase except that many. Use 0 to make
    them all uppercase, and None for no one.

    :raises ValueError: if the wordlist is empty or amount_n or amount_w
                        isn't set.
    :raises TypeError: if uppercase is neither None nor an int.
    """
    if (
            self.amount_n is None
            or self.amount_w is None
            or not self.wordlist
    ):
        raise ValueError("Can't generate passphrase: "
                         "wordlist is empty or amount_n or "
                         "amount_w isn't set")
    if uppercase is not None and not isinstance(uppercase, int):
        raise TypeError('uppercase must be an integer number')
    # Pick the words first, normalized to lowercase.
    passphrase = [randchoice(self.wordlist).lower()
                  for _ in range(self.amount_w)]
    lowercase = Aux.lowercase_count(passphrase)
    # Handle uppercase
    if passphrase and uppercase is not None:
        # A negative count means "all but that many": translate it into
        # an absolute amount of uppercase characters when possible.
        if uppercase < 0 and lowercase > -uppercase:
            uppercase += lowercase
        # If it's still negative, then means no uppercase
        if uppercase == 0 or uppercase > lowercase:
            # Make it all uppercase
            passphrase = Aux.make_all_uppercase(passphrase)
        elif uppercase > 0:
            passphrase = Aux.make_chars_uppercase(passphrase, uppercase)
    # Handle numbers: appended after the words.
    passphrase.extend(randbetween(MIN_NUM, MAX_NUM)
                      for _ in range(self.amount_n))
    self.last_result = passphrase
    return passphrase
<SYSTEM_TASK:>
Generate a list of random characters.
<END_TASK>
<USER_TASK:>
Description:
def generate_password(self) -> list:
    """Generate a list of random characters.

    :raises ValueError: if the character set is empty or passwordlen
                        isn't set.
    """
    characterset = self._get_password_characters()
    if self.passwordlen is None or not characterset:
        raise ValueError("Can't generate password: character set is "
                         "empty or passwordlen isn't set")
    # One independent random pick per character.
    password = [randchoice(characterset) for _ in range(self.passwordlen)]
    self.last_result = password
    return password
<SYSTEM_TASK:>
Generate a list of parts of a UUID version 4 string.
<END_TASK>
<USER_TASK:>
Description:
def generate_uuid4(self) -> list:
    """Generate a list of parts of a UUID version 4 string.

    Usually, these parts are concatenated together using dashes.
    """
    # Layout: 8-4-4-4-12 hex digits, xxxxxxxx-xxxx-4xxx-Nxxx-xxxxxxxxxxxx
    # where N is one of 8, 9, a, b.
    # Request all the randomness at once instead of in small chunks.
    hexstr = randhex(30)
    variant = '{:x}'.format(randbetween(8, 11))
    uuid4 = [
        hexstr[0:8],
        hexstr[8:12],
        '4' + hexstr[12:15],       # version nibble
        variant + hexstr[15:18],   # variant nibble
        hexstr[18:],
    ]
    self.last_result = uuid4
    return uuid4
<SYSTEM_TASK:>
generate fk field related to class wait popup crud
<END_TASK>
<USER_TASK:>
Description:
def get_fk_popup_field(cls, *args, **kwargs):
    """
    generate fk field related to class wait popup crud
    """
    # The popup inherits this class' display name and permissions.
    kwargs['popup_name'] = cls.get_class_verbose_name()
    kwargs['permissions_required'] = cls.permissions_required
    if cls.template_name_fk is not None:
        kwargs['template_name'] = cls.template_name_fk
    url_name = '{}_popup_create'.format(cls.get_class_name())
    return ForeignKeyWidget(url_name, *args, **kwargs)
<SYSTEM_TASK:>
generate m2m field related to class wait popup crud
<END_TASK>
<USER_TASK:>
Description:
def get_m2m_popup_field(cls, *args, **kwargs):
    """
    generate m2m field related to class wait popup crud
    """
    # The popup inherits this class' display name and permissions.
    kwargs['popup_name'] = cls.get_class_verbose_name()
    kwargs['permissions_required'] = cls.permissions_required
    if cls.template_name_m2m is not None:
        kwargs['template_name'] = cls.template_name_m2m
    url_name = '{}_popup_create'.format(cls.get_class_name())
    return ManyToManyWidget(url_name, *args, **kwargs)
<SYSTEM_TASK:>
Make all characters uppercase.
<END_TASK>
<USER_TASK:>
Description:
def make_all_uppercase(
        lst: Union[list, tuple, str, set]
) -> Union[list, tuple, str, set]:
    """Make all characters uppercase.

    It supports characters in a (mix of) list, tuple, set or string.
    The return value is of the same type of the input value.
    """
    if not isinstance(lst, (list, tuple, str, set)):
        raise TypeError('lst must be a list, a tuple, a set or a string')
    if isinstance(lst, str):
        return lst.upper()
    # Recurse into nested containers and strings, leaving any other
    # element untouched.
    uppercased = [
        Aux.make_all_uppercase(item)
        if isinstance(item, (list, tuple, str, set)) else item
        for item in lst
    ]
    # Restore the caller's container type.
    if isinstance(lst, set):
        return set(uppercased)
    if isinstance(lst, tuple):
        return tuple(uppercased)
    return uppercased
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.