<SYSTEM_TASK:>
Local method for handling errors while processing SQL
<END_TASK>
<USER_TASK:>
Description:
def handle_query_error(msg, query, session, payload=None):
"""Local method handling error while processing the SQL""" |
payload = payload or {}
troubleshooting_link = config['TROUBLESHOOTING_LINK']
query.error_message = msg
query.status = QueryStatus.FAILED
query.tmp_table_name = None
session.commit()
payload.update({
'status': query.status,
'error': msg,
})
if troubleshooting_link:
payload['link'] = troubleshooting_link
return payload
<SYSTEM_TASK:>
attempts to get the query, retrying if it cannot be retrieved
<END_TASK>
<USER_TASK:>
Description:
def get_query(query_id, session, retry_count=5):
"""attemps to get the query and retry if it cannot""" |
query = None
attempt = 0
while not query and attempt < retry_count:
try:
query = session.query(Query).filter_by(id=query_id).one()
except Exception:
attempt += 1
logging.error(
'Query with id `{}` could not be retrieved'.format(query_id))
stats_logger.incr('error_attempting_orm_query_' + str(attempt))
logging.error('Sleeping for a sec before retrying...')
sleep(1)
if not query:
stats_logger.incr('error_failed_at_getting_orm_query')
raise SqlLabException('Failed at getting query')
return query
<SYSTEM_TASK:>
Returns l without what is in minus
<END_TASK>
<USER_TASK:>
Description:
def list_minus(l: List, minus: List) -> List:
"""Returns l without what is in minus
>>> list_minus([1, 2, 3], [2])
[1, 3]
""" |
return [o for o in l if o not in minus]
<SYSTEM_TASK:>
Returns ``datetime.datetime`` from human readable strings
<END_TASK>
<USER_TASK:>
Description:
def parse_human_datetime(s):
"""
Returns ``datetime.datetime`` from human readable strings
>>> from datetime import date, timedelta
>>> from dateutil.relativedelta import relativedelta
>>> parse_human_datetime('2015-04-03')
datetime.datetime(2015, 4, 3, 0, 0)
>>> parse_human_datetime('2/3/1969')
datetime.datetime(1969, 2, 3, 0, 0)
>>> parse_human_datetime('now') <= datetime.now()
True
>>> parse_human_datetime('yesterday') <= datetime.now()
True
>>> date.today() - timedelta(1) == parse_human_datetime('yesterday').date()
True
>>> year_ago_1 = parse_human_datetime('one year ago').date()
>>> year_ago_2 = (datetime.now() - relativedelta(years=1)).date()
>>> year_ago_1 == year_ago_2
True
""" |
if not s:
return None
try:
dttm = parse(s)
except Exception:
try:
cal = parsedatetime.Calendar()
parsed_dttm, parsed_flags = cal.parseDT(s)
# when time is not extracted, we 'reset to midnight'
if parsed_flags & 2 == 0:
parsed_dttm = parsed_dttm.replace(hour=0, minute=0, second=0)
dttm = dttm_from_timtuple(parsed_dttm.utctimetuple())
except Exception as e:
logging.exception(e)
raise ValueError("Couldn't parse date string [{}]".format(s))
return dttm
<SYSTEM_TASK:>
Function to be passed into json.loads object_hook parameter
<END_TASK>
<USER_TASK:>
Description:
def decode_dashboards(o):
"""
Function to be passed into json.loads object_hook parameter.
Recreates the dashboard object from a json representation.
""" |
import superset.models.core as models
from superset.connectors.sqla.models import (
SqlaTable, SqlMetric, TableColumn,
)
if '__Dashboard__' in o:
d = models.Dashboard()
d.__dict__.update(o['__Dashboard__'])
return d
elif '__Slice__' in o:
d = models.Slice()
d.__dict__.update(o['__Slice__'])
return d
elif '__TableColumn__' in o:
d = TableColumn()
d.__dict__.update(o['__TableColumn__'])
return d
elif '__SqlaTable__' in o:
d = SqlaTable()
d.__dict__.update(o['__SqlaTable__'])
return d
elif '__SqlMetric__' in o:
d = SqlMetric()
d.__dict__.update(o['__SqlMetric__'])
return d
elif '__datetime__' in o:
return datetime.strptime(o['__datetime__'], '%Y-%m-%dT%H:%M:%S')
else:
return o
<SYSTEM_TASK:>
Formats datetime to take less room when it is recent
<END_TASK>
<USER_TASK:>
Description:
def datetime_f(dttm):
"""Formats datetime to take less room when it is recent""" |
if dttm:
dttm = dttm.isoformat()
now_iso = datetime.now().isoformat()
if now_iso[:10] == dttm[:10]:
dttm = dttm[11:]
elif now_iso[:4] == dttm[:4]:
dttm = dttm[5:]
return '<nobr>{}</nobr>'.format(dttm)
<SYSTEM_TASK:>
Translate exception into error message
<END_TASK>
<USER_TASK:>
Description:
def error_msg_from_exception(e):
"""Translate exception into error message
Databases have different ways to handle exceptions. This function attempts
to make sense of the exception object and construct a human-readable
sentence.
TODO(bkyryliuk): parse the Presto error message from the connection
created via create_engine.
engine = create_engine('presto://localhost:3506/silver') -
gives an e.message as the str(dict)
presto.connect('localhost', port=3506, catalog='silver') - as a dict.
The latter version is parsed correctly by this function.
""" |
msg = ''
if hasattr(e, 'message'):
if isinstance(e.message, dict):
msg = e.message.get('message')
elif e.message:
msg = '{}'.format(e.message)
return msg or '{}'.format(e)
<SYSTEM_TASK:>
Utility to find a foreign-key constraint name in alembic migrations
<END_TASK>
<USER_TASK:>
Description:
def generic_find_fk_constraint_name(table, columns, referenced, insp):
"""Utility to find a foreign-key constraint name in alembic migrations""" |
for fk in insp.get_foreign_keys(table):
if fk['referred_table'] == referenced and set(fk['referred_columns']) == columns:
return fk['name']
<SYSTEM_TASK:>
Utility to find foreign-key constraint names in alembic migrations
<END_TASK>
<USER_TASK:>
Description:
def generic_find_fk_constraint_names(table, columns, referenced, insp):
"""Utility to find foreign-key constraint names in alembic migrations""" |
names = set()
for fk in insp.get_foreign_keys(table):
if fk['referred_table'] == referenced and set(fk['referred_columns']) == columns:
names.add(fk['name'])
return names
<SYSTEM_TASK:>
Utility to find a unique constraint name in alembic migrations
<END_TASK>
<USER_TASK:>
Description:
def generic_find_uq_constraint_name(table, columns, insp):
"""Utility to find a unique constraint name in alembic migrations""" |
for uq in insp.get_unique_constraints(table):
if columns == set(uq['column_names']):
return uq['name']
<SYSTEM_TASK:>
Return `since` and `until` date time tuple from string representations of
<END_TASK>
<USER_TASK:>
Description:
def get_since_until(time_range: Optional[str] = None,
since: Optional[str] = None,
until: Optional[str] = None,
time_shift: Optional[str] = None,
relative_end: Optional[str] = None) -> Tuple[datetime, datetime]:
"""Return `since` and `until` date time tuple from string representations of
time_range, since, until and time_shift.
This function supports both reading the keys separately (from `since` and
`until`), as well as the new `time_range` key. Valid formats are:
- ISO 8601
- X days/years/hours/day/year/weeks
- X days/years/hours/day/year/weeks ago
- X days/years/hours/day/year/weeks from now
- freeform
Additionally, for `time_range` (these specify both `since` and `until`):
- Last day
- Last week
- Last month
- Last quarter
- Last year
- No filter
- Last X seconds/minutes/hours/days/weeks/months/years
- Next X seconds/minutes/hours/days/weeks/months/years
""" |
separator = ' : '
relative_end = parse_human_datetime(relative_end if relative_end else 'today')
common_time_frames = {
'Last day': (relative_end - relativedelta(days=1), relative_end), # noqa: T400
'Last week': (relative_end - relativedelta(weeks=1), relative_end), # noqa: T400
'Last month': (relative_end - relativedelta(months=1), relative_end), # noqa: E501, T400
'Last quarter': (relative_end - relativedelta(months=3), relative_end), # noqa: E501, T400
'Last year': (relative_end - relativedelta(years=1), relative_end), # noqa: T400
}
if time_range:
if separator in time_range:
since, until = time_range.split(separator, 1)
if since and since not in common_time_frames:
since = add_ago_to_since(since)
since = parse_human_datetime(since)
until = parse_human_datetime(until)
elif time_range in common_time_frames:
since, until = common_time_frames[time_range]
elif time_range == 'No filter':
since = until = None
else:
rel, num, grain = time_range.split()
if rel == 'Last':
since = relative_end - relativedelta(**{grain: int(num)}) # noqa: T400
until = relative_end
else: # rel == 'Next'
since = relative_end
until = relative_end + relativedelta(**{grain: int(num)}) # noqa: T400
else:
since = since or ''
if since:
since = add_ago_to_since(since)
since = parse_human_datetime(since)
until = parse_human_datetime(until) if until else relative_end
if time_shift:
time_shift = parse_human_timedelta(time_shift)
since = since if since is None else (since - time_shift) # noqa: T400
until = until if until is None else (until - time_shift) # noqa: T400
if since and until and since > until:
raise ValueError(_('From date cannot be larger than to date'))
return since, until
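A hedged usage sketch (assuming this function and its helpers are importable from the surrounding utils module):
# since, until = get_since_until('Last week')
# since, until = get_since_until(time_range='2018-01-01 : 2018-02-01')
# since, until = get_since_until(since='7 days ago', until='today', time_shift='1 day')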
<SYSTEM_TASK:>
Mutates form data to restructure the adhoc filters in the form of the four base
<END_TASK>
<USER_TASK:>
Description:
def split_adhoc_filters_into_base_filters(fd):
"""
Mutates form data to restructure the adhoc filters in the form of the four base
filters, `where`, `having`, `filters`, and `having_filters`, which represent
free-form WHERE SQL, free-form HAVING SQL, structured WHERE clauses, and
structured HAVING clauses.
""" |
adhoc_filters = fd.get('adhoc_filters')
if isinstance(adhoc_filters, list):
simple_where_filters = []
simple_having_filters = []
sql_where_filters = []
sql_having_filters = []
for adhoc_filter in adhoc_filters:
expression_type = adhoc_filter.get('expressionType')
clause = adhoc_filter.get('clause')
if expression_type == 'SIMPLE':
if clause == 'WHERE':
simple_where_filters.append({
'col': adhoc_filter.get('subject'),
'op': adhoc_filter.get('operator'),
'val': adhoc_filter.get('comparator'),
})
elif clause == 'HAVING':
simple_having_filters.append({
'col': adhoc_filter.get('subject'),
'op': adhoc_filter.get('operator'),
'val': adhoc_filter.get('comparator'),
})
elif expression_type == 'SQL':
if clause == 'WHERE':
sql_where_filters.append(adhoc_filter.get('sqlExpression'))
elif clause == 'HAVING':
sql_having_filters.append(adhoc_filter.get('sqlExpression'))
fd['where'] = ' AND '.join(['({})'.format(sql) for sql in sql_where_filters])
fd['having'] = ' AND '.join(['({})'.format(sql) for sql in sql_having_filters])
fd['having_filters'] = simple_having_filters
fd['filters'] = simple_where_filters
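A minimal sketch of the mutation on hypothetical form data (the keys follow the expressionType/clause handling above):
fd = {'adhoc_filters': [
    {'expressionType': 'SIMPLE', 'clause': 'WHERE',
     'subject': 'gender', 'operator': '==', 'comparator': 'girl'},
    {'expressionType': 'SQL', 'clause': 'HAVING',
     'sqlExpression': 'COUNT(*) > 10'},
]}
split_adhoc_filters_into_base_filters(fd)
# fd['filters'] == [{'col': 'gender', 'op': '==', 'val': 'girl'}]
# fd['having'] == '(COUNT(*) > 10)', fd['where'] == '', fd['having_filters'] == []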
<SYSTEM_TASK:>
Loads an energy related dataset to use with sankey and graphs
<END_TASK>
<USER_TASK:>
Description:
def load_energy():
"""Loads an energy related dataset to use with sankey and graphs""" |
tbl_name = 'energy_usage'
data = get_example_data('energy.json.gz')
pdf = pd.read_json(data)
pdf.to_sql(
tbl_name,
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'source': String(255),
'target': String(255),
'value': Float(),
},
index=False)
print('Creating table [{}] reference'.format(tbl_name))
tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not tbl:
tbl = TBL(table_name=tbl_name)
tbl.description = 'Energy consumption'
tbl.database = utils.get_or_create_main_db()
if not any(col.metric_name == 'sum__value' for col in tbl.metrics):
tbl.metrics.append(SqlMetric(
metric_name='sum__value',
expression='SUM(value)',
))
db.session.merge(tbl)
db.session.commit()
tbl.fetch_metadata()
slc = Slice(
slice_name='Energy Sankey',
viz_type='sankey',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"collapsed_fieldsets": "",
"groupby": [
"source",
"target"
],
"having": "",
"metric": "sum__value",
"row_limit": "5000",
"slice_name": "Energy Sankey",
"viz_type": "sankey",
"where": ""
}
"""),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc)
slc = Slice(
slice_name='Energy Force Layout',
viz_type='directed_force',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"charge": "-500",
"collapsed_fieldsets": "",
"groupby": [
"source",
"target"
],
"having": "",
"link_length": "200",
"metric": "sum__value",
"row_limit": "5000",
"slice_name": "Force",
"viz_type": "directed_force",
"where": ""
}
"""),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc)
slc = Slice(
slice_name='Heatmap',
viz_type='heatmap',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"all_columns_x": "source",
"all_columns_y": "target",
"canvas_image_rendering": "pixelated",
"collapsed_fieldsets": "",
"having": "",
"linear_color_scheme": "blue_white_yellow",
"metric": "sum__value",
"normalize_across": "heatmap",
"slice_name": "Heatmap",
"viz_type": "heatmap",
"where": "",
"xscale_interval": "1",
"yscale_interval": "1"
}
"""),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc)
<SYSTEM_TASK:>
Starts a Superset web server.
<END_TASK>
<USER_TASK:>
Description:
def runserver(debug, console_log, use_reloader, address, port, timeout, workers, socket):
"""Starts a Superset web server.""" |
debug = debug or config.get('DEBUG') or console_log
if debug:
print(Fore.BLUE + '-=' * 20)
print(
Fore.YELLOW + 'Starting Superset server in ' +
Fore.RED + 'DEBUG' +
Fore.YELLOW + ' mode')
print(Fore.BLUE + '-=' * 20)
print(Style.RESET_ALL)
if console_log:
console_log_run(app, port, use_reloader)
else:
debug_run(app, port, use_reloader)
else:
logging.info(
"The Gunicorn 'superset runserver' command is deprecated. Please "
"use the 'gunicorn' command instead.")
addr_str = f' unix:{socket} ' if socket else f' {address}:{port} '
cmd = (
'gunicorn '
f'-w {workers} '
f'--timeout {timeout} '
f'-b {addr_str} '
'--limit-request-line 0 '
'--limit-request-field_size 0 '
'superset:app'
)
print(Fore.GREEN + 'Starting server with command: ')
print(Fore.YELLOW + cmd)
print(Style.RESET_ALL)
Popen(cmd, shell=True).wait()
<SYSTEM_TASK:>
Prints the current version number
<END_TASK>
<USER_TASK:>
Description:
def version(verbose):
"""Prints the current version number""" |
print(Fore.BLUE + '-=' * 15)
print(Fore.YELLOW + 'Superset ' + Fore.CYAN + '{version}'.format(
version=config.get('VERSION_STRING')))
print(Fore.BLUE + '-=' * 15)
if verbose:
print('[DB] : ' + '{}'.format(db.engine))
print(Style.RESET_ALL)
<SYSTEM_TASK:>
Starts a Superset worker for async SQL query execution.
<END_TASK>
<USER_TASK:>
Description:
def worker(workers):
"""Starts a Superset worker for async SQL query execution.""" |
logging.info(
"The 'superset worker' command is deprecated. Please use the 'celery "
"worker' command instead.")
if workers:
celery_app.conf.update(CELERYD_CONCURRENCY=workers)
elif config.get('SUPERSET_CELERY_WORKERS'):
celery_app.conf.update(
CELERYD_CONCURRENCY=config.get('SUPERSET_CELERY_WORKERS'))
worker = celery_app.Worker(optimization='fair')
worker.start()
<SYSTEM_TASK:>
Runs a Celery Flower web server
<END_TASK>
<USER_TASK:>
Description:
def flower(port, address):
"""Runs a Celery Flower web server
Celery Flower is a UI to monitor the Celery operation on a given
broker""" |
BROKER_URL = celery_app.conf.BROKER_URL
cmd = (
'celery flower '
f'--broker={BROKER_URL} '
f'--port={port} '
f'--address={address} '
)
logging.info(
"The 'superset flower' command is deprecated. Please use the 'celery "
"flower' command instead.")
print(Fore.GREEN + 'Starting a Celery Flower instance')
print(Fore.BLUE + '-=' * 40)
print(Fore.YELLOW + cmd)
print(Fore.BLUE + '-=' * 40)
Popen(cmd, shell=True).wait()
<SYSTEM_TASK:>
endpoint that refreshes druid datasources metadata
<END_TASK>
<USER_TASK:>
Description:
def refresh_datasources(self, refreshAll=True):
"""endpoint that refreshes druid datasources metadata""" |
session = db.session()
DruidCluster = ConnectorRegistry.sources['druid'].cluster_class
for cluster in session.query(DruidCluster).all():
cluster_name = cluster.cluster_name
valid_cluster = True
try:
cluster.refresh_datasources(refreshAll=refreshAll)
except Exception as e:
valid_cluster = False
flash(
"Error while processing cluster '{}'\n{}".format(
cluster_name, utils.error_msg_from_exception(e)),
'danger')
logging.exception(e)
if valid_cluster:
cluster.metadata_last_refreshed = datetime.now()
flash(
_('Refreshed metadata from cluster [{}]').format(
cluster.cluster_name),
'info')
session.commit()
return redirect('/druiddatasourcemodelview/list/')
<SYSTEM_TASK:>
converts the linked list representation of a non-negative number into a string.
<END_TASK>
<USER_TASK:>
Description:
def convert_to_str(l: Node) -> str:
"""
converts the linked list representation of a non-negative number into a string.
""" |
result = ""
while l:
result += str(l.val)
l = l.next
return result
<SYSTEM_TASK:>
The length of the longest common subsequence of the two given strings s1 and s2
<END_TASK>
<USER_TASK:>
Description:
def lcs(s1, s2, i, j):
"""
The length of the longest common subsequence of the two given strings s1 and s2
""" |
if i == 0 or j == 0:
return 0
elif s1[i - 1] == s2[j - 1]:
return 1 + lcs(s1, s2, i - 1, j - 1)
else:
return max(lcs(s1, s2, i - 1, j), lcs(s1, s2, i, j - 1))
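The plain recursion above is exponential; a bottom-up sketch of the same recurrence (an illustrative rewrite, not part of the original snippet) runs in O(len(s1) * len(s2)):
def lcs_dp(s1, s2):
    # dp[i][j] holds the LCS length of s1[:i] and s2[:j]
    dp = [[0] * (len(s2) + 1) for _ in range(len(s1) + 1)]
    for i in range(1, len(s1) + 1):
        for j in range(1, len(s2) + 1):
            if s1[i - 1] == s2[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]
# lcs('abcde', 'ace', 5, 3) == lcs_dp('abcde', 'ace') == 3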
<SYSTEM_TASK:>
find the nth digit of given number.
<END_TASK>
<USER_TASK:>
Description:
def find_nth_digit(n):
"""find the nth digit of given number.
1. find the length of the number where the nth digit is from.
2. find the actual number where the nth digit is from
3. find the nth digit and return
""" |
length = 1
count = 9
start = 1
while n > length * count:
n -= length * count
length += 1
count *= 10
start *= 10
start += (n - 1) // length  # integer division; '/' would produce a float in Python 3
s = str(start)
return int(s[(n-1) % length])
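For example, in the digit stream 123456789101112... (a sanity check, not from the source):
# find_nth_digit(3) == 3
# find_nth_digit(11) == 0   # the second digit of 10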
<SYSTEM_TASK:>
Return True if n is a prime number
<END_TASK>
<USER_TASK:>
Description:
def prime_check(n):
"""Return True if n is a prime number
Else return False.
""" |
if n <= 1:
return False
if n == 2 or n == 3:
return True
if n % 2 == 0 or n % 3 == 0:
return False
j = 5
while j * j <= n:
if n % j == 0 or n % (j + 2) == 0:
return False
j += 6
return True
<SYSTEM_TASK:>
Find the length of the longest substring
<END_TASK>
<USER_TASK:>
Description:
def longest_non_repeat_v1(string):
"""
Find the length of the longest substring
without repeating characters.
""" |
if string is None:
return 0
last_seen = {}  # renamed from `dict` to avoid shadowing the builtin
max_length = 0
j = 0
for i in range(len(string)):
if string[i] in last_seen:
j = max(last_seen[string[i]], j)
last_seen[string[i]] = i + 1
max_length = max(max_length, i - j + 1)
return max_length
<SYSTEM_TASK:>
Find the length of the longest substring
<END_TASK>
<USER_TASK:>
Description:
def longest_non_repeat_v2(string):
"""
Find the length of the longest substring
without repeating characters.
Uses alternative algorithm.
""" |
if string is None:
return 0
start, max_len = 0, 0
used_char = {}
for index, char in enumerate(string):
if char in used_char and start <= used_char[char]:
start = used_char[char] + 1
else:
max_len = max(max_len, index - start + 1)
used_char[char] = index
return max_len
<SYSTEM_TASK:>
Find the length of the longest substring
<END_TASK>
<USER_TASK:>
Description:
def get_longest_non_repeat_v1(string):
"""
Find the length of the longest substring
without repeating characters.
Return max_len and the substring as a tuple
""" |
if string is None:
return 0, ''
sub_string = ''
last_seen = {}  # renamed from `dict` to avoid shadowing the builtin
max_length = 0
j = 0
for i in range(len(string)):
if string[i] in last_seen:
j = max(last_seen[string[i]], j)
last_seen[string[i]] = i + 1
if i - j + 1 > max_length:
max_length = i - j + 1
sub_string = string[j: i + 1]
return max_length, sub_string
<SYSTEM_TASK:>
Find the length of the longest substring
<END_TASK>
<USER_TASK:>
Description:
def get_longest_non_repeat_v2(string):
"""
Find the length of the longest substring
without repeating characters.
Uses alternative algorithm.
Return max_len and the substring as a tuple
""" |
if string is None:
return 0, ''
sub_string = ''
start, max_len = 0, 0
used_char = {}
for index, char in enumerate(string):
if char in used_char and start <= used_char[char]:
start = used_char[char] + 1
else:
if index - start + 1 > max_len:
max_len = index - start + 1
sub_string = string[start: index + 1]
used_char[char] = index
return max_len, sub_string
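Quick sanity checks for the four variants above (illustrative, not from the source):
assert longest_non_repeat_v1('abcabcbb') == 3
assert longest_non_repeat_v2('pwwkew') == 3
assert get_longest_non_repeat_v1('abcabcbb') == (3, 'abc')
assert get_longest_non_repeat_v2('pwwkew') == (3, 'wke')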
<SYSTEM_TASK:>
Push the item in the priority queue.
<END_TASK>
<USER_TASK:>
Description:
def push(self, item, priority=None):
"""Push the item in the priority queue.
if priority is not given, priority is set to the value of item.
""" |
priority = item if priority is None else priority
node = PriorityQueueNode(item, priority)
for index, current in enumerate(self.priority_queue_list):
if current.priority < node.priority:
self.priority_queue_list.insert(index, node)
return
# when traversed complete queue
self.priority_queue_list.append(node)
<SYSTEM_TASK:>
Takes as input multi dimensional iterable and
<END_TASK>
<USER_TASK:>
Description:
def flatten_iter(iterable):
"""
Takes as input multi dimensional iterable and
returns generator which produces one dimensional output.
""" |
for element in iterable:
# guard strings: a str is Iterable and would recurse forever
if isinstance(element, Iterable) and not isinstance(element, (str, bytes)):
yield from flatten_iter(element)
else:
yield element
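A small usage sketch (assumes `Iterable` comes from collections.abc in the surrounding module):
# list(flatten_iter([1, [2, (3, 4)], [[5]]])) == [1, 2, 3, 4, 5]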
<SYSTEM_TASK:>
Iterable to get every convolution window per loop iteration.
<END_TASK>
<USER_TASK:>
Description:
def convolved(iterable, kernel_size=1, stride=1, padding=0, default_value=None):
"""Iterable to get every convolution window per loop iteration.
For example:
`convolved([1, 2, 3, 4], kernel_size=2)`
will produce the following result:
`[[1, 2], [2, 3], [3, 4]]`.
`convolved([1, 2, 3], kernel_size=2, stride=1, padding=2, default_value=42)`
will produce the following result:
`[[42, 42], [42, 1], [1, 2], [2, 3], [3, 42], [42, 42]]`
Arguments:
iterable: An object to iterate on. It should support slice indexing if `padding == 0`.
kernel_size: The number of items yielded at every iteration.
stride: The step size between each iteration.
padding: Padding must be an integer or a string with value `SAME` or `VALID`. If it is an integer, it represents
how many values we add with `default_value` on the borders. If it is a string, `SAME` means that the
convolution will add some padding according to the kernel_size, and `VALID` is the same as
specifying `padding=0`.
default_value: Default fill value for padding and values outside iteration range.
For more information, refer to:
- https://github.com/guillaume-chevalier/python-conv-lib/blob/master/conv/conv.py
- https://github.com/guillaume-chevalier/python-conv-lib
- MIT License, Copyright (c) 2018 Guillaume Chevalier
""" |
# Input validation and error messages
if not hasattr(iterable, '__iter__'):
raise ValueError(
"Can't iterate on object: {}".format(
iterable))
if stride < 1:
raise ValueError(
"Stride must be of at least one. Got `stride={}`.".format(
stride))
if not (padding in ['SAME', 'VALID'] or type(padding) in [int]):
raise ValueError(
"Padding must be an integer or a string with value `SAME` or `VALID`.")
if not isinstance(padding, str):
if padding < 0:
raise ValueError(
"Padding must be of at least zero. Got `padding={}`.".format(
padding))
else:
if padding == 'SAME':
padding = kernel_size // 2
elif padding == 'VALID':
padding = 0
if not type(iterable) == list:
iterable = list(iterable)
# Add padding to iterable
if padding > 0:
pad = [default_value] * padding
iterable = pad + list(iterable) + pad
# Fill missing value to the right
remainder = (kernel_size - len(iterable)) % stride
extra_pad = [default_value] * remainder
iterable = iterable + extra_pad
i = 0
while True:
if i > len(iterable) - kernel_size:
break
yield iterable[i:i + kernel_size]
i += stride
<SYSTEM_TASK:>
1D Iterable to get every convolution window per loop iteration.
<END_TASK>
<USER_TASK:>
Description:
def convolved_1d(iterable, kernel_size=1, stride=1, padding=0, default_value=None):
"""1D Iterable to get every convolution window per loop iteration.
For more information, refer to:
- https://github.com/guillaume-chevalier/python-conv-lib/blob/master/conv/conv.py
- https://github.com/guillaume-chevalier/python-conv-lib
- MIT License, Copyright (c) 2018 Guillaume Chevalier
""" |
return convolved(iterable, kernel_size, stride, padding, default_value)
<SYSTEM_TASK:>
2D Iterable to get every convolution window per loop iteration.
<END_TASK>
<USER_TASK:>
Description:
def convolved_2d(iterable, kernel_size=1, stride=1, padding=0, default_value=None):
"""2D Iterable to get every convolution window per loop iteration.
For more information, refer to:
- https://github.com/guillaume-chevalier/python-conv-lib/blob/master/conv/conv.py
- https://github.com/guillaume-chevalier/python-conv-lib
- MIT License, Copyright (c) 2018 Guillaume Chevalier
""" |
kernel_size = dimensionize(kernel_size, nd=2)
stride = dimensionize(stride, nd=2)
padding = dimensionize(padding, nd=2)
for row_packet in convolved(iterable, kernel_size[0], stride[0], padding[0], default_value):
transposed_inner = []
for col in tuple(row_packet):
transposed_inner.append(list(
convolved(col, kernel_size[1], stride[1], padding[1], default_value)
))
if len(transposed_inner) > 0:
for col_i in range(len(transposed_inner[0])):
yield tuple(row_j[col_i] for row_j in transposed_inner)
<SYSTEM_TASK:>
Convert integers to a list of integers to fit the number of dimensions if
<END_TASK>
<USER_TASK:>
Description:
def dimensionize(maybe_a_list, nd=2):
"""Convert integers to a list of integers to fit the number of dimensions if
the argument is not already a list.
For example:
`dimensionize(3, nd=2)`
will produce the following result:
`(3, 3)`.
`dimensionize([3, 1], nd=2)`
will produce the following result:
`[3, 1]`.
For more information, refer to:
- https://github.com/guillaume-chevalier/python-conv-lib/blob/master/conv/conv.py
- https://github.com/guillaume-chevalier/python-conv-lib
- MIT License, Copyright (c) 2018 Guillaume Chevalier
""" |
if not hasattr(maybe_a_list, '__iter__'):
# Argument is probably an integer so we map it to a list of size `nd`.
now_a_list = [maybe_a_list] * nd
return now_a_list
else:
# Argument is probably an `nd`-sized list.
return maybe_a_list
<SYSTEM_TASK:>
Merge intervals in the form of a list.
<END_TASK>
<USER_TASK:>
Description:
def merge_intervals(intervals):
""" Merge intervals in the form of a list. """ |
if intervals is None:
return None
intervals.sort(key=lambda i: i[0])
out = [intervals.pop(0)]
for i in intervals:
if out[-1][-1] >= i[0]:
out[-1][-1] = max(out[-1][-1], i[-1])
else:
out.append(i)
return out
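For example (illustrative):
# merge_intervals([[1, 3], [2, 6], [8, 10], [15, 18]]) == [[1, 6], [8, 10], [15, 18]]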
<SYSTEM_TASK:>
Merge overlapping intervals into one.
<END_TASK>
<USER_TASK:>
Description:
def merge(intervals):
""" Merge two intervals into one. """ |
out = []
for i in sorted(intervals, key=lambda i: i.start):
if out and i.start <= out[-1].end:
out[-1].end = max(out[-1].end, i.end)
else:
out.append(i)
return out
<SYSTEM_TASK:>
Max heapify helper for max_heap_sort
<END_TASK>
<USER_TASK:>
Description:
def max_heapify(arr, end, simulation, iteration):
""" Max heapify helper for max_heap_sort
""" |
last_parent = (end - 1) // 2
# Iterate from last parent to first
for parent in range(last_parent, -1, -1):
current_parent = parent
# Iterate from current_parent to last_parent
while current_parent <= last_parent:
# Find greatest child of current_parent
child = 2 * current_parent + 1
if child + 1 <= end and arr[child] < arr[child + 1]:
child = child + 1
# Swap if child is greater than parent
if arr[child] > arr[current_parent]:
arr[current_parent], arr[child] = arr[child], arr[current_parent]
current_parent = child
if simulation:
iteration = iteration + 1
print("iteration",iteration,":",*arr)
# If no swap occured, no need to keep iterating
else:
break
arr[0], arr[end] = arr[end], arr[0]
return iteration
<SYSTEM_TASK:>
Min heapify helper for min_heap_sort
<END_TASK>
<USER_TASK:>
Description:
def min_heapify(arr, start, simulation, iteration):
""" Min heapify helper for min_heap_sort
""" |
# Offset last_parent by the start (last_parent calculated as if start index was 0)
# All array accesses need to be offset by start
end = len(arr) - 1
last_parent = (end - start - 1) // 2
# Iterate from last parent to first
for parent in range(last_parent, -1, -1):
current_parent = parent
# Iterate from current_parent to last_parent
while current_parent <= last_parent:
# Find lesser child of current_parent
child = 2 * current_parent + 1
if child + 1 <= end - start and arr[child + start] > arr[
child + 1 + start]:
child = child + 1
# Swap if child is less than parent
if arr[child + start] < arr[current_parent + start]:
arr[current_parent + start], arr[child + start] = \
arr[child + start], arr[current_parent + start]
current_parent = child
if simulation:
iteration = iteration + 1
print("iteration",iteration,":",*arr)
# If no swap occured, no need to keep iterating
else:
break
return iteration
<SYSTEM_TASK:>
the RSA key generating algorithm
<END_TASK>
<USER_TASK:>
Description:
def generate_key(k, seed=None):
"""
the RSA key generating algorithm
k is the number of bits in n
""" |
def modinv(a, m):
"""calculate the inverse of a mod m
that is, find b such that (a * b) % m == 1"""
b = 1
while not (a * b) % m == 1:
b += 1
return b
def gen_prime(k):
"""generate a prime with k bits"""
def is_prime(num):
if num == 2:
return True
for i in range(2, int(num ** 0.5) + 1):
if num % i == 0:
return False
return True
while True:
key = random.randrange(int(2 ** (k - 1)), int(2 ** k))
if is_prime(key):
return key
# seed once here; re-seeding inside gen_prime would return the same
# prime on every call (making p == q) and could loop forever below
random.seed(seed)
# size in bits of p and q need to add up to the size of n
p_size = k // 2
q_size = k - p_size
e = gen_prime(k)  # in many cases, e is also chosen to be a small constant
while True:
p = gen_prime(p_size)
if p % e != 1:
break
while True:
q = gen_prime(q_size)
if q != p and q % e != 1:
break
n = p * q
l = (p - 1) * (q - 1) # calculate totient function
d = modinv(e, l)
return int(n), int(e), int(d)
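A hedged round-trip sketch; a small k keeps the brute-force modinv fast, and the message must be smaller than n:
n, e, d = generate_key(16)
message = 42
ciphertext = pow(message, e, n)           # encrypt: c = m^e mod n
assert pow(ciphertext, d, n) == message   # decrypt: m = c^d mod n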
<SYSTEM_TASK:>
Return square root of n, with maximum absolute error epsilon
<END_TASK>
<USER_TASK:>
Description:
def square_root(n, epsilon=0.001):
"""Return square root of n, with maximum absolute error epsilon""" |
guess = n / 2
while abs(guess * guess - n) > epsilon:
guess = (guess + (n / guess)) / 2
return guess
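This is the Newton/Babylonian iteration; for example, abs(square_root(9) - 3.0) < 0.001.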
<SYSTEM_TASK:>
Calculate the powerset of any iterable.
<END_TASK>
<USER_TASK:>
Description:
def powerset(iterable):
"""Calculate the powerset of any iterable.
For a range of integers up to the length of the given list,
make all possible combinations and chain them together as one object.
From https://docs.python.org/3/library/itertools.html#itertools-recipes
""" |
"list(powerset([1,2,3])) --> [(), (1,), (2,), (3,), (1,2), (1,3), (2,3), (1,2,3)]"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
<SYSTEM_TASK:>
Approximate greedy algorithm for set-covering. Can be used on large
<END_TASK>
<USER_TASK:>
Description:
def greedy_set_cover(universe, subsets, costs):
"""Approximate greedy algorithm for set-covering. Can be used on large
inputs - though not an optimal solution.
Args:
universe (list): Universe of elements
subsets (dict): Subsets of U {S1:elements,S2:elements}
costs (dict): Costs of each subset in S - {S1:cost, S2:cost...}
""" |
elements = set(e for s in subsets.keys() for e in subsets[s])
# elements don't cover universe -> invalid input for set cover
if elements != universe:
return None
# track elements of universe covered
covered = set()
cover_sets = []
while covered != universe:
min_cost_elem_ratio = float("inf")
min_set = None
# find set with minimum cost:elements_added ratio
for s, elements in subsets.items():
new_elements = len(elements - covered)
# set may have same elements as already covered -> new_elements = 0
# check to avoid division by 0 error
if new_elements != 0:
cost_elem_ratio = costs[s] / new_elements
if cost_elem_ratio < min_cost_elem_ratio:
min_cost_elem_ratio = cost_elem_ratio
min_set = s
cover_sets.append(min_set)
# union
covered |= subsets[min_set]
return cover_sets
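An illustrative call (subset values must be sets for the difference and union operations above):
universe = {1, 2, 3, 4, 5}
subsets = {'S1': {1, 3, 4}, 'S2': {2, 5}, 'S3': {1, 2, 3, 4}}
costs = {'S1': 5, 'S2': 10, 'S3': 3}
# greedy_set_cover(universe, subsets, costs) == ['S3', 'S2']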
<SYSTEM_TASK:>
Re-balance the tree after inserting or deleting a node.
<END_TASK>
<USER_TASK:>
Description:
def re_balance(self):
"""
Re-balance the tree after inserting or deleting a node.
""" |
self.update_heights(recursive=False)
self.update_balances(False)
while self.balance < -1 or self.balance > 1:
if self.balance > 1:
if self.node.left.balance < 0:
self.node.left.rotate_left()
self.update_heights()
self.update_balances()
self.rotate_right()
self.update_heights()
self.update_balances()
if self.balance < -1:
if self.node.right.balance > 0:
self.node.right.rotate_right()
self.update_heights()
self.update_balances()
self.rotate_left()
self.update_heights()
self.update_balances()
<SYSTEM_TASK:>
This is a brute force method where we keep a dict the size of the list
<END_TASK>
<USER_TASK:>
Description:
def kth_to_last_dict(head, k):
"""
This is a brute force method where we keep a dict the size of the list
Then we check it for the value we need. If the key is not in the dict,
our and statement will short circuit and return False
""" |
if not (head and k > -1):
return False
d = dict()
count = 0
while head:
d[count] = head
head = head.next
count += 1
return len(d)-k in d and d[len(d)-k]
<SYSTEM_TASK:>
This is an optimal method using iteration.
<END_TASK>
<USER_TASK:>
Description:
def kth_to_last(head, k):
"""
This is an optimal method using iteration.
We move p1 k steps ahead into the list.
Then we move p1 and p2 together until p1 hits the end.
""" |
if not (head and k > -1):  # 'and' mirrors kth_to_last_dict; 'or' would let a None head through
return False
p1 = head
p2 = head
for i in range(1, k+1):
if p1 is None:
# Went too far, k is not valid
raise IndexError
p1 = p1.next
while p1:
p1 = p1.next
p2 = p2.next
return p2
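A hedged usage sketch with a minimal node type (the real Node class lives elsewhere in this repo):
class Node:
    def __init__(self, val):
        self.val = val
        self.next = None
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
assert kth_to_last(head, 1).val == 3       # last node
assert kth_to_last_dict(head, 2).val == 2  # second to last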
<SYSTEM_TASK:>
A slightly more Pythonic approach with a recursive generator
<END_TASK>
<USER_TASK:>
Description:
def palindromic_substrings_iter(s):
"""
A slightly more Pythonic approach with a recursive generator
""" |
if not s:
yield []
return
for i in range(len(s), 0, -1):
sub = s[:i]
if sub == sub[::-1]:
for rest in palindromic_substrings_iter(s[i:]):
yield [sub] + rest
<SYSTEM_TASK:>
Return list of all primes less than n,
<END_TASK>
<USER_TASK:>
Description:
def get_primes(n):
"""Return list of all primes less than n,
Using sieve of Eratosthenes.
""" |
if n <= 0:
raise ValueError("'n' must be a positive integer.")
# If x is even, exclude x from list (-1):
sieve_size = (n // 2 - 1) if n % 2 == 0 else (n // 2)
sieve = [True for _ in range(sieve_size)] # Sieve
primes = [] # List of Primes
if n >= 2:
primes.append(2) # 2 is prime by default
for i in range(sieve_size):
if sieve[i]:
value_at_i = i*2 + 3
primes.append(value_at_i)
for j in range(i, sieve_size, value_at_i):
sieve[j] = False
return primes
<SYSTEM_TASK:>
returns a list with all permutations of the given elements.
<END_TASK>
<USER_TASK:>
Description:
def permute(elements):
"""
returns a list with all permutations of the given elements.
""" |
if len(elements) <= 1:
return [elements]
else:
tmp = []
for perm in permute(elements[1:]):
for i in range(len(elements)):
tmp.append(perm[:i] + elements[0:1] + perm[i:])
return tmp
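For distinct elements this agrees with itertools (a sanity check, not from the source):
from itertools import permutations
assert sorted(permute([1, 2, 3])) == sorted(list(p) for p in permutations([1, 2, 3]))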
<SYSTEM_TASK:>
Initialize the rabit library with arguments
<END_TASK>
<USER_TASK:>
Description:
def init(args=None):
"""Initialize the rabit library with arguments""" |
if args is None:
args = []
arr = (ctypes.c_char_p * len(args))()
arr[:] = args
_LIB.RabitInit(len(arr), arr)
<SYSTEM_TASK:>
Get the processor name.
<END_TASK>
<USER_TASK:>
Description:
def get_processor_name():
"""Get the processor name.
Returns
-------
name : str
the name of processor(host)
""" |
mxlen = 256
length = ctypes.c_ulong()
buf = ctypes.create_string_buffer(mxlen)
_LIB.RabitGetProcessorName(buf, ctypes.byref(length), mxlen)
return buf.value
<SYSTEM_TASK:>
Broadcast object from one node to all other nodes.
<END_TASK>
<USER_TASK:>
Description:
def broadcast(data, root):
"""Broadcast object from one node to all other nodes.
Parameters
----------
data : any type that can be pickled
Input data, if current rank does not equal root, this can be None
root : int
Rank of the node to broadcast data from.
Returns
-------
object : int
the result of broadcast.
""" |
rank = get_rank()
length = ctypes.c_ulong()
if root == rank:
assert data is not None, 'need to pass in data when broadcasting'
s = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)
length.value = len(s)
# run first broadcast
_LIB.RabitBroadcast(ctypes.byref(length),
ctypes.sizeof(ctypes.c_ulong), root)
if root != rank:
dptr = (ctypes.c_char * length.value)()
# run second
_LIB.RabitBroadcast(ctypes.cast(dptr, ctypes.c_void_p),
length.value, root)
data = pickle.loads(dptr.raw)
del dptr
else:
_LIB.RabitBroadcast(ctypes.cast(ctypes.c_char_p(s), ctypes.c_void_p),
length.value, root)
del s
return data
<SYSTEM_TASK:>
Update the boosters for one iteration
<END_TASK>
<USER_TASK:>
Description:
def update(self, iteration, fobj):
""""Update the boosters for one iteration""" |
self.bst.update(self.dtrain, iteration, fobj)
<SYSTEM_TASK:>
return whether the current callback context is cv or train
<END_TASK>
<USER_TASK:>
Description:
def _get_callback_context(env):
"""return whether the current callback context is cv or train""" |
if env.model is not None and env.cvfolds is None:
context = 'train'
elif env.model is None and env.cvfolds is not None:
context = 'cv'
return context
<SYSTEM_TASK:>
Create a callback that prints evaluation results.
<END_TASK>
<USER_TASK:>
Description:
def print_evaluation(period=1, show_stdv=True):
"""Create a callback that print evaluation result.
We print the evaluation results every **period** iterations
and on the first and the last iterations.
Parameters
----------
period : int
The period to log the evaluation results
show_stdv : bool, optional
Whether show stdv if provided
Returns
-------
callback : function
A callback that prints evaluation results every period iterations.
""" |
def callback(env):
"""internal function"""
if env.rank != 0 or (not env.evaluation_result_list) or period is False or period == 0:
return
i = env.iteration
if i % period == 0 or i + 1 == env.begin_iteration or i + 1 == env.end_iteration:
msg = '\t'.join([_fmt_metric(x, show_stdv) for x in env.evaluation_result_list])
rabit.tracker_print('[%d]\t%s\n' % (i, msg))
return callback
<SYSTEM_TASK:>
Reset learning rate after iteration 1
<END_TASK>
<USER_TASK:>
Description:
def reset_learning_rate(learning_rates):
"""Reset learning rate after iteration 1
NOTE: the initial learning rate will still take effect on the first iteration.
Parameters
----------
learning_rates: list or function
List of learning rate for each boosting round
or a customized function that calculates eta in terms of
current number of round and the total number of boosting round (e.g.
yields learning rate decay)
* list ``l``: ``eta = l[boosting_round]``
* function ``f``: ``eta = f(boosting_round, num_boost_round)``
Returns
-------
callback : function
The requested callback function.
""" |
def get_learning_rate(i, n, learning_rates):
"""helper providing the learning rate"""
if isinstance(learning_rates, list):
if len(learning_rates) != n:
raise ValueError("Length of list 'learning_rates' has to equal 'num_boost_round'.")
new_learning_rate = learning_rates[i]
else:
new_learning_rate = learning_rates(i, n)
return new_learning_rate
def callback(env):
"""internal function"""
context = _get_callback_context(env)
if context == 'train':
bst, i, n = env.model, env.iteration, env.end_iteration
bst.set_param('learning_rate', get_learning_rate(i, n, learning_rates))
elif context == 'cv':
i, n = env.iteration, env.end_iteration
for cvpack in env.cvfolds:
bst = cvpack.bst
bst.set_param('learning_rate', get_learning_rate(i, n, learning_rates))
callback.before_iteration = True
return callback
<SYSTEM_TASK:>
Decorate an objective function
<END_TASK>
<USER_TASK:>
Description:
def _objective_decorator(func):
"""Decorate an objective function
Converts an objective function using the typical sklearn metrics
signature so that it is usable with ``xgboost.training.train``
Parameters
----------
func: callable
Expects a callable with signature ``func(y_true, y_pred)``:
y_true: array_like of shape [n_samples]
The target values
y_pred: array_like of shape [n_samples]
The predicted values
Returns
-------
new_func: callable
The new objective function as expected by ``xgboost.training.train``.
The signature is ``new_func(preds, dmatrix)``:
preds: array_like, shape [n_samples]
The predicted values
dmatrix: ``DMatrix``
The training set from which the labels will be extracted using
``dmatrix.get_label()``
""" |
def inner(preds, dmatrix):
"""internal function"""
labels = dmatrix.get_label()
return func(labels, preds)
return inner
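A hedged sketch of a custom objective in the sklearn-style signature this decorator adapts (the function name and use of numpy are illustrative):
def squared_error(y_true, y_pred):
    grad = y_pred - y_true         # first-order gradient of 0.5 * (pred - true)^2
    hess = np.ones_like(y_pred)    # second-order gradient is constant 1
    return grad, hess
# _objective_decorator(squared_error) can then be passed as `obj` to xgboost.training.train.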
<SYSTEM_TASK:>
Return the predicted leaf of every tree for each sample.
<END_TASK>
<USER_TASK:>
Description:
def apply(self, X, ntree_limit=0):
"""Return the predicted leaf every tree for each sample.
Parameters
----------
X : array_like, shape=[n_samples, n_features]
Input features matrix.
ntree_limit : int
Limit number of trees in the prediction; defaults to 0 (use all trees).
Returns
-------
X_leaves : array_like, shape=[n_samples, n_trees]
For each datapoint x in X and for each tree, return the index of the
leaf x ends up in. Leaves are numbered within
``[0; 2**(self.max_depth+1))``, possibly with gaps in the numbering.
""" |
test_dmatrix = DMatrix(X, missing=self.missing, nthread=self.n_jobs)
return self.get_booster().predict(test_dmatrix,
pred_leaf=True,
ntree_limit=ntree_limit)
<SYSTEM_TASK:>
Feature importances property
<END_TASK>
<USER_TASK:>
Description:
def feature_importances_(self):
"""
Feature importances property
.. note:: Feature importance is defined only for tree boosters
Feature importance is only defined when the decision tree model is chosen as base
learner (`booster=gbtree`). It is not defined for other base learner types, such
as linear learners (`booster=gblinear`).
Returns
-------
feature_importances_ : array of shape ``[n_features]``
""" |
if getattr(self, 'booster', None) is not None and self.booster != 'gbtree':
raise AttributeError('Feature importance is not defined for Booster type {}'
.format(self.booster))
b = self.get_booster()
score = b.get_score(importance_type=self.importance_type)
all_features = [score.get(f, 0.) for f in b.feature_names]
all_features = np.array(all_features, dtype=np.float32)
return all_features / all_features.sum()
<SYSTEM_TASK:>
Predict the probability of each `data` example being of a given class.
<END_TASK>
<USER_TASK:>
Description:
def predict_proba(self, data, ntree_limit=None, validate_features=True):
"""
Predict the probability of each `data` example being of a given class.
.. note:: This function is not thread safe
For each booster object, predict can only be called from one thread.
If you want to run prediction using multiple threads, call ``xgb.copy()`` to make copies
of the model object and then call ``predict()``.
Parameters
----------
data : DMatrix
The dmatrix storing the input.
ntree_limit : int
Limit number of trees in the prediction; defaults to best_ntree_limit if defined
(i.e. it has been trained with early stopping), otherwise 0 (use all trees).
validate_features : bool
When this is True, validate that the Booster's and data's feature_names are identical.
Otherwise, it is assumed that the feature_names are the same.
Returns
-------
prediction : numpy array
a numpy array with the probability of each data example being of a given class.
""" |
test_dmatrix = DMatrix(data, missing=self.missing, nthread=self.n_jobs)
if ntree_limit is None:
ntree_limit = getattr(self, "best_ntree_limit", 0)
class_probs = self.get_booster().predict(test_dmatrix,
ntree_limit=ntree_limit,
validate_features=validate_features)
if self.objective == "multi:softprob":
return class_probs
classone_probs = class_probs
classzero_probs = 1.0 - classone_probs
return np.vstack((classzero_probs, classone_probs)).transpose()
<SYSTEM_TASK:>
Convert a list of Python str to C pointer
<END_TASK>
<USER_TASK:>
Description:
def from_pystr_to_cstr(data):
"""Convert a list of Python str to C pointer
Parameters
----------
data : list
list of str
""" |
if not isinstance(data, list):
raise NotImplementedError
pointers = (ctypes.c_char_p * len(data))()
if PY3:
data = [bytes(d, 'utf-8') for d in data]
else:
data = [d.encode('utf-8') if isinstance(d, unicode) else d # pylint: disable=undefined-variable
for d in data]
pointers[:] = data
return pointers
<SYSTEM_TASK:>
Revert C pointer to Python str
<END_TASK>
<USER_TASK:>
Description:
def from_cstr_to_pystr(data, length):
"""Revert C pointer to Python str
Parameters
----------
data : ctypes pointer
pointer to data
length : ctypes pointer
pointer to length of data
""" |
if PY3:
res = []
for i in range(length.value):
try:
res.append(str(data[i].decode('ascii')))
except UnicodeDecodeError:
res.append(str(data[i].decode('utf-8')))
else:
res = []
for i in range(length.value):
try:
res.append(str(data[i].decode('ascii')))
except UnicodeDecodeError:
# pylint: disable=undefined-variable
res.append(unicode(data[i].decode('utf-8')))
return res
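A hedged round trip through the two helpers (assuming c_bst_ulong in this module is ctypes.c_uint64):
strings = ['hello', 'world']
pointers = from_pystr_to_cstr(strings)
length = ctypes.c_uint64(len(strings))
assert from_cstr_to_pystr(pointers, length) == strings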
<SYSTEM_TASK:>
Convert ctypes pointer to buffer type.
<END_TASK>
<USER_TASK:>
Description:
def ctypes2buffer(cptr, length):
"""Convert ctypes pointer to buffer type.""" |
if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):
raise RuntimeError('expected char pointer')
res = bytearray(length)
rptr = (ctypes.c_char * length).from_buffer(res)
if not ctypes.memmove(rptr, cptr, length):
raise RuntimeError('memmove failed')
return res
<SYSTEM_TASK:>
Convert a Python list or numpy array to a C array.
<END_TASK>
<USER_TASK:>
Description:
def c_array(ctype, values):
"""Convert a python string to c array.""" |
if isinstance(values, np.ndarray) and values.dtype.itemsize == ctypes.sizeof(ctype):
return (ctype * len(values)).from_buffer_copy(values)
return (ctype * len(values))(*values)
<SYSTEM_TASK:>
Extract numpy array from single column data table
<END_TASK>
<USER_TASK:>
Description:
def _maybe_dt_array(array):
""" Extract numpy array from single column data table """ |
if not isinstance(array, DataTable) or array is None:
return array
if array.shape[1] > 1:
raise ValueError('DataTable for label or weight cannot have multiple columns')
# below requires new dt version
# extract first column
array = array.to_numpy()[:, 0].astype('float')
return array
<SYSTEM_TASK:>
Initialize data from a datatable Frame.
<END_TASK>
<USER_TASK:>
Description:
def _init_from_dt(self, data, nthread):
"""
Initialize data from a datatable Frame.
""" |
ptrs = (ctypes.c_void_p * data.ncols)()
if hasattr(data, "internal") and hasattr(data.internal, "column"):
# datatable>0.8.0
for icol in range(data.ncols):
col = data.internal.column(icol)
ptr = col.data_pointer
ptrs[icol] = ctypes.c_void_p(ptr)
else:
# datatable<=0.8.0
from datatable.internal import frame_column_data_r # pylint: disable=no-name-in-module,import-error
for icol in range(data.ncols):
ptrs[icol] = frame_column_data_r(data, icol)
# always return stypes for dt ingestion
feature_type_strings = (ctypes.c_char_p * data.ncols)()
for icol in range(data.ncols):
feature_type_strings[icol] = ctypes.c_char_p(data.stypes[icol].name.encode('utf-8'))
handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixCreateFromDT(
ptrs, feature_type_strings,
c_bst_ulong(data.shape[0]),
c_bst_ulong(data.shape[1]),
ctypes.byref(handle),
nthread))
self.handle = handle
<SYSTEM_TASK:>
Set float type property into the DMatrix
<END_TASK>
<USER_TASK:>
Description:
def set_float_info_npy2d(self, field, data):
"""Set float type property into the DMatrix
for numpy 2d array input
Parameters
----------
field: str
The field name of the information
data: numpy array
The array of data to be set
""" |
if getattr(data, 'base', None) is not None and \
data.base is not None and isinstance(data, np.ndarray) \
and isinstance(data.base, np.ndarray) and (not data.flags.c_contiguous):
warnings.warn("Use subset (sliced data) of np.ndarray is not recommended " +
"because it will generate extra copies and increase memory consumption")
data = np.array(data, copy=True, dtype=np.float32)
else:
data = np.array(data, copy=False, dtype=np.float32)
c_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
_check_call(_LIB.XGDMatrixSetFloatInfo(self.handle,
c_str(field),
c_data,
c_bst_ulong(len(data))))
<SYSTEM_TASK:>
Initialize the model by loading from rabit checkpoint.
<END_TASK>
<USER_TASK:>
Description:
def load_rabit_checkpoint(self):
"""Initialize the model by load from rabit checkpoint.
Returns
-------
version: integer
The version number of the model.
""" |
version = ctypes.c_int()
_check_call(_LIB.XGBoosterLoadRabitCheckpoint(
self.handle, ctypes.byref(version)))
return version.value
<SYSTEM_TASK:>
Get attribute string from the Booster.
<END_TASK>
<USER_TASK:>
Description:
def attr(self, key):
"""Get attribute string from the Booster.
Parameters
----------
key : str
The key to get attribute from.
Returns
-------
value : str
The attribute value of the key; returns None if the attribute does not exist.
""" |
ret = ctypes.c_char_p()
success = ctypes.c_int()
_check_call(_LIB.XGBoosterGetAttr(
self.handle, c_str(key), ctypes.byref(ret), ctypes.byref(success)))
if success.value != 0:
return py_str(ret.value)
return None
<SYSTEM_TASK:>
Get attributes stored in the Booster as a dictionary.
<END_TASK>
<USER_TASK:>
Description:
def attributes(self):
"""Get attributes stored in the Booster as a dictionary.
Returns
-------
result : dictionary of attribute_name: attribute_value pairs of strings.
Returns an empty dict if there's no attributes.
""" |
length = c_bst_ulong()
sarr = ctypes.POINTER(ctypes.c_char_p)()
_check_call(_LIB.XGBoosterGetAttrNames(self.handle,
ctypes.byref(length),
ctypes.byref(sarr)))
attr_names = from_cstr_to_pystr(sarr, length)
return {n: self.attr(n) for n in attr_names}
<SYSTEM_TASK:>
Set the attribute of the Booster.
<END_TASK>
<USER_TASK:>
Description:
def set_attr(self, **kwargs):
"""Set the attribute of the Booster.
Parameters
----------
**kwargs
The attributes to set. Setting a value to None deletes an attribute.
""" |
for key, value in kwargs.items():
if value is not None:
if not isinstance(value, STRING_TYPES):
raise ValueError("Set Attr only accepts string values")
value = c_str(str(value))
_check_call(_LIB.XGBoosterSetAttr(
self.handle, c_str(key), value))
<SYSTEM_TASK:>
Set parameters into the Booster.
<END_TASK>
<USER_TASK:>
Description:
def set_param(self, params, value=None):
"""Set parameters into the Booster.
Parameters
----------
params: dict/list/str
list of key,value pairs, dict of key to value or simply str key
value: optional
value of the specified parameter, when params is str key
""" |
if isinstance(params, Mapping):
params = params.items()
elif isinstance(params, STRING_TYPES) and value is not None:
params = [(params, value)]
for key, val in params:
_check_call(_LIB.XGBoosterSetParam(self.handle, c_str(key), c_str(str(val))))
<SYSTEM_TASK:>
Evaluate the model on mat.
<END_TASK>
<USER_TASK:>
Description:
def eval(self, data, name='eval', iteration=0):
"""Evaluate the model on mat.
Parameters
----------
data : DMatrix
The dmatrix storing the input.
name : str, optional
The name of the dataset.
iteration : int, optional
The current iteration number.
Returns
-------
result: str
Evaluation result string.
""" |
self._validate_features(data)
return self.eval_set([(data, name)], iteration)
<SYSTEM_TASK:>
Save the model to a file.
<END_TASK>
<USER_TASK:>
Description:
def save_model(self, fname):
"""
Save the model to a file.
The model is saved in an XGBoost internal binary format which is
universal among the various XGBoost interfaces. Auxiliary attributes of
the Python Booster object (such as feature_names) will not be saved.
To preserve all attributes, pickle the Booster object.
Parameters
----------
fname : string
Output file name
""" |
if isinstance(fname, STRING_TYPES): # assume file name
_check_call(_LIB.XGBoosterSaveModel(self.handle, c_str(fname)))
else:
raise TypeError("fname must be a string") |
<SYSTEM_TASK:>
Dump model into a text or JSON file.
<END_TASK>
<USER_TASK:>
Description:
def dump_model(self, fout, fmap='', with_stats=False, dump_format="text"):
"""
Dump model into a text or JSON file.
Parameters
----------
fout : string
Output file name.
fmap : string, optional
Name of the file containing feature map names.
with_stats : bool, optional
Controls whether the split statistics are output.
dump_format : string, optional
Format of model dump file. Can be 'text' or 'json'.
""" |
if isinstance(fout, STRING_TYPES):
fout = open(fout, 'w')
need_close = True
else:
need_close = False
ret = self.get_dump(fmap, with_stats, dump_format)
if dump_format == 'json':
fout.write('[\n')
for i, _ in enumerate(ret):
fout.write(ret[i])
if i < len(ret) - 1:
fout.write(",\n")
fout.write('\n]')
else:
for i, _ in enumerate(ret):
fout.write('booster[{}]:\n'.format(i))
fout.write(ret[i])
if need_close:
fout.close()
<SYSTEM_TASK:>
Returns the model dump as a list of strings.
<END_TASK>
<USER_TASK:>
Description:
def get_dump(self, fmap='', with_stats=False, dump_format="text"):
"""
Returns the model dump as a list of strings.
Parameters
----------
fmap : string, optional
Name of the file containing feature map names.
with_stats : bool, optional
Controls whether the split statistics are output.
dump_format : string, optional
Format of model dump. Can be 'text' or 'json'.
""" |
length = c_bst_ulong()
sarr = ctypes.POINTER(ctypes.c_char_p)()
if self.feature_names is not None and fmap == '':
flen = len(self.feature_names)
fname = from_pystr_to_cstr(self.feature_names)
if self.feature_types is None:
# use quantitative as default
# {'q': quantitative, 'i': indicator}
ftype = from_pystr_to_cstr(['q'] * flen)
else:
ftype = from_pystr_to_cstr(self.feature_types)
_check_call(_LIB.XGBoosterDumpModelExWithFeatures(
self.handle,
ctypes.c_int(flen),
fname,
ftype,
ctypes.c_int(with_stats),
c_str(dump_format),
ctypes.byref(length),
ctypes.byref(sarr)))
else:
if fmap != '' and not os.path.exists(fmap):
raise ValueError("No such file: {0}".format(fmap))
_check_call(_LIB.XGBoosterDumpModelEx(self.handle,
c_str(fmap),
ctypes.c_int(with_stats),
c_str(dump_format),
ctypes.byref(length),
ctypes.byref(sarr)))
res = from_cstr_to_pystr(sarr, length)
return res
<SYSTEM_TASK:>
Get split value histogram of a feature
<END_TASK>
<USER_TASK:>
Description:
def get_split_value_histogram(self, feature, fmap='', bins=None, as_pandas=True):
"""Get split value histogram of a feature
Parameters
----------
feature: str
The name of the feature.
fmap: str (optional)
The name of feature map file.
bins: int, default None
The maximum number of bins.
Number of bins equals number of unique split values n_unique,
if bins == None or bins > n_unique.
as_pandas: bool, default True
Return pd.DataFrame when pandas is installed.
If False or pandas is not installed, return numpy ndarray.
Returns
-------
a histogram of used splitting values for the specified feature
either as numpy array or pandas DataFrame.
""" |
xgdump = self.get_dump(fmap=fmap)
values = []
regexp = re.compile(r"\[{0}<([\d.Ee+-]+)\]".format(feature))
for i, _ in enumerate(xgdump):
m = re.findall(regexp, xgdump[i])
values.extend([float(x) for x in m])
n_unique = len(np.unique(values))
bins = max(min(n_unique, bins) if bins is not None else n_unique, 1)
nph = np.histogram(values, bins=bins)
nph = np.column_stack((nph[1][1:], nph[0]))
nph = nph[nph[:, 1] > 0]
if as_pandas and PANDAS_INSTALLED:
return DataFrame(nph, columns=['SplitValue', 'Count'])
if as_pandas and not PANDAS_INSTALLED:
sys.stderr.write(
"Returning histogram as ndarray (as_pandas == True, but pandas is not installed).")
return nph
<SYSTEM_TASK:>
Plot importance based on fitted trees.
<END_TASK>
<USER_TASK:>
Description:
def plot_importance(booster, ax=None, height=0.2,
xlim=None, ylim=None, title='Feature importance',
xlabel='F score', ylabel='Features',
importance_type='weight', max_num_features=None,
grid=True, show_values=True, **kwargs):
"""Plot importance based on fitted trees.
Parameters
----------
booster : Booster, XGBModel or dict
Booster or XGBModel instance, or dict taken by Booster.get_fscore()
ax : matplotlib Axes, default None
Target axes instance. If None, new figure and axes will be created.
grid : bool, Turn the axes grids on or off. Default is True (On).
importance_type : str, default "weight"
How the importance is calculated: either "weight", "gain", or "cover"
* "weight" is the number of times a feature appears in a tree
* "gain" is the average gain of splits which use the feature
* "cover" is the average coverage of splits which use the feature
where coverage is defined as the number of samples affected by the split
max_num_features : int, default None
Maximum number of top features displayed on plot. If None, all features will be displayed.
height : float, default 0.2
Bar height, passed to ax.barh()
xlim : tuple, default None
Tuple passed to axes.xlim()
ylim : tuple, default None
Tuple passed to axes.ylim()
title : str, default "Feature importance"
Axes title. To disable, pass None.
xlabel : str, default "F score"
X axis title label. To disable, pass None.
ylabel : str, default "Features"
Y axis title label. To disable, pass None.
show_values : bool, default True
Show values on plot. To disable, pass False.
kwargs :
Other keywords passed to ax.barh()
Returns
-------
ax : matplotlib Axes
""" |
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError('You must install matplotlib to plot importance')
if isinstance(booster, XGBModel):
importance = booster.get_booster().get_score(importance_type=importance_type)
elif isinstance(booster, Booster):
importance = booster.get_score(importance_type=importance_type)
elif isinstance(booster, dict):
importance = booster
else:
raise ValueError('booster must be a Booster, XGBModel or dict instance')
if not importance:
raise ValueError('Booster.get_score() returned an empty dict')
tuples = [(k, importance[k]) for k in importance]
if max_num_features is not None:
# pylint: disable=invalid-unary-operand-type
tuples = sorted(tuples, key=lambda x: x[1])[-max_num_features:]
else:
tuples = sorted(tuples, key=lambda x: x[1])
labels, values = zip(*tuples)
if ax is None:
_, ax = plt.subplots(1, 1)
ylocs = np.arange(len(values))
ax.barh(ylocs, values, align='center', height=height, **kwargs)
if show_values is True:
for x, y in zip(values, ylocs):
ax.text(x + 1, y, x, va='center')
ax.set_yticks(ylocs)
ax.set_yticklabels(labels)
if xlim is not None:
if not isinstance(xlim, tuple) or len(xlim) != 2:
raise ValueError('xlim must be a tuple of 2 elements')
else:
xlim = (0, max(values) * 1.1)
ax.set_xlim(xlim)
if ylim is not None:
if not isinstance(ylim, tuple) or len(ylim) != 2:
raise ValueError('ylim must be a tuple of 2 elements')
else:
ylim = (-1, len(values))
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax |
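A hedged usage sketch; bst is assumed to be a trained Booster such as the one in the histogram example above:
import matplotlib.pyplot as plt
from xgboost import plot_importance

ax = plot_importance(bst, importance_type='gain', max_num_features=10, show_values=False)
plt.show()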
<SYSTEM_TASK:>
Create a new action and assign callbacks, shortcuts, etc.
<END_TASK>
<USER_TASK:>
Description:
def newAction(parent, text, slot=None, shortcut=None, icon=None,
tip=None, checkable=False, enabled=True):
"""Create a new action and assign callbacks, shortcuts, etc.""" |
a = QAction(text, parent)
if icon is not None:
a.setIcon(newIcon(icon))
if shortcut is not None:
if isinstance(shortcut, (list, tuple)):
a.setShortcuts(shortcut)
else:
a.setShortcut(shortcut)
if tip is not None:
a.setToolTip(tip)
a.setStatusTip(tip)
if slot is not None:
a.triggered.connect(slot)
if checkable:
a.setCheckable(True)
a.setEnabled(enabled)
return a |
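A minimal sketch of calling this helper from inside a QMainWindow subclass; self.openFile and the 'open' icon name are placeholders:
open_act = newAction(self, 'Open', slot=self.openFile, shortcut='Ctrl+O', icon='open', tip='Open a file')
quit_act = newAction(self, 'Quit', slot=self.close, shortcut='Ctrl+Q')
file_menu = self.menuBar().addMenu('&File')
file_menu.addActions([open_act, quit_act])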
<SYSTEM_TASK:>
Sort the list into natural alphanumeric order.
<END_TASK>
<USER_TASK:>
Description:
def natural_sort(list, key=lambda s:s):
"""
Sort the list in place into natural alphanumeric order.
""" |
def get_alphanum_key_func(key):
convert = lambda text: int(text) if text.isdigit() else text
return lambda s: [convert(c) for c in re.split('([0-9]+)', key(s))]
sort_key = get_alphanum_key_func(key)
list.sort(key=sort_key) |
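A quick demonstration; note the function sorts in place and returns None:
files = ['img10.png', 'img2.png', 'img1.png']
natural_sort(files)
print(files)  # ['img1.png', 'img2.png', 'img10.png'] -- plain sort() would put 'img10' before 'img2'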
<SYSTEM_TASK:>
Select the top-most created shape that contains this point.
<END_TASK>
<USER_TASK:>
Description:
def selectShapePoint(self, point):
"""Select the first shape created which contains this point.""" |
self.deSelectShape()
if self.selectedVertex(): # A vertex is marked for selection.
index, shape = self.hVertex, self.hShape
shape.highlightVertex(index, shape.MOVE_VERTEX)
self.selectShape(shape)
return
for shape in reversed(self.shapes):
if self.isVisible(shape) and shape.containsPoint(point):
self.selectShape(shape)
self.calculateOffsets(shape, point)
return |
<SYSTEM_TASK:>
In the middle of drawing, toggling between modes should be disabled.
<END_TASK>
<USER_TASK:>
Description:
def toggleDrawingSensitive(self, drawing=True):
"""In the middle of drawing, toggling between modes should be disabled.""" |
self.actions.editMode.setEnabled(not drawing)
if not drawing and self.beginner():
# Cancel creation.
print('Cancel creation.')
self.canvas.setEditing(True)
self.canvas.restoreCursor()
self.actions.create.setEnabled(True) |
<SYSTEM_TASK:>
Function to handle difficult examples
<END_TASK>
<USER_TASK:>
Description:
def btnstate(self, item=None):
""" Function to handle the 'difficult' flag on examples.
Updates the flag on each object. """ |
if not self.canvas.editing():
return
item = self.currentItem()
if not item: # If no item is selected, take the last one in the list
item = self.labelList.item(self.labelList.count()-1)
difficult = self.diffcButton.isChecked()
try:
shape = self.itemsToShapes[item]
except KeyError: # no shape registered for this item
return
# Checked and Update: toggle the flag, or sync visibility
if difficult != shape.difficult:
shape.difficult = difficult
self.setDirty()
else: # User probably changed item visibility
self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked) |
<SYSTEM_TASK:>
Pop up the label editor and give it focus.
<END_TASK>
<USER_TASK:>
Description:
def newShape(self):
"""Pop-up and give focus to the label editor.
position MUST be in global coordinates.
""" |
if not self.useDefaultLabelCheckbox.isChecked() or not self.defaultLabelTextLine.text():
if len(self.labelHist) > 0:
self.labelDialog = LabelDialog(
parent=self, listItem=self.labelHist)
# Sync single class mode from PR#106
if self.singleClassMode.isChecked() and self.lastLabel:
text = self.lastLabel
else:
text = self.labelDialog.popUp(text=self.prevLabelText)
self.lastLabel = text
else:
text = self.defaultLabelTextLine.text()
# Add Chris
self.diffcButton.setChecked(False)
if text is not None:
self.prevLabelText = text
generate_color = generateColorByText(text)
shape = self.canvas.setLastLabel(text, generate_color, generate_color)
self.addLabel(shape)
if self.beginner(): # Switch to edit mode.
self.canvas.setEditing(True)
self.actions.create.setEnabled(True)
else:
self.actions.editMode.setEnabled(True)
self.setDirty()
if text not in self.labelHist:
self.labelHist.append(text)
else:
# self.canvas.undoLastLine()
self.canvas.resetAllLines() |
<SYSTEM_TASK:>
Figure out the scale needed to fit the pixmap into the main widget.
<END_TASK>
<USER_TASK:>
Description:
def scaleFitWindow(self):
"""Figure out the size of the pixmap in order to fit the main widget.""" |
e = 2.0 # So that no scrollbars are generated.
w1 = self.centralWidget().width() - e
h1 = self.centralWidget().height() - e
a1 = w1 / h1
# Calculate a new scale value based on the pixmap's aspect ratio.
w2 = self.canvas.pixmap.width() - 0.0  # - 0.0 forces float arithmetic under Python 2
h2 = self.canvas.pixmap.height() - 0.0
a2 = w2 / h2
return w1 / w2 if a2 >= a1 else h1 / h2 |
<SYSTEM_TASK:>
A better wrapper over request for deferred signing
<END_TASK>
<USER_TASK:>
Description:
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""A better wrapper over request for deferred signing""" |
if self.enableRateLimit:
self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return self.fetch(request['url'], request['method'], request['headers'], request['body']) |
<SYSTEM_TASK:>
Exchange.request is the entry point for all generated methods
<END_TASK>
<USER_TASK:>
Description:
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""Exchange.request is the entry point for all generated methods""" |
return self.fetch2(path, api, method, params, headers, body) |
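A hedged sketch of the dispatch chain; the exchange id, endpoint path, and params below are illustrative assumptions, since real paths are exchange-specific and normally reached via generated methods like publicGetTicker:
import ccxt

exchange = ccxt.kraken({'enableRateLimit': True})  # throttle() kicks in on each fetch2()
# request() delegates to fetch2(), which signs the request and calls fetch()
ticker = exchange.request('Ticker', api='public', method='GET', params={'pair': 'XBTUSD'})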
<SYSTEM_TASK:>
A helper method for matching error strings exactly vs broadly
<END_TASK>
<USER_TASK:>
Description:
def find_broadly_matched_key(self, broad, string):
"""A helper method for matching error strings exactly vs broadly""" |
for key in broad:
if string.find(key) >= 0:
return key
return None |
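A demonstration of the broad-match behaviour; the mapping below is a hypothetical stand-in for an exchange's exceptions config:
broad = {'insufficient funds': 'InsufficientFunds', 'order not found': 'OrderNotFound'}
key = exchange.find_broadly_matched_key(broad, 'Error 123: insufficient funds for trade')
# key == 'insufficient funds'; None is returned when no key occurs as a substring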
<SYSTEM_TASK:>
Deprecated, use decimal_to_precision instead
<END_TASK>
<USER_TASK:>
Description:
def truncate(num, precision=0):
"""Deprecated, use decimal_to_precision instead""" |
if precision > 0:
decimal_precision = math.pow(10, precision)
return math.trunc(num * decimal_precision) / decimal_precision
return int(Exchange.truncate_to_string(num, precision)) |
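A small demonstration of the truncation semantics (math.trunc cuts toward zero, so there is no rounding):
print(Exchange.truncate(1.2399, 2))   # 1.23
print(Exchange.truncate(-1.2399, 2))  # -1.23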
<SYSTEM_TASK:>
Checks that an address is not a single repeated character or an empty sequence
<END_TASK>
<USER_TASK:>
Description:
def check_address(self, address):
"""Checks an address is not the same character repeated or an empty sequence""" |
if address is None:
self.raise_error(InvalidAddress, details='address is None')
if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address:
self.raise_error(InvalidAddress, details='address is invalid or has less than ' + str(self.minFundingAddressLength) + ' characters: "' + str(address) + '"')
return address |
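A hedged demonstration, assuming an instantiated exchange object; the addresses are illustrative:
exchange.check_address('1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa')  # valid: returned unchanged
exchange.check_address('aaaaaaaaaa')  # raises InvalidAddress: one repeated character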
<SYSTEM_TASK:>
Reads a .wav file.
<END_TASK>
<USER_TASK:>
Description:
def read_wave(path):
"""Reads a .wav file.
Takes the path, and returns (PCM audio data, sample rate, duration).
""" |
with contextlib.closing(wave.open(path, 'rb')) as wf:
num_channels = wf.getnchannels()
assert num_channels == 1
sample_width = wf.getsampwidth()
assert sample_width == 2
sample_rate = wf.getframerate()
assert sample_rate in (8000, 16000, 32000)
frames = wf.getnframes()
pcm_data = wf.readframes(frames)
duration = frames / sample_rate
return pcm_data, sample_rate, duration |
<SYSTEM_TASK:>
Writes a .wav file.
<END_TASK>
<USER_TASK:>
Description:
def write_wave(path, audio, sample_rate):
"""Writes a .wav file.
Takes path, PCM audio data, and sample rate.
""" |
with contextlib.closing(wave.open(path, 'wb')) as wf:
wf.setnchannels(1)
wf.setsampwidth(2)
wf.setframerate(sample_rate)
wf.writeframes(audio) |
<SYSTEM_TASK:>
Generates audio frames from PCM audio data.
<END_TASK>
<USER_TASK:>
Description:
def frame_generator(frame_duration_ms, audio, sample_rate):
"""Generates audio frames from PCM audio data.
Takes the desired frame duration in milliseconds, the PCM data, and
the sample rate.
Yields Frames of the requested duration.
""" |
n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)  # bytes per frame (2 bytes per 16-bit sample)
offset = 0
timestamp = 0.0
duration = (float(n) / sample_rate) / 2.0  # seconds per frame; / 2.0 converts bytes back to samples
while offset + n < len(audio):
yield Frame(audio[offset:offset + n], timestamp, duration)
timestamp += duration
offset += n |
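A minimal sketch chaining the three helpers with webrtcvad (the wav path and aggressiveness level are assumptions, and Frame is assumed to expose bytes/timestamp fields as in the usual VAD example code):
import webrtcvad

audio, sample_rate, duration = read_wave('speech.wav')  # mono 16-bit, 8/16/32 kHz
vad = webrtcvad.Vad(3)  # aggressiveness 0-3
for frame in frame_generator(30, audio, sample_rate):  # 30 ms frames
    if vad.is_speech(frame.bytes, sample_rate):
        print('speech at %.2fs' % frame.timestamp)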
<SYSTEM_TASK:>
Generate a function to download a file based on given parameters
<END_TASK>
<USER_TASK:>
Description:
def _parallel_downloader(voxforge_url, archive_dir, total, counter):
"""Generate a function to download a file based on given parameters
This works by currying the given arguments into a closure
in the form of the following function.
:param voxforge_url: the base voxforge URL
:param archive_dir: the location to store the downloaded file
:param total: the total number of files to download
:param counter: an atomic counter to keep track of # of downloaded files
:return: a function that actually downloads a file given these params
""" |
def download(d):
"""Binds voxforge_url, archive_dir, total, and counter into this scope
Downloads the given file
:param d: a tuple consisting of (index, file) where index is the index
of the file to download and file is the name of the file to download
"""
(i, file) = d
download_url = voxforge_url + '/' + file
c = counter.increment()
print('Downloading file {} ({}/{})...'.format(i+1, c, total))
maybe_download(filename_of(download_url), archive_dir, download_url)
return download |
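A hedged sketch of consuming the curried closure with a thread pool (files, counter, and the URL/directory variables are assumptions implied by the docstring):
from multiprocessing.dummy import Pool  # thread pool; the work is I/O-bound

downloader = _parallel_downloader(voxforge_url, archive_dir,
                                  total=len(files), counter=counter)
Pool(8).map(downloader, enumerate(files))  # each element becomes the (index, file) tuple d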
<SYSTEM_TASK:>
Generate a function to extract a tar file based on given parameters
<END_TASK>
<USER_TASK:>
Description:
def _parallel_extracter(data_dir, number_of_test, number_of_dev, total, counter):
"""Generate a function to extract a tar file based on given parameters
This works by currying the given arguments into a closure
in the form of the following function.
:param data_dir: the target directory to extract into
:param number_of_test: the number of files to keep as the test set
:param number_of_dev: the number of files to keep as the dev set
:param total: the total number of files to extract
:param counter: an atomic counter to keep track of # of extracted files
:return: a function that actually extracts a tar file given these params
""" |
def extract(d):
"""Binds data_dir, number_of_test, number_of_dev, total, and counter into this scope
Extracts the given file
:param d: a tuple consisting of (index, file) where index is the index
of the file to extract and file is the name of the file to extract
"""
(i, archive) = d
if i < number_of_test:
dataset_dir = path.join(data_dir, "test")
elif i < number_of_test + number_of_dev:
dataset_dir = path.join(data_dir, "dev")
else:
dataset_dir = path.join(data_dir, "train")
if not gfile.Exists(path.join(dataset_dir, '.'.join(filename_of(archive).split(".")[:-1]))):
c = counter.increment()
print('Extracting file {} ({}/{})...'.format(i+1, c, total))
tar = tarfile.open(archive)
tar.extractall(dataset_dir)
tar.close()
return extract |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def text_to_char_array(original, alphabet):
r"""
Given a Python string ``original``, remove unsupported characters, map characters
to integers and return a numpy array representing the processed string.
""" |
return np.asarray([alphabet.label_from_string(c) for c in original]) |
<SYSTEM_TASK:>
Wrapper for the CTC Beam Search Decoder.
<END_TASK>
<USER_TASK:>
Description:
def ctc_beam_search_decoder(probs_seq,
alphabet,
beam_size,
cutoff_prob=1.0,
cutoff_top_n=40,
scorer=None):
"""Wrapper for the CTC Beam Search Decoder.
:param probs_seq: 2-D list of probability distributions over each time
step, with each element being a list of normalized
probabilities over alphabet and blank.
:type probs_seq: 2-D list
:param alphabet: the alphabet used to map characters to labels.
:type alphabet: Alphabet
:param beam_size: Width for beam search.
:type beam_size: int
:param cutoff_prob: Cutoff probability in pruning,
default 1.0, no pruning.
:type cutoff_prob: float
:param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n
characters with highest probs in alphabet will be
used in beam search, default 40.
:type cutoff_top_n: int
:param scorer: External scorer for partially decoded sentence, e.g. word
count or language model.
:type scorer: Scorer
:return: List of tuples of log probability and sentence as decoding
results, in descending order of the probability.
:rtype: list
""" |
beam_results = swigwrapper.ctc_beam_search_decoder(
probs_seq, alphabet.config_file(), beam_size, cutoff_prob, cutoff_top_n,
scorer)
beam_results = [(res.probability, alphabet.decode(res.tokens)) for res in beam_results]
return beam_results |
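A hedged usage sketch; the alphabet path and probs_seq matrix are placeholders, and Alphabet is assumed to be the companion class whose config_file() is consumed above:
alphabet = Alphabet('alphabet.txt')  # placeholder config path
results = ctc_beam_search_decoder(probs_seq, alphabet, beam_size=500)
best_log_prob, best_transcript = results[0]  # results come back sorted best-first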
<SYSTEM_TASK:>
Wrapper for the batched CTC beam search decoder.
<END_TASK>
<USER_TASK:>
Description:
def ctc_beam_search_decoder_batch(probs_seq,
seq_lengths,
alphabet,
beam_size,
num_processes,
cutoff_prob=1.0,
cutoff_top_n=40,
scorer=None):
"""Wrapper for the batched CTC beam search decoder.
:param probs_seq: 3-D list with each element as an instance of 2-D list
of probabilities used by ctc_beam_search_decoder().
:type probs_seq: 3-D list
:param alphabet: the alphabet used to map characters to labels.
:type alphabet: Alphabet
:param beam_size: Width for beam search.
:type beam_size: int
:param num_processes: Number of parallel processes.
:type num_processes: int
:param cutoff_prob: Cutoff probability in alphabet pruning,
default 1.0, no pruning.
:type cutoff_prob: float
:param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n
characters with highest probs in alphabet will be
used in beam search, default 40.
:type cutoff_top_n: int
:param scorer: External scorer for partially decoded sentence, e.g. word
count or language model.
:type scorer: Scorer
:return: List of tuples of log probability and sentence as decoding
results, in descending order of the probability.
:rtype: list
""" |
batch_beam_results = swigwrapper.ctc_beam_search_decoder_batch(
probs_seq, seq_lengths, alphabet.config_file(), beam_size, num_processes,
cutoff_prob, cutoff_top_n, scorer)
batch_beam_results = [
[(res.probability, alphabet.decode(res.tokens)) for res in beam_results]
for beam_results in batch_beam_results
]
return batch_beam_results |
<SYSTEM_TASK:>
Microphone may not support our native processing sampling rate, so
<END_TASK>
<USER_TASK:>
Description:
def resample(self, data, input_rate):
"""
Microphone may not support our native processing sampling rate, so
resample from input_rate to RATE_PROCESS here for webrtcvad and
deepspeech
Args:
data (binary): Input audio stream
input_rate (int): Input audio rate to resample from
""" |
data16 = np.frombuffer(data, dtype=np.int16)  # np.fromstring is deprecated
resample_size = int(len(data16) / self.input_rate * self.RATE_PROCESS)
resample = signal.resample(data16, resample_size)
resample16 = np.array(resample, dtype=np.int16)
return resample16.tobytes() |
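The same idea as a standalone sketch, resampling 44.1 kHz int16 PCM down to the 16 kHz processing rate; raw_bytes is an assumed input buffer:
import numpy as np
from scipy import signal

data16 = np.frombuffer(raw_bytes, dtype=np.int16)
target_len = int(len(data16) * 16000 / 44100)
resampled = signal.resample(data16, target_len).astype(np.int16).tobytes()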
<SYSTEM_TASK:>
Return a block of audio data resampled to 16000hz, blocking if necessary.
<END_TASK>
<USER_TASK:>
Description:
def read_resampled(self):
"""Return a block of audio data resampled to 16000hz, blocking if necessary.""" |
return self.resample(data=self.buffer_queue.get(),
input_rate=self.input_rate) |
<SYSTEM_TASK:>
Generator that yields all audio frames from microphone.
<END_TASK>
<USER_TASK:>
Description:
def frame_generator(self):
"""Generator that yields all audio frames from microphone.""" |
if self.input_rate == self.RATE_PROCESS:
while True:
yield self.read()
else:
while True:
yield self.read_resampled() |
<SYSTEM_TASK:>
Global `cut` function that supports parallel processing.
<END_TASK>
<USER_TASK:>
Description:
def cut(sentence, HMM=True):
"""
Global `cut` function that supports parallel processing.
Note that this only works using dt; custom POSTokenizer
instances are not supported.
""" |
global dt
if jieba.pool is None:
for w in dt.cut(sentence, HMM=HMM):
yield w
else:
parts = strdecode(sentence).splitlines(True)
if HMM:
result = jieba.pool.map(_lcut_internal, parts)
else:
result = jieba.pool.map(_lcut_internal_no_hmm, parts)
for r in result:
for w in r:
yield w |
<SYSTEM_TASK:>
Change the module's `cut` and `cut_for_search` functions to the
<END_TASK>
<USER_TASK:>
Description:
def enable_parallel(processnum=None):
"""
Change the module's `cut` and `cut_for_search` functions to the
parallel version.
Note that this only works using dt; custom Tokenizer
instances are not supported.
""" |
global pool, dt, cut, cut_for_search
from multiprocessing import cpu_count
if os.name == 'nt':
raise NotImplementedError(
"jieba: parallel mode only supports posix system")
else:
from multiprocessing import Pool
dt.check_initialized()
if processnum is None:
processnum = cpu_count()
pool = Pool(processnum)
cut = _pcut
cut_for_search = _pcut_for_search |
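A minimal usage sketch (POSIX only, as enforced above; the sample sentence is illustrative):
import jieba

jieba.enable_parallel(4)  # swaps in the parallel cut/cut_for_search
words = list(jieba.cut('我来到北京清华大学'))
jieba.disable_parallel()  # counterpart that restores the serial versions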