_id (string, 2–7 chars) | title (string, 1–88 chars) | partition (string, 3 classes) | text (string, 75–19.8k chars) | language (string, 1 class) | meta_information (dict) |
---|---|---|---|---|---|
q277500
|
validate
|
test
|
def validate(metric_class):
"""
Does basic Metric option validation.
"""
if not hasattr(metric_class, 'label'):
raise ImproperlyConfigured("No 'label' attribute found for metric %s." % metric_class.__name__)
if not hasattr(metric_class, 'widget'):
raise ImproperlyConfigured("No 'widget' attribute found for metric %s." % metric_class.__name__)
|
python
|
{
"resource": ""
}
|
q277501
|
get_statistic_by_name
|
test
|
def get_statistic_by_name(stat_name):
"""
    Fetches a statistic based on the given class name. Does a look-up
in the gadgets' registered statistics to find the specified one.
"""
if stat_name == 'ALL':
return get_statistic_models()
for stat in get_statistic_models():
if stat.__name__ == stat_name:
return stat
raise Exception, _("%(stat)s cannot be found.") % {'stat': stat_name}
|
python
|
{
"resource": ""
}
|
q277502
|
calculate_statistics
|
test
|
def calculate_statistics(stat, frequencies):
"""
Calculates all of the metrics associated with the registered gadgets.
"""
stats = ensure_list(stat)
frequencies = ensure_list(frequencies)
for stat in stats:
for f in frequencies:
print "Calculating %s (%s)..." % (stat.__name__, settings.STATISTIC_FREQUENCY_DICT[f])
stat.calculate(f)
|
python
|
{
"resource": ""
}
|
q277503
|
autodiscover
|
test
|
def autodiscover():
"""
Auto-discover INSTALLED_APPS gadgets.py modules and fail silently when
not present. This forces an import on them to register any gadgets they
may want.
"""
from django.conf import settings
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
for app in settings.INSTALLED_APPS:
mod = import_module(app)
# Attempt to import the app's gadgets module.
try:
import_module('%s.gadgets' % app)
except:
# Decide whether to bubble up this error. If the app just
# doesn't have a gadgets module, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(mod, 'gadgets'):
raise
|
python
|
{
"resource": ""
}
|
q277504
|
csv_dump
|
test
|
def csv_dump(request, uid):
"""
Returns a CSV dump of all of the specified metric's counts
and cumulative counts.
"""
metric = Metric.objects.get(uid=uid)
frequency = request.GET.get('frequency', settings.STATISTIC_FREQUENCY_DAILY)
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s%s.csv' % (uid, datetime.datetime.now().strftime("%Y%m%d-%H%M"))
writer = csv.writer(response)
writer.writerow([_('Date/time'), _('Count'), _('Cumulative count')])
for stat in metric.statistics.filter(frequency=frequency).order_by('date_time'):
writer.writerow([stat.date_time.strftime(settings.CSV_DATETIME_FORMAT), stat.count, stat.cumulative_count])
return response
|
python
|
{
"resource": ""
}
|
q277505
|
Command.handle
|
test
|
def handle(self, *args, **kwargs):
"""
Command handler for the "metrics" command.
"""
frequency = kwargs['frequency']
    if frequency == 'a':
        frequencies = settings.STATISTIC_FREQUENCY_ALL
    else:
        # str.split(',') already yields a one-element list when there is
        # no comma, so both cases are covered
        frequencies = frequency.split(',')
if kwargs['list']:
maintenance.list_statistics()
# if we're supposed to calculate the latest statistics
elif kwargs['calculate']:
maintenance.calculate_statistics(maintenance.get_statistic_by_name(kwargs['calculate']), frequencies)
# pure reset of statistic(s)
elif kwargs['reset']:
maintenance.reset_statistics(maintenance.get_statistic_by_name(kwargs['reset']), frequencies, kwargs['reset_cumulative'])
# recalculation of statistic(s)
elif kwargs['recalculate']:
maintenance.reset_statistics(maintenance.get_statistic_by_name(kwargs['recalculate']), frequencies, kwargs['reset_cumulative'], True)
|
python
|
{
"resource": ""
}
|
q277506
|
get_GET_array
|
test
|
def get_GET_array(request, var_name, fail_silently=True):
"""
Returns the GET array's contents for the specified variable.
"""
vals = request.GET.getlist(var_name)
if not vals:
if fail_silently:
return []
else:
raise Exception, _("No array called '%(varname)s' in GET variables") % {'varname': var_name}
return vals
|
python
|
{
"resource": ""
}
|
q277507
|
get_GET_bool
|
test
|
def get_GET_bool(request, var_name, default=True):
"""
Tries to extract a boolean variable from the specified request.
"""
val = request.GET.get(var_name, default)
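    # only the first character is inspected below, so 'true' and 't' map
    # to True, while 'false', 'f', or '0' map to False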
if isinstance(val, str) or isinstance(val, unicode):
val = True if val[0] == 't' else False
return val
|
python
|
{
"resource": ""
}
|
q277508
|
get_next_colour
|
test
|
def get_next_colour():
"""
Gets the next colour in the Geckoboard colour list.
"""
colour = settings.GECKOBOARD_COLOURS[get_next_colour.cur_colour]
get_next_colour.cur_colour += 1
if get_next_colour.cur_colour >= len(settings.GECKOBOARD_COLOURS):
get_next_colour.cur_colour = 0
return colour
|
python
|
{
"resource": ""
}
|
q277509
|
get_gecko_params
|
test
|
def get_gecko_params(request, uid=None, days_back=0, cumulative=True,
frequency=settings.STATISTIC_FREQUENCY_DAILY, min_val=0, max_val=100,
chart_type='standard', percentage='show', sort=False):
"""
Returns the default GET parameters for a particular Geckoboard
view request.
"""
return {
'days_back' : int(request.GET.get('daysback', days_back)),
'uid' : request.GET.get('uid', uid),
'uids' : get_GET_array(request, 'uids[]'),
'cumulative' : get_GET_bool(request, 'cumulative', cumulative),
'frequency' : request.GET.get('frequency', frequency),
'min' : request.GET.get('min', min_val),
'max' : request.GET.get('max', max_val),
'type' : request.GET.get('type', chart_type),
'percentage' : request.GET.get('percentage', percentage),
'sort' : get_GET_bool(request, 'sort', sort),
}
|
python
|
{
"resource": ""
}
|
q277510
|
geckoboard_number_widget
|
test
|
def geckoboard_number_widget(request):
"""
Returns a number widget for the specified metric's cumulative total.
"""
params = get_gecko_params(request, days_back=7)
metric = Metric.objects.get(uid=params['uid'])
try:
latest_stat = metric.statistics.filter(frequency=params['frequency']).order_by('-date_time')[0]
except IndexError:
return (0, 0)
try:
prev_stat = metric.statistics.filter(frequency=params['frequency'],
date_time__lte=latest_stat.date_time-timedelta(days=params['days_back'])).order_by('-date_time')[0]
except IndexError:
# if there is no previous stat
return (latest_stat.cumulative_count, 0) if params['cumulative'] else (latest_stat.count, 0)
return (latest_stat.cumulative_count, prev_stat.cumulative_count) if params['cumulative'] else (latest_stat.count, prev_stat.count)
|
python
|
{
"resource": ""
}
|
q277511
|
geckoboard_rag_widget
|
test
|
def geckoboard_rag_widget(request):
"""
Searches the GET variables for metric UIDs, and displays
them in a RAG widget.
"""
params = get_gecko_params(request)
print params['uids']
max_date = datetime.now()-timedelta(days=params['days_back'])
metrics = Metric.objects.filter(uid__in=params['uids'])
results = [(metric.latest_count(frequency=params['frequency'], count=not params['cumulative'],
cumulative=params['cumulative'], max_date=max_date), metric.title) for metric in metrics]
return tuple(results)
|
python
|
{
"resource": ""
}
|
q277512
|
geckoboard_line_chart
|
test
|
def geckoboard_line_chart(request):
"""
Returns the data for a line chart for the specified metric.
"""
params = get_gecko_params(request, cumulative=False, days_back=7)
metric = Metric.objects.get(uid=params['uid'])
start_date = datetime.now()-timedelta(days=params['days_back'])
stats = [s for s in metric.statistics.filter(frequency=params['frequency'],
date_time__gte=start_date).order_by('date_time')]
if len(stats) == 0:
raise Exception, _("No statistics for metric %(metric)s.") % {'metric': params['uid']}
dates = [stats[0].date_time]
# get up to 3 dates from the stats
if len(stats) >= 3:
mid = len(stats)/2
if not mid:
mid = 1
dates.extend([stats[mid].date_time, stats[-1].date_time])
elif len(stats) == 2:
dates.extend([stats[-1].date_time])
return (
[s.count for s in stats],
dates,
metric.title,
)
|
python
|
{
"resource": ""
}
|
q277513
|
geckoboard_geckometer
|
test
|
def geckoboard_geckometer(request):
"""
Returns a Geck-o-Meter control for the specified metric.
"""
params = get_gecko_params(request, cumulative=True)
metric = Metric.objects.get(uid=params['uid'])
return (metric.latest_count(frequency=params['frequency'], count=not params['cumulative'],
cumulative=params['cumulative']), params['min'], params['max'])
|
python
|
{
"resource": ""
}
|
q277514
|
geckoboard_funnel
|
test
|
def geckoboard_funnel(request, frequency=settings.STATISTIC_FREQUENCY_DAILY):
"""
Returns a funnel chart for the metrics specified in the GET variables.
"""
# get all the parameters for this function
params = get_gecko_params(request, cumulative=True)
metrics = Metric.objects.filter(uid__in=params['uids'])
items = [(metric.latest_count(frequency=params['frequency'], count=not params['cumulative'],
cumulative=params['cumulative']), metric.title) for metric in metrics]
return {
'items' : items,
'type' : params['type'],
'percentage': params['percentage'],
'sort' : params['sort'],
}
|
python
|
{
"resource": ""
}
|
q277515
|
AnalyticsView.get_active_stats
|
test
|
def get_active_stats(self):
"""
Returns all of the active statistics for the gadgets currently registered.
"""
stats = []
for gadget in self._registry.values():
for s in gadget.stats:
if s not in stats:
stats.append(s)
return stats
|
python
|
{
"resource": ""
}
|
q277516
|
AnalyticsView.register
|
test
|
def register(self, gadget):
"""
Registers a gadget object.
If a gadget is already registered, this will raise AlreadyRegistered.
"""
if gadget in self._registry:
raise AlreadyRegistered
else:
self._registry.append(gadget)
|
python
|
{
"resource": ""
}
|
q277517
|
AnalyticsView.get_context_data
|
test
|
def get_context_data(self, **kwargs):
"""
Get the context for this view.
"""
#max_columns, max_rows = self.get_max_dimension()
context = {
'gadgets': self._registry,
'columns': self.columns,
'rows': self.rows,
'column_ratio': 100 - self.columns * 2,
'row_ratio': 100 - self.rows * 2,
}
context.update(kwargs)
return context
|
python
|
{
"resource": ""
}
|
q277518
|
Command.error
|
test
|
def error(self, message, code=1):
"""
Print error and stop command
"""
print >>sys.stderr, message
sys.exit(code)
|
python
|
{
"resource": ""
}
|
q277519
|
Service.valid
|
test
|
def valid(schema=None):
""" Validation data by specific validictory configuration """
def dec(fun):
@wraps(fun)
def d_func(self, ctx, data, *a, **kw):
try:
validate(data['params'], schema)
except ValidationError as err:
raise InvalidParams(err)
except SchemaError as err:
raise InternalError(err)
return fun(self, ctx, data['params'], *a, **kw)
return d_func
return dec
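
# A minimal usage sketch (hypothetical schema and handler; validictory
# schemas follow a JSON-Schema-like layout):
#
#     @valid({'type': 'object',
#             'properties': {'name': {'type': 'string'}}})
#     def do_echo(self, ctx, params):
#         return params['name']  # params was validated before the call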
|
python
|
{
"resource": ""
}
|
q277520
|
long_input
|
test
|
def long_input(prompt='Multi-line input\n' + \
'Enter EOF on a blank line to end ' + \
'(ctrl-D in *nix, ctrl-Z in windows)',
maxlines = None, maxlength = None):
"""Get a multi-line string as input"""
lines = []
print(prompt)
lnum = 1
try:
while True:
if maxlines:
if lnum > maxlines:
break
else:
if maxlength:
lines.append(string_input('')[:maxlength])
else:
lines.append(string_input(''))
lnum += 1
else:
if maxlength:
lines.append(string_input('')[:maxlength])
else:
lines.append(string_input(''))
except EOFError:
pass
finally:
return '\n'.join(lines)
|
python
|
{
"resource": ""
}
|
q277521
|
list_input
|
test
|
def list_input(prompt='List input - enter each item on a separate line\n' + \
'Enter EOF on a blank line to end ' + \
'(ctrl-D in *nix, ctrl-Z in windows)',
maxitems=None, maxlength=None):
"""Get a list of strings as input"""
lines = []
print(prompt)
inum = 1
try:
while True:
if maxitems:
if inum > maxitems:
break
else:
if maxlength:
lines.append(string_input('')[:maxlength])
else:
lines.append(string_input(''))
inum += 1
else:
if maxlength:
lines.append(string_input('')[:maxlength])
else:
lines.append(string_input(''))
except EOFError:
pass
finally:
return lines
|
python
|
{
"resource": ""
}
|
q277522
|
outfile_input
|
test
|
def outfile_input(extension=None):
"""Get an output file name as input"""
fileok = False
while not fileok:
filename = string_input('File name? ')
if extension:
if not filename.endswith(extension):
if extension.startswith('.'):
filename = filename + extension
else:
filename = filename + '.' + extension
if os.path.isfile(filename):
choice = choice_input(prompt=filename + \
' already exists. Overwrite?',
options=['y', 'n'])
if choice == 'y':
                try:
                    nowtime = time.time()
                    with open(filename, 'a') as f:
                        os.utime(filename, (nowtime, nowtime))
                    fileok = True
                except FileNotFoundError:
                    print(filename + ': directory not found. Try again.')
                except OSError:
                    # IOError and PermissionError are both subclasses of
                    # OSError in Python 3, so one handler covers both
                    print('Write permission denied on ' + filename +
                          '. Try again.')
else:
choice = choice_input(
prompt=filename + ' does not exist. Create it?',
options=['y', 'n'])
if choice == 'y':
                try:
                    nowtime = time.time()
                    with open(filename, 'w') as f:
                        os.utime(filename, (nowtime, nowtime))
                    fileok = True
                except FileNotFoundError:
                    print(filename + ': directory not found. Try again.')
                except OSError:
                    # IOError and PermissionError are both subclasses of
                    # OSError in Python 3, so one handler covers both
                    print('Write permission denied on ' + filename +
                          '. Try again.')
return filename
|
python
|
{
"resource": ""
}
|
q277523
|
Team.schedule
|
test
|
def schedule(self, year):
"""Gets schedule information for a team-season.
:year: The year for which we want the schedule.
:returns: DataFrame of schedule information.
"""
doc = self.get_year_doc('{}_games'.format(year))
table = doc('table#games')
df = sportsref.utils.parse_table(table)
return df
|
python
|
{
"resource": ""
}
|
q277524
|
BoxScore.winner
|
test
|
def winner(self):
"""Returns the team ID of the winning team. Returns NaN if a tie."""
hmScore = self.home_score()
awScore = self.away_score()
if hmScore > awScore:
return self.home()
elif hmScore < awScore:
return self.away()
else:
return None
|
python
|
{
"resource": ""
}
|
q277525
|
BoxScore.season
|
test
|
def season(self):
"""
Returns the year ID of the season in which this game took place.
Useful for week 17 January games.
:returns: An int representing the year of the season.
"""
date = self.date()
return date.year - 1 if date.month <= 3 else date.year
|
python
|
{
"resource": ""
}
|
q277526
|
BoxScore.starters
|
test
|
def starters(self):
"""Returns a DataFrame where each row is an entry in the starters table
from PFR.
The columns are:
* player_id - the PFR player ID for the player (note that this column
is not necessarily all unique; that is, one player can be a starter in
multiple positions, in theory).
* playerName - the listed name of the player; this too is not
necessarily unique.
* position - the position at which the player started for their team.
* team - the team for which the player started.
* home - True if the player's team was at home, False if they were away
* offense - True if the player is starting on an offensive position,
False if defense.
:returns: A pandas DataFrame. See the description for details.
"""
doc = self.get_doc()
a = doc('table#vis_starters')
h = doc('table#home_starters')
data = []
    # note: enumerate((a, h)) builds the tuple before the loop runs, so a
    # distinct loop-variable name keeps the home table from being shadowed
    for is_home, table in enumerate((a, h)):
        team = self.home() if is_home else self.away()
        for i, row in enumerate(table('tbody tr').items()):
            datum = {}
            datum['player_id'] = sportsref.utils.rel_url_to_id(
                row('a')[0].attrib['href']
            )
            datum['playerName'] = row('th').text()
            datum['position'] = row('td').text()
            datum['team'] = team
            datum['home'] = (is_home == 1)
            datum['offense'] = (i <= 10)
            data.append(datum)
return pd.DataFrame(data)
|
python
|
{
"resource": ""
}
|
q277527
|
BoxScore.surface
|
test
|
def surface(self):
"""The playing surface on which the game was played.
:returns: string representing the type of surface. Returns np.nan if
    not available.
"""
doc = self.get_doc()
table = doc('table#game_info')
giTable = sportsref.utils.parse_info_table(table)
return giTable.get('surface', np.nan)
|
python
|
{
"resource": ""
}
|
q277528
|
BoxScore.coin_toss
|
test
|
def coin_toss(self):
"""Gets information relating to the opening coin toss.
Keys are:
* wonToss - contains the ID of the team that won the toss
* deferred - bool whether the team that won the toss deferred it
:returns: Dictionary of coin toss-related info.
"""
doc = self.get_doc()
table = doc('table#game_info')
giTable = sportsref.utils.parse_info_table(table)
if 'Won Toss' in giTable:
# TODO: finish coinToss function
pass
else:
return None
|
python
|
{
"resource": ""
}
|
q277529
|
BoxScore.weather
|
test
|
def weather(self):
"""Returns a dictionary of weather-related info.
Keys of the returned dict:
* temp
* windChill
* relHumidity
* windMPH
:returns: Dict of weather data.
"""
doc = self.get_doc()
table = doc('table#game_info')
giTable = sportsref.utils.parse_info_table(table)
if 'weather' in giTable:
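        # the expected format is, by inference from the regex below, e.g.
        #   '35 degrees relative humidity 59%, wind 10 mph, wind chill 27'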
regex = (
r'(?:(?P<temp>\-?\d+) degrees )?'
r'(?:relative humidity (?P<relHumidity>\d+)%, )?'
r'(?:wind (?P<windMPH>\d+) mph, )?'
r'(?:wind chill (?P<windChill>\-?\d+))?'
)
m = re.match(regex, giTable['weather'])
d = m.groupdict()
# cast values to int
for k in d:
try:
d[k] = int(d[k])
except TypeError:
pass
# one-off fixes
d['windChill'] = (d['windChill'] if pd.notnull(d['windChill'])
else d['temp'])
d['windMPH'] = d['windMPH'] if pd.notnull(d['windMPH']) else 0
return d
else:
# no weather found, because it's a dome
# TODO: what's relative humidity in a dome?
return {
'temp': 70, 'windChill': 70, 'relHumidity': None, 'windMPH': 0
}
|
python
|
{
"resource": ""
}
|
q277530
|
BoxScore.ref_info
|
test
|
def ref_info(self):
"""Gets a dictionary of ref positions and the ref IDs of the refs for
that game.
:returns: A dictionary of ref positions and IDs.
"""
doc = self.get_doc()
table = doc('table#officials')
return sportsref.utils.parse_info_table(table)
|
python
|
{
"resource": ""
}
|
q277531
|
Season.schedule
|
test
|
def schedule(self, kind='R'):
"""Returns a list of BoxScore IDs for every game in the season.
Only needs to handle 'R' or 'P' options because decorator handles 'B'.
:param kind: 'R' for regular season, 'P' for playoffs, 'B' for both.
Defaults to 'R'.
:returns: DataFrame of schedule information.
:rtype: pd.DataFrame
"""
kind = kind.upper()[0]
dfs = []
# get games from each month
for month in ('october', 'november', 'december', 'january', 'february',
'march', 'april', 'may', 'june'):
try:
doc = self.get_sub_doc('games-{}'.format(month))
except ValueError:
continue
table = doc('table#schedule')
df = sportsref.utils.parse_table(table)
dfs.append(df)
df = pd.concat(dfs).reset_index(drop=True)
# figure out how many regular season games
try:
sportsref.utils.get_html('{}/playoffs/NBA_{}.html'.format(
sportsref.nba.BASE_URL, self.yr)
)
is_past_season = True
except ValueError:
is_past_season = False
if is_past_season:
team_per_game = self.team_stats_per_game()
n_reg_games = int(team_per_game.g.sum() // 2)
else:
n_reg_games = len(df)
# subset appropriately based on `kind`
if kind == 'P':
return df.iloc[n_reg_games:]
else:
return df.iloc[:n_reg_games]
|
python
|
{
"resource": ""
}
|
q277532
|
Season.standings
|
test
|
def standings(self):
"""Returns a DataFrame containing standings information."""
doc = self.get_sub_doc('standings')
east_table = doc('table#divs_standings_E')
    east_df = sportsref.utils.parse_table(east_table)
east_df.sort_values('wins', ascending=False, inplace=True)
east_df['seed'] = range(1, len(east_df) + 1)
east_df['conference'] = 'E'
west_table = doc('table#divs_standings_W')
west_df = sportsref.utils.parse_table(west_table)
west_df.sort_values('wins', ascending=False, inplace=True)
west_df['seed'] = range(1, len(west_df) + 1)
west_df['conference'] = 'W'
full_df = pd.concat([east_df, west_df], axis=0).reset_index(drop=True)
full_df['team_id'] = full_df.team_id.str.extract(r'(\w+)\W*\(\d+\)', expand=False)
full_df['gb'] = [gb if isinstance(gb, int) or isinstance(gb, float) else 0
for gb in full_df['gb']]
full_df = full_df.drop('has_class_full_table', axis=1)
expanded_table = doc('table#expanded_standings')
expanded_df = sportsref.utils.parse_table(expanded_table)
full_df = pd.merge(full_df, expanded_df, on='team_id')
return full_df
|
python
|
{
"resource": ""
}
|
q277533
|
Season._get_team_stats_table
|
test
|
def _get_team_stats_table(self, selector):
"""Helper function for stats tables on season pages. Returns a
DataFrame."""
doc = self.get_main_doc()
table = doc(selector)
df = sportsref.utils.parse_table(table)
df.set_index('team_id', inplace=True)
return df
|
python
|
{
"resource": ""
}
|
q277534
|
Season.roy_voting
|
test
|
def roy_voting(self):
"""Returns a DataFrame containing information about ROY voting."""
url = '{}/awards/awards_{}.html'.format(sportsref.nba.BASE_URL, self.yr)
doc = pq(sportsref.utils.get_html(url))
table = doc('table#roy')
df = sportsref.utils.parse_table(table)
return df
|
python
|
{
"resource": ""
}
|
q277535
|
BoxScore.linescore
|
test
|
def linescore(self):
"""Returns the linescore for the game as a DataFrame."""
doc = self.get_main_doc()
table = doc('table#line_score')
columns = [th.text() for th in table('tr.thead').items('th')]
columns[0] = 'team_id'
data = [
[sportsref.utils.flatten_links(td) for td in tr('td').items()]
for tr in table('tr.thead').next_all('tr').items()
]
return pd.DataFrame(data, index=['away', 'home'],
columns=columns, dtype='float')
|
python
|
{
"resource": ""
}
|
q277536
|
BoxScore.season
|
test
|
def season(self):
"""
Returns the year ID of the season in which this game took place.
:returns: An int representing the year of the season.
"""
d = self.date()
if d.month >= 9:
return d.year + 1
else:
return d.year
|
python
|
{
"resource": ""
}
|
q277537
|
BoxScore._get_player_stats
|
test
|
def _get_player_stats(self, table_id_fmt):
"""Returns a DataFrame of player stats from the game (either basic or
advanced, depending on the argument.
:param table_id_fmt: Format string for str.format with a placeholder
for the team ID (e.g. 'box_{}_basic')
:returns: DataFrame of player stats
"""
# get data
doc = self.get_main_doc()
tms = self.away(), self.home()
tm_ids = [table_id_fmt.format(tm) for tm in tms]
tables = [doc('table#{}'.format(tm_id).lower()) for tm_id in tm_ids]
dfs = [sportsref.utils.parse_table(table) for table in tables]
# clean data and add features
for i, (tm, df) in enumerate(zip(tms, dfs)):
no_time = df['mp'] == 0
stat_cols = [col for col, dtype in df.dtypes.items()
if dtype != 'object']
df.loc[no_time, stat_cols] = 0
df['team_id'] = tm
df['is_home'] = i == 1
df['is_starter'] = [p < 5 for p in range(df.shape[0])]
df.drop_duplicates(subset='player_id', keep='first', inplace=True)
return pd.concat(dfs)
|
python
|
{
"resource": ""
}
|
q277538
|
switch_to_dir
|
test
|
def switch_to_dir(dirPath):
"""
    Decorator that switches to the given directory before executing the
    function, then returns to the original directory.
    """
    def decorator(func):
        @funcutils.wraps(func)
        def wrapper(*args, **kwargs):
            orig_cwd = os.getcwd()
            os.chdir(dirPath)
            try:
                return func(*args, **kwargs)
            finally:
                # restore the working directory even if func raises
                os.chdir(orig_cwd)
return wrapper
return decorator
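
# A usage sketch (hypothetical directory and function):
#
#     @switch_to_dir('/tmp/build')
#     def compile_assets():
#         ...  # runs with /tmp/build as the current working directory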
|
python
|
{
"resource": ""
}
|
q277539
|
cache
|
test
|
def cache(func):
"""Caches the HTML returned by the specified function `func`. Caches it in
the user cache determined by the appdirs package.
"""
CACHE_DIR = appdirs.user_cache_dir('sportsref', getpass.getuser())
if not os.path.isdir(CACHE_DIR):
os.makedirs(CACHE_DIR)
@funcutils.wraps(func)
def wrapper(url):
# hash based on the URL
file_hash = hashlib.md5()
encoded_url = url.encode(errors='replace')
file_hash.update(encoded_url)
file_hash = file_hash.hexdigest()
filename = '{}/{}'.format(CACHE_DIR, file_hash)
sport_id = None
for a_base_url, a_sport_id in sportsref.SITE_ABBREV.items():
if url.startswith(a_base_url):
sport_id = a_sport_id
break
else:
print('No sport ID found for {}, not able to check cache'.format(url))
# check whether cache is valid or stale
file_exists = os.path.isfile(filename)
if sport_id and file_exists:
cur_time = int(time.time())
mod_time = int(os.path.getmtime(filename))
days_since_mod = datetime.timedelta(seconds=(cur_time - mod_time)).days
days_cache_valid = globals()['_days_valid_{}'.format(sport_id)](url)
cache_is_valid = days_since_mod < days_cache_valid
else:
cache_is_valid = False
# if file found and cache is valid, read from file
allow_caching = sportsref.get_option('cache')
if file_exists and cache_is_valid and allow_caching:
with codecs.open(filename, 'r', encoding='utf-8', errors='replace') as f:
text = f.read()
# otherwise, execute function and cache results
else:
text = func(url)
with codecs.open(filename, 'w+', encoding='utf-8') as f:
f.write(text)
return text
return wrapper
|
python
|
{
"resource": ""
}
|
q277540
|
get_class_instance_key
|
test
|
def get_class_instance_key(cls, args, kwargs):
"""
Returns a unique identifier for a class instantiation.
"""
l = [id(cls)]
for arg in args:
l.append(id(arg))
l.extend((k, id(v)) for k, v in kwargs.items())
return tuple(sorted(l))
|
python
|
{
"resource": ""
}
|
q277541
|
memoize
|
test
|
def memoize(fun):
"""A decorator for memoizing functions.
    Only works on functions that take simple, hashable arguments; calls
    whose arguments are list-like or dict-like cannot be memoized and
    will raise a TypeError.
"""
@funcutils.wraps(fun)
def wrapper(*args, **kwargs):
do_memoization = sportsref.get_option('memoize')
if not do_memoization:
return fun(*args, **kwargs)
hash_args = tuple(args)
hash_kwargs = frozenset(sorted(kwargs.items()))
key = (hash_args, hash_kwargs)
def _copy(v):
if isinstance(v, pq):
return v.clone()
else:
return copy.deepcopy(v)
try:
ret = _copy(cache[key])
return ret
except KeyError:
cache[key] = fun(*args, **kwargs)
ret = _copy(cache[key])
return ret
except TypeError:
print('memoization type error in function {} for arguments {}'
.format(fun.__name__, key))
raise
cache = {}
return wrapper
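
# A usage sketch (hypothetical function): with the 'memoize' option
# enabled, the second call below is served from the in-memory cache.
#
#     @memoize
#     def fetch(url):
#         return get_html(url)
#
#     fetch('http://example.com')  # computed
#     fetch('http://example.com')  # cached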
|
python
|
{
"resource": ""
}
|
q277542
|
Player.age
|
test
|
def age(self, year, month=2, day=1):
"""Returns the age of the player on a given date.
:year: int representing the year.
:month: int representing the month (1-12).
:day: int representing the day within the month (1-31).
:returns: Age in years as a float.
"""
doc = self.get_main_doc()
date_string = doc('span[itemprop="birthDate"]').attr('data-birth')
regex = r'(\d{4})\-(\d{2})\-(\d{2})'
date_args = map(int, re.match(regex, date_string).groups())
birth_date = datetime.date(*date_args)
age_date = datetime.date(year=year, month=month, day=day)
delta = age_date - birth_date
age = delta.days / 365.
return age
|
python
|
{
"resource": ""
}
|
q277543
|
Player._get_stats_table
|
test
|
def _get_stats_table(self, table_id, kind='R', summary=False):
"""Gets a stats table from the player page; helper function that does
the work for per-game, per-100-poss, etc. stats.
:table_id: the ID of the HTML table.
:kind: specifies regular season, playoffs, or both. One of 'R', 'P',
'B'. Defaults to 'R'.
:returns: A DataFrame of stats.
"""
doc = self.get_main_doc()
table_id = 'table#{}{}'.format(
'playoffs_' if kind == 'P' else '', table_id)
table = doc(table_id)
df = sportsref.utils.parse_table(table, flatten=(not summary),
footer=summary)
return df
|
python
|
{
"resource": ""
}
|
q277544
|
Player.stats_per_game
|
test
|
def stats_per_game(self, kind='R', summary=False):
"""Returns a DataFrame of per-game box score stats."""
return self._get_stats_table('per_game', kind=kind, summary=summary)
|
python
|
{
"resource": ""
}
|
q277545
|
Player.stats_totals
|
test
|
def stats_totals(self, kind='R', summary=False):
"""Returns a DataFrame of total box score statistics by season."""
return self._get_stats_table('totals', kind=kind, summary=summary)
|
python
|
{
"resource": ""
}
|
q277546
|
Player.stats_per36
|
test
|
def stats_per36(self, kind='R', summary=False):
"""Returns a DataFrame of per-36-minutes stats."""
return self._get_stats_table('per_minute', kind=kind, summary=summary)
|
python
|
{
"resource": ""
}
|
q277547
|
Player.stats_per100
|
test
|
def stats_per100(self, kind='R', summary=False):
"""Returns a DataFrame of per-100-possession stats."""
return self._get_stats_table('per_poss', kind=kind, summary=summary)
|
python
|
{
"resource": ""
}
|
q277548
|
Player.stats_advanced
|
test
|
def stats_advanced(self, kind='R', summary=False):
"""Returns a DataFrame of advanced stats."""
return self._get_stats_table('advanced', kind=kind, summary=summary)
|
python
|
{
"resource": ""
}
|
q277549
|
Player.stats_shooting
|
test
|
def stats_shooting(self, kind='R', summary=False):
"""Returns a DataFrame of shooting stats."""
return self._get_stats_table('shooting', kind=kind, summary=summary)
|
python
|
{
"resource": ""
}
|
q277550
|
Player.stats_pbp
|
test
|
def stats_pbp(self, kind='R', summary=False):
"""Returns a DataFrame of play-by-play stats."""
return self._get_stats_table('advanced_pbp', kind=kind,
summary=summary)
|
python
|
{
"resource": ""
}
|
q277551
|
Player.gamelog_basic
|
test
|
def gamelog_basic(self, year, kind='R'):
"""Returns a table of a player's basic game-by-game stats for a season.
:param year: The year representing the desired season.
:param kind: specifies regular season, playoffs, or both. One of 'R',
'P', 'B'. Defaults to 'R'.
:returns: A DataFrame of the player's standard boxscore stats from each
game of the season.
:rtype: pd.DataFrame
"""
doc = self.get_sub_doc('gamelog/{}'.format(year))
table = (doc('table#pgl_basic_playoffs')
if kind == 'P' else doc('table#pgl_basic'))
df = sportsref.utils.parse_table(table)
return df
|
python
|
{
"resource": ""
}
|
q277552
|
DeleteHandler.get
|
test
|
def get(self):
'''
Please don't do this in production environments.
'''
self.write("Memory Session Object Demo:")
if "sv" in self.session:
current_value = self.session["sv"]
self.write("current sv value is %s, and system will delete this value.<br/>" % self.session["sv"])
self.session.delete("sv")
if "sv" not in self.session:
self.write("current sv value is empty")
else:
self.write("Session data not found")
|
python
|
{
"resource": ""
}
|
q277553
|
expand_details
|
test
|
def expand_details(df, detailCol='detail'):
"""Expands the details column of the given dataframe and returns the
resulting DataFrame.
:df: The input DataFrame.
:detailCol: The detail column name.
:returns: Returns DataFrame with new columns from pbp parsing.
"""
df = copy.deepcopy(df)
df['detail'] = df[detailCol]
dicts = [sportsref.nfl.pbp.parse_play_details(detail) for detail in df['detail'].values]
# clean up unmatched details
cols = {c for d in dicts if d for c in d.keys()}
blankEntry = {c: np.nan for c in cols}
newDicts = [d if d else blankEntry for d in dicts]
# get details DataFrame and merge it with original to create main DataFrame
details = pd.DataFrame(newDicts)
df = pd.merge(df, details, left_index=True, right_index=True)
# add isError column
errors = [i for i, d in enumerate(dicts) if d is None]
df['isError'] = False
df.loc[errors, 'isError'] = True
# fill in some NaN's necessary for _clean_features
df.loc[0, 'qtr_time_remain'] = '15:00'
df.qtr_time_remain.fillna(method='bfill', inplace=True)
df.qtr_time_remain.fillna(
pd.Series(np.where(df.quarter == 4, '0:00', '15:00')), inplace=True
)
# use _clean_features to clean up and add columns
new_df = df.apply(_clean_features, axis=1)
return new_df
|
python
|
{
"resource": ""
}
|
q277554
|
_add_team_columns
|
test
|
def _add_team_columns(features):
"""Function that adds 'team' and 'opp' columns to the features by iterating
through the rows in order. A precondition is that the features dicts are in
order in a continuous game sense and that all rows are from the same game.
:features: A DataFrame with each row representing each play (in order).
:returns: A similar DataFrame but with 'team' and 'opp' columns added.
"""
features = features.to_dict('records')
curTm = curOpp = None
playAfterKickoff = False
# fill in team and opp columns
for row in features:
# if it's a kickoff or the play after a kickoff,
# figure out who has possession manually
if row['isKickoff'] or playAfterKickoff:
curTm, curOpp = _team_and_opp(row)
else:
curTm, curOpp = _team_and_opp(row, curTm, curOpp)
row['team'], row['opp'] = curTm, curOpp
# set playAfterKickoff
playAfterKickoff = row['isKickoff']
features = pd.DataFrame(features)
features.team.fillna(method='bfill', inplace=True)
features.opp.fillna(method='bfill', inplace=True)
# ffill for last row
features.team.fillna(method='ffill', inplace=True)
features.opp.fillna(method='ffill', inplace=True)
return features
|
python
|
{
"resource": ""
}
|
q277555
|
_add_team_features
|
test
|
def _add_team_features(df):
"""Adds extra convenience features based on teams with and without
    possession, with the precondition that 'team' and 'opp' are specified
    in each row.
:df: A DataFrame representing a game's play-by-play data after
_clean_features has been called and 'team' and 'opp' have been added by
_add_team_columns.
    :returns: A DataFrame with new features in addition to the previous features.
"""
assert df.team.notnull().all()
homeOnOff = df['team'] == df['home']
# create column for distToGoal
df['distToGoal'] = np.where(df['team'] != df['fieldSide'],
df['ydLine'], 100 - df['ydLine'])
df['distToGoal'] = np.where(df['isXP'] | df['isTwoPoint'],
2, df['distToGoal'])
# create column for each team's WP
df['team_wp'] = np.where(homeOnOff, df['home_wp'], 100. - df['home_wp'])
df['opp_wp'] = 100. - df['team_wp']
# create columns for each team's WPA
df['team_wpa'] = np.where(homeOnOff, df['home_wpa'], -df['home_wpa'])
df['opp_wpa'] = -df['team_wpa']
# create column for offense and defense scores if not already there
assert df['boxscore_id'].nunique() == 1
bs_id = df['boxscore_id'].values[0]
bs = sportsref.nfl.boxscores.BoxScore(bs_id)
df['team_score'] = np.where(df['team'] == bs.home(),
df['pbp_score_hm'], df['pbp_score_aw'])
df['opp_score'] = np.where(df['team'] == bs.home(),
df['pbp_score_aw'], df['pbp_score_hm'])
return df
|
python
|
{
"resource": ""
}
|
q277556
|
initialWinProb
|
test
|
def initialWinProb(line):
"""Gets the initial win probability of a game given its Vegas line.
:line: The Vegas line from the home team's perspective (negative means
home team is favored).
:returns: A float in [0., 100.] that represents the win probability.
"""
line = float(line)
probWin = 1. - norm.cdf(0.5, -line, 13.86)
probTie = norm.cdf(0.5, -line, 13.86) - norm.cdf(-0.5, -line, 13.86)
return 100. * (probWin + 0.5 * probTie)
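
# A worked example (approximate values): for line = -3, i.e. the home team
# favored by 3 points, probWin = 1 - norm.cdf(0.5, 3, 13.86) ~= 0.571 and
# probTie ~= 0.028, so the initial home win probability is roughly
# 100 * (0.571 + 0.5 * 0.028) ~= 58.6.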
|
python
|
{
"resource": ""
}
|
q277557
|
Player.passing
|
test
|
def passing(self, kind='R'):
"""Gets yearly passing stats for the player.
:kind: One of 'R', 'P', or 'B'. Case-insensitive; defaults to 'R'.
:returns: Pandas DataFrame with passing stats.
"""
doc = self.get_doc()
table = (doc('table#passing') if kind == 'R' else
doc('table#passing_playoffs'))
df = sportsref.utils.parse_table(table)
return df
|
python
|
{
"resource": ""
}
|
q277558
|
Player._simple_year_award
|
test
|
def _simple_year_award(self, award_id):
"""Template for simple award functions that simply list years, such as
pro bowls and first-team all pro.
:award_id: The div ID that is appended to "leaderboard_" in selecting
the table's div.
:returns: List of years for the award.
"""
doc = self.get_doc()
table = doc('div#leaderboard_{} table'.format(award_id))
return list(map(int, sportsref.utils.parse_awards_table(table)))
|
python
|
{
"resource": ""
}
|
q277559
|
Team.name
|
test
|
def name(self):
"""Returns the real name of the franchise given the team ID.
Examples:
'nwe' -> 'New England Patriots'
'sea' -> 'Seattle Seahawks'
:returns: A string corresponding to the team's full name.
"""
doc = self.get_main_doc()
headerwords = doc('div#meta h1')[0].text_content().split()
lastIdx = headerwords.index('Franchise')
teamwords = headerwords[:lastIdx]
return ' '.join(teamwords)
|
python
|
{
"resource": ""
}
|
q277560
|
Team.boxscores
|
test
|
def boxscores(self, year):
"""Gets list of BoxScore objects corresponding to the box scores from
that year.
    :year: The year for which we want the boxscores.
:returns: np.array of strings representing boxscore IDs.
"""
doc = self.get_year_doc(year)
table = doc('table#games')
df = sportsref.utils.parse_table(table)
if df.empty:
return np.array([])
return df.boxscore_id.values
|
python
|
{
"resource": ""
}
|
q277561
|
Team._year_info_pq
|
test
|
def _year_info_pq(self, year, keyword):
"""Returns a PyQuery object containing the info from the meta div at
the top of the team year page with the given keyword.
:year: Int representing the season.
:keyword: A keyword to filter to a single p tag in the meta div.
:returns: A PyQuery object for the selected p element.
"""
doc = self.get_year_doc(year)
p_tags = doc('div#meta div:not(.logo) p')
texts = [p_tag.text_content().strip() for p_tag in p_tags]
try:
return next(
pq(p_tag) for p_tag, text in zip(p_tags, texts)
if keyword.lower() in text.lower()
)
except StopIteration:
if len(texts):
raise ValueError('Keyword not found in any p tag.')
else:
raise ValueError('No meta div p tags found.')
|
python
|
{
"resource": ""
}
|
q277562
|
Team.head_coaches_by_game
|
test
|
def head_coaches_by_game(self, year):
"""Returns head coach data by game.
:year: An int representing the season in question.
:returns: An array with an entry per game of the season that the team
played (including playoffs). Each entry is the head coach's ID for that
game in the season.
"""
coach_str = self._year_info_pq(year, 'Coach').text()
regex = r'(\S+?) \((\d+)-(\d+)-(\d+)\)'
    coachAndTenure = []
    m = re.search(regex, coach_str)
    while m:
        coachID, wins, losses, ties = m.groups()
        # consume this match and search the remainder for the next coach
        coach_str = coach_str[m.end(4) + 1:]
        tenure = int(wins) + int(losses) + int(ties)
        coachAndTenure.append((coachID, tenure))
        m = re.search(regex, coach_str)
coachIDs = [
cID for cID, games in coachAndTenure for _ in range(games)
]
return np.array(coachIDs[::-1])
|
python
|
{
"resource": ""
}
|
q277563
|
Team.schedule
|
test
|
def schedule(self, year):
"""Returns a DataFrame with schedule information for the given year.
:year: The year for the season in question.
:returns: Pandas DataFrame with schedule information.
"""
doc = self.get_year_doc(year)
table = doc('table#games')
df = sportsref.utils.parse_table(table)
if df.empty:
return pd.DataFrame()
df = df.loc[df['week_num'].notnull()]
df['week_num'] = np.arange(len(df)) + 1
df['is_win'] = df['game_outcome'] == 'W'
df['is_loss'] = df['game_outcome'] == 'L'
df['is_tie'] = df['game_outcome'] == 'T'
df['is_bye'] = df['game_outcome'].isnull()
df['is_ot'] = df['overtime'].notnull()
return df
|
python
|
{
"resource": ""
}
|
q277564
|
Team.off_coordinator
|
test
|
def off_coordinator(self, year):
"""Returns the coach ID for the team's OC in a given year.
:year: An int representing the year.
:returns: A string containing the coach ID of the OC.
"""
try:
oc_anchor = self._year_info_pq(year, 'Offensive Coordinator')('a')
if oc_anchor:
            return sportsref.utils.rel_url_to_id(oc_anchor.attr['href'])
except ValueError:
return None
|
python
|
{
"resource": ""
}
|
q277565
|
Team.def_coordinator
|
test
|
def def_coordinator(self, year):
"""Returns the coach ID for the team's DC in a given year.
:year: An int representing the year.
:returns: A string containing the coach ID of the DC.
"""
try:
dc_anchor = self._year_info_pq(year, 'Defensive Coordinator')('a')
if dc_anchor:
            return sportsref.utils.rel_url_to_id(dc_anchor.attr['href'])
except ValueError:
return None
|
python
|
{
"resource": ""
}
|
q277566
|
Team.stadium
|
test
|
def stadium(self, year):
"""Returns the ID for the stadium in which the team played in a given
year.
:year: The year in question.
:returns: A string representing the stadium ID.
"""
anchor = self._year_info_pq(year, 'Stadium')('a')
return sportsref.utils.rel_url_to_id(anchor.attr['href'])
|
python
|
{
"resource": ""
}
|
q277567
|
Team.off_scheme
|
test
|
def off_scheme(self, year):
"""Returns the name of the offensive scheme the team ran in the given
year.
:year: Int representing the season year.
:returns: A string representing the offensive scheme.
"""
scheme_text = self._year_info_pq(year, 'Offensive Scheme').text()
m = re.search(r'Offensive Scheme[:\s]*(.+)\s*', scheme_text, re.I)
if m:
return m.group(1)
else:
return None
|
python
|
{
"resource": ""
}
|
q277568
|
Team.def_alignment
|
test
|
def def_alignment(self, year):
"""Returns the name of the defensive alignment the team ran in the
given year.
:year: Int representing the season year.
:returns: A string representing the defensive alignment.
"""
scheme_text = self._year_info_pq(year, 'Defensive Alignment').text()
m = re.search(r'Defensive Alignment[:\s]*(.+)\s*', scheme_text, re.I)
if m:
return m.group(1)
else:
return None
|
python
|
{
"resource": ""
}
|
q277569
|
Team.off_splits
|
test
|
def off_splits(self, year):
"""Returns a DataFrame of offensive team splits for a season.
:year: int representing the season.
:returns: Pandas DataFrame of split data.
"""
doc = self.get_year_doc('{}_splits'.format(year))
tables = doc('table.stats_table')
dfs = [sportsref.utils.parse_table(table) for table in tables.items()]
dfs = [
df.assign(split=df.columns[0])
.rename(columns={df.columns[0]: 'split_value'})
for df in dfs
]
if not dfs:
return pd.DataFrame()
return pd.concat(dfs).reset_index(drop=True)
|
python
|
{
"resource": ""
}
|
q277570
|
get_html
|
test
|
def get_html(url):
"""Gets the HTML for the given URL using a GET request.
:url: the absolute URL of the desired page.
:returns: a string of HTML.
"""
global last_request_time
with throttle_process_lock:
with throttle_thread_lock:
# sleep until THROTTLE_DELAY secs have passed since last request
wait_left = THROTTLE_DELAY - (time.time() - last_request_time.value)
if wait_left > 0:
time.sleep(wait_left)
# make request
response = requests.get(url)
# update last request time for throttling
last_request_time.value = time.time()
# raise ValueError on 4xx status code, get rid of comments, and return
if 400 <= response.status_code < 500:
raise ValueError(
'Status Code {} received fetching URL "{}"'
.format(response.status_code, url)
)
html = response.text
html = html.replace('<!--', '').replace('-->', '')
return html
|
python
|
{
"resource": ""
}
|
q277571
|
flatten_links
|
test
|
def flatten_links(td, _recurse=False):
"""Flattens relative URLs within text of a table cell to IDs and returns
the result.
:td: the PyQuery object for the HTML to convert
:returns: the string with the links flattened to IDs
"""
# helper function to flatten individual strings/links
def _flatten_node(c):
if isinstance(c, basestring):
return c.strip()
elif 'href' in c.attrib:
c_id = rel_url_to_id(c.attrib['href'])
return c_id if c_id else c.text_content().strip()
else:
return flatten_links(pq(c), _recurse=True)
# if there's no text, just return None
if td is None or not td.text():
return '' if _recurse else None
td.remove('span.note')
return ''.join(_flatten_node(c) for c in td.contents())
|
python
|
{
"resource": ""
}
|
q277572
|
rel_url_to_id
|
test
|
def rel_url_to_id(url):
"""Converts a relative URL to a unique ID.
Here, 'ID' refers generally to the unique ID for a given 'type' that a
given datum has. For example, 'BradTo00' is Tom Brady's player ID - this
corresponds to his relative URL, '/players/B/BradTo00.htm'. Similarly,
'201409070dal' refers to the boxscore of the SF @ DAL game on 09/07/14.
Supported types:
* player/...
* boxscores/...
* teams/...
* years/...
* leagues/...
* awards/...
* coaches/...
* officials/...
* schools/...
* schools/high_schools.cgi?id=...
:returns: ID associated with the given relative URL.
"""
yearRegex = r'.*/years/(\d{4}).*|.*/gamelog/(\d{4}).*'
playerRegex = r'.*/players/(?:\w/)?(.+?)(?:/|\.html?)'
boxscoresRegex = r'.*/boxscores/(.+?)\.html?'
teamRegex = r'.*/teams/(\w{3})/.*'
coachRegex = r'.*/coaches/(.+?)\.html?'
stadiumRegex = r'.*/stadiums/(.+?)\.html?'
refRegex = r'.*/officials/(.+?r)\.html?'
collegeRegex = r'.*/schools/(\S+?)/.*|.*college=([^&]+)'
hsRegex = r'.*/schools/high_schools\.cgi\?id=([^\&]{8})'
bsDateRegex = r'.*/boxscores/index\.f?cgi\?(month=\d+&day=\d+&year=\d+)'
leagueRegex = r'.*/leagues/(.*_\d{4}).*'
awardRegex = r'.*/awards/(.+)\.htm'
regexes = [
yearRegex,
playerRegex,
boxscoresRegex,
teamRegex,
coachRegex,
stadiumRegex,
refRegex,
collegeRegex,
hsRegex,
bsDateRegex,
leagueRegex,
awardRegex,
]
for regex in regexes:
match = re.match(regex, url, re.I)
if match:
return [_f for _f in match.groups() if _f][0]
# things we don't want to match but don't want to print a WARNING
if any(
url.startswith(s) for s in
(
'/play-index/',
)
):
return url
print('WARNING. NO MATCH WAS FOUND FOR "{}"'.format(url))
return url
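
# Examples (the first two are from the docstring above; the third is
# inferred from teamRegex):
#   rel_url_to_id('/players/B/BradTo00.htm')     -> 'BradTo00'
#   rel_url_to_id('/boxscores/201409070dal.htm') -> '201409070dal'
#   rel_url_to_id('/teams/nwe/2014.htm')         -> 'nwe'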
|
python
|
{
"resource": ""
}
|
q277573
|
_kwargs_to_qs
|
test
|
def _kwargs_to_qs(**kwargs):
"""Converts kwargs given to PSF to a querystring.
:returns: the querystring.
"""
# start with defaults
inpOptDef = inputs_options_defaults()
opts = {
name: dct['value']
for name, dct in inpOptDef.items()
}
# clean up keys and values
for k, v in kwargs.items():
del kwargs[k]
# bool => 'Y'|'N'
if isinstance(v, bool):
kwargs[k] = 'Y' if v else 'N'
# tm, team => team_id
elif k.lower() in ('tm', 'team'):
kwargs['team_id'] = v
# yr, year, yrs, years => year_min, year_max
elif k.lower() in ('yr', 'year', 'yrs', 'years'):
if isinstance(v, collections.Iterable):
lst = list(v)
kwargs['year_min'] = min(lst)
kwargs['year_max'] = max(lst)
elif isinstance(v, basestring):
v = list(map(int, v.split(',')))
kwargs['year_min'] = min(v)
kwargs['year_max'] = max(v)
else:
kwargs['year_min'] = v
kwargs['year_max'] = v
# pos, position, positions => pos[]
elif k.lower() in ('pos', 'position', 'positions'):
if isinstance(v, basestring):
v = v.split(',')
elif not isinstance(v, collections.Iterable):
v = [v]
kwargs['pos[]'] = v
# draft_pos, ... => draft_pos[]
elif k.lower() in (
'draft_pos', 'draftpos', 'draftposition', 'draftpositions',
'draft_position', 'draft_positions'
):
if isinstance(v, basestring):
v = v.split(',')
elif not isinstance(v, collections.Iterable):
v = [v]
kwargs['draft_pos[]'] = v
# if not one of these cases, put it back in kwargs
else:
kwargs[k] = v
# update based on kwargs
for k, v in kwargs.items():
# if overwriting a default, overwrite it (with a list so the
# opts -> querystring list comp works)
if k in opts or k in ('pos[]', 'draft_pos[]'):
# if multiple values separated by commas, split em
if isinstance(v, basestring):
v = v.split(',')
# otherwise, make sure it's a list
elif not isinstance(v, collections.Iterable):
v = [v]
# then, add list of values to the querystring dict *opts*
opts[k] = v
if 'draft' in k:
opts['draft'] = [1]
opts['request'] = [1]
opts['offset'] = [kwargs.get('offset', 0)]
qs = '&'.join(
'{}={}'.format(urllib.parse.quote_plus(name), val)
for name, vals in sorted(opts.items()) for val in vals
)
return qs
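
# A usage sketch (hypothetical values; the full querystring also carries
# every default option from the form):
#   _kwargs_to_qs(team='nwe', years=(2010, 2012))
#   -> '...team_id=nwe...year_max=2012&year_min=2010...'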
|
python
|
{
"resource": ""
}
|
q277574
|
_Streamer__read_process
|
test
|
def _Streamer__read_process(self, path, read_size, cbuf, stop, barrier, cyclic, offset, read_skip, sync):
"""
Main function for the processes that read from the HDF5 file.
:param self: A reference to the streamer object that created these processes.
:param path: The HDF5 path to the node to be read from.
:param read_size: The length of the block along the outer dimension to read.
:param cbuf: The circular buffer to place read elements into.
:param stop: The Event that signals the process to stop reading.
    :param barrier: The Barrier that synchronises read cycles.
:param cyclic: True if the process should read cyclically.
:param offset: Offset into the dataset that this process should start reading at.
    :param read_skip: How many elements to skip on each iteration.
    :param sync: GuardSynchronizer to order writes to the buffer.
:return: Nothing
"""
    # Multi-process access to HDF5 seems to behave better when there are no top-level imports of PyTables.
import tables as tb
h5_file = tb.open_file(self.filename, 'r', **self.h5_kw_args)
ary = h5_file.get_node(path)
i = offset
while not stop.is_set():
vals = ary[i:i + read_size]
# If the read goes off the end of the dataset, then wrap to the start.
if i + read_size > len(ary):
vals = np.concatenate([vals, ary[0:read_size - len(vals)]])
if sync is None:
# If no ordering is requested, then just write to the next available space in the buffer.
with cbuf.put_direct() as put_ary:
put_ary[:] = vals
else:
# Otherwise, use the sync object to ensure that writes occur in the order provided by i.
# So i = 0 will write first, then i = block_size, then i = 2*block_size, etc...
# The sync object has two ordered barriers so that acquisition and release of the buffer spaces
# are synchronized in order, but the actual writing to the buffer can happen simultaneously.
# If only one barrier were used, writing to the buffer would be linearised.
with sync.do(cbuf.put_direct(), i, (i+read_size) % len(ary)) as put_ary:
put_ary[:] = vals
i += read_skip
if cyclic:
# If the next iteration is past the end of the dataset, wrap it around.
if i >= len(ary):
i %= len(ary)
barrier.wait()
else:
# But if cyclic mode is disabled, break the loop as the work is now done.
if i + read_size > len(ary):
break
|
python
|
{
"resource": ""
}
|
q277575
|
SharedCircBuf.put_direct
|
test
|
def put_direct(self):
"""
Allows direct access to the buffer element.
Blocks until there is room to write into the buffer.
:return: A guard object that returns the buffer element.
"""
# Once the guard is released, write_idx will be placed into read_queue.
return self.Guard(self.read_queue, self.arys, self.__put_idx)
|
python
|
{
"resource": ""
}
|
q277576
|
SharedCircBuf.get_direct
|
test
|
def get_direct(self):
"""
Allows direct access to the buffer element.
Blocks until there is data that can be read.
:return: A guard object that returns the buffer element.
"""
read_idx = self.__get_idx()
if read_idx is QueueClosed:
return QueueClosed
# Once the guard is released, read_idx will be placed into write_queue.
return self.Guard(self.write_queue, self.arys, lambda: read_idx)
|
python
|
{
"resource": ""
}
|
q277577
|
SharedCircBuf.close
|
test
|
def close(self):
"""Close the queue, signalling that no more data can be put into the queue."""
self.read_queue.put(QueueClosed)
self.write_queue.put(QueueClosed)
|
python
|
{
"resource": ""
}
|
q277578
|
Streamer.__get_batch
|
test
|
def __get_batch(self, path, length, last=False):
"""
Get a block of data from the node at path.
:param path: The path to the node to read from.
:param length: The length along the outer dimension to read.
:param last: True if the remainder elements should be read.
:return: A copy of the requested block of data as a numpy array.
"""
import tables
h5_file = tables.open_file(self.filename, 'r')
h5_node = h5_file.get_node(path)
if len(h5_node) == 0:
raise Exception("Cannot read from empty dataset.")
# If the length isn't specified, then fall back to default values.
if length is None:
chunkshape = h5_node.chunkshape
# If the array isn't chunked, then try to make the block close to 128KB.
if chunkshape is None:
default_length = 128*2**10//h5_node[0].nbytes # Divides by one row of the dataset.
length = min(h5_node.shape[0], default_length)
# If it is chunked, then use the chunkshape for best performance.
else:
length = chunkshape[0]
if last:
example = h5_node[length*(len(h5_node)//length):].copy()
else:
example = h5_node[:length].copy()
h5_file.close()
return example
|
python
|
{
"resource": ""
}
|
q277579
|
Streamer.get_remainder
|
test
|
def get_remainder(self, path, block_size):
"""
    Get the remainder elements. These elements are not returned by the direct-access queue when cyclic=False.
:param path: The HDF5 path to the dataset to be read.
:param block_size: The block size is used to calculate which elements will remain.
:return: A copy of the remainder elements as a numpy array.
"""
return self.__get_batch(path, length=block_size, last=True)
|
python
|
{
"resource": ""
}
|
q277580
|
Streamer.get_queue
|
test
|
def get_queue(self, path, n_procs=4, read_ahead=None, cyclic=False, block_size=None, ordered=False):
"""
Get a queue that allows direct access to the internal buffer. If the dataset to be read is chunked, the
block_size should be a multiple of the chunk size to maximise performance. In this case it is best to leave it
to the default. When cyclic=False, and block_size does not divide the dataset evenly, the remainder elements
will not be returned by the queue. When cyclic=True, the remainder elements will be part of a block that wraps
    around the end and includes elements from the beginning of the dataset. By default, blocks are returned in the
order in which they become available. The ordered option will force blocks to be returned in on-disk order.
:param path: The HDF5 path to the dataset that should be read.
    :param n_procs: The number of background processes used to read the dataset in parallel.
:param read_ahead: The number of blocks to allocate in the internal buffer.
:param cyclic: True if the queue should wrap at the end of the dataset.
:param block_size: The size along the outer dimension of the blocks to be read. Defaults to a multiple of
the chunk size, or to a 128KB sized block if the dataset is not chunked.
    :param ordered: Force the reader to return data in on-disk order. May result in a performance penalty.
:return: A queue object that allows access to the internal buffer.
"""
# Get a block_size length of elements from the dataset to serve as a template for creating the buffer.
# If block_size=None, then get_batch calculates an appropriate block size.
example = self.__get_batch(path, block_size)
block_size = example.shape[0]
if read_ahead is None:
# 2x No. of processes for writing, 1 extra for reading.
read_ahead = 2*n_procs + 1
cbuf = SharedCircBuf(read_ahead, example)
stop = multiprocessing.Event()
barrier = Barrier(n_procs)
# If ordering has been requested, create a synchronizer.
sync = GuardSynchronizer() if ordered else None
procs = []
for i in range(n_procs):
# Each process is offset in the dataset by i*block_size
# The skip length is set to n_procs*block_size so that no block is read by 2 processes.
process = multiprocessing.Process(target=_Streamer__read_process, args=(
self, path, block_size, cbuf, stop, barrier, cyclic,
i * block_size, n_procs * block_size, sync
))
process.daemon = True
process.start()
procs.append(process)
# If the queue is not cyclic, then the cessation of reading data needs to be monitored.
if not cyclic:
# This closure defines a background thread that waits until all processes have finished.
# At this point, all data from the dataset has been read, and the buffer is closed.
def monitor():
for p in procs:
p.join()
cbuf.close()
monitor_thread = threading.Thread(target=monitor)
monitor_thread.daemon = True
monitor_thread.start()
return Streamer.Queue(cbuf, stop, block_size)
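
# A usage sketch (hypothetical path and consumer; the guard/iter access
# pattern is the one used by get_generator below):
#
#     q = streamer.get_queue('/data', n_procs=4)
#     for guard in q.iter():
#         with guard as block:
#             consume(block.copy())
#     q.close()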
|
python
|
{
"resource": ""
}
|
q277581
|
Streamer.get_generator
|
test
|
def get_generator(self, path, *args, **kw_args):
"""
Get a generator that allows convenient access to the streamed data.
Elements from the dataset are returned from the generator one row at a time.
Unlike the direct access queue, this generator also returns the remainder elements.
Additional arguments are forwarded to get_queue.
See the get_queue method for documentation of these parameters.
:param path:
:return: A generator that iterates over the rows in the dataset.
"""
q = self.get_queue(path=path, *args, **kw_args)
try:
# This generator just implements a standard access pattern for the direct access queue.
for guard in q.iter():
with guard as batch:
batch_copy = batch.copy()
for row in batch_copy:
yield row
last_batch = self.get_remainder(path, q.block_size)
for row in last_batch:
yield row
finally:
q.close()
|
python
|
{
"resource": ""
}
|
q277582
|
parse
|
test
|
def parse(ifp, pb_cls, **kwargs):
"""Parse a stream.
Args:
ifp (string or file-like object): input stream.
pb_cls (protobuf.message.Message.__class__): The class object of
the protobuf message type encoded in the stream.
"""
mode = 'rb'
if isinstance(ifp, str):
istream = open(ifp, mode=mode, **kwargs)
else:
istream = open(fileobj=ifp, mode=mode, **kwargs)
with istream:
for data in istream:
pb_obj = pb_cls()
pb_obj.ParseFromString(data)
yield pb_obj
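
# A usage sketch (hypothetical message class and file name):
#
#     for obj in parse('books.pbs', AddressBook):
#         print(obj)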
|
python
|
{
"resource": ""
}
|
q277583
|
dump
|
test
|
def dump(ofp, *pb_objs, **kwargs):
"""Write to a stream.
Args:
ofp (string or file-like object): output stream.
pb_objs (*protobuf.message.Message): list of protobuf message objects
to be written.
"""
mode = 'wb'
if isinstance(ofp, str):
ostream = open(ofp, mode=mode, **kwargs)
else:
ostream = open(fileobj=ofp, mode=mode, **kwargs)
with ostream:
ostream.write(*pb_objs)
|
python
|
{
"resource": ""
}
|
q277584
|
Stream._read_varint
|
test
|
def _read_varint(self):
"""Read a varint from file, parse it, and return the decoded integer.
"""
buff = self._fd.read(1)
if buff == b'':
return 0
while (bytearray(buff)[-1] & 0x80) >> 7 == 1: # while the MSB is 1
new_byte = self._fd.read(1)
if new_byte == b'':
raise EOFError('unexpected EOF.')
buff += new_byte
varint, _ = decodeVarint(buff, 0)
return varint
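
# Self-contained companion sketch: a minimal varint encoder plus a decoder
# that mirrors _read_varint against any file-like object. Varints store an
# integer in base-128 groups, least-significant group first, MSB = "more".
import io

def encode_varint_demo(value):
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)  # more bytes follow: set the MSB
        else:
            out.append(byte)
            return bytes(out)

def read_varint_demo(fd):
    result, shift = 0, 0
    while True:
        buff = fd.read(1)
        if buff == b'':
            raise EOFError('unexpected EOF.')
        byte = buff[0]
        result |= (byte & 0x7F) << shift
        if byte & 0x80 == 0:
            return result
        shift += 7

assert encode_varint_demo(300) == b'\xac\x02'
assert read_varint_demo(io.BytesIO(b'\xac\x02')) == 300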
|
python
|
{
"resource": ""
}
|
q277585
|
Stream._get_objs
|
test
|
def _get_objs(self):
"""A generator yielding all protobuf object data in the file. It is the
main parser of the stream encoding.
"""
while True:
count = self._read_varint()
if count == 0:
break
# Read a group containing `count` number of objects.
for _ in range(count):
size = self._read_varint()
if size == 0:
raise EOFError('unexpected EOF.')
# Read an object from the object group.
yield self._fd.read(size)
if self._group_delim:
yield self._delimiter() if self._delimiter is not None \
else None
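
# Self-contained sketch of the framing _get_objs consumes:
# [group count][size][bytes]...[size][bytes] ... [0-count terminator].
# Sizes here are < 128, so each varint is a single byte, keeping the demo tiny.
import io

payloads = [b'hello', b'protobuf']
buf = bytes([len(payloads)])              # group count
for p in payloads:
    buf += bytes([len(p)]) + p            # per-object size, then the body
buf += b'\x00'                            # zero count terminates the stream

fd = io.BytesIO(buf)
objs = []
while True:
    count = fd.read(1)[0]                 # single-byte varint case
    if count == 0:
        break
    for _ in range(count):
        size = fd.read(1)[0]
        objs.append(fd.read(size))        # raw serialized message bytes
assert objs == payloads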
|
python
|
{
"resource": ""
}
|
q277586
|
Stream.close
|
test
|
def close(self):
"""Close the stream."""
self.flush()
if self._myfd is not None:
self._myfd.close()
self._myfd = None
|
python
|
{
"resource": ""
}
|
q277587
|
Stream.write
|
test
|
def write(self, *pb2_obj):
"""Write a group of one or more protobuf objects to the file. Multiple
object groups can be written by calling this method several times
before closing stream or exiting the runtime context.
        The input protobuf objects get buffered and will be written down when
        the number of buffered objects exceeds `self._buffer_size`.
Args:
pb2_obj (*protobuf.message.Message): list of protobuf messages.
"""
base = len(self._write_buff)
for idx, obj in enumerate(pb2_obj):
if self._buffer_size > 0 and \
(idx + base) != 0 and \
(idx + base) % self._buffer_size == 0:
self.flush()
self._write_buff.append(obj)
if self._buffer_size == 0:
self.flush()
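
# Sketch of the flush points implied by the modulo test above: with
# buffer_size = 10 and 25 objects written in one call (empty buffer, base=0),
# flushes fire just before objects 10 and 20 are appended, leaving 5 buffered.
buffer_size, buffered, groups = 10, 0, []
for idx in range(25):
    if buffer_size > 0 and idx != 0 and idx % buffer_size == 0:
        groups.append(buffered)
        buffered = 0
    buffered += 1
groups.append(buffered)  # the final flush happens on close()/flush()
assert groups == [10, 10, 5]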
|
python
|
{
"resource": ""
}
|
q277588
|
Stream.flush
|
test
|
def flush(self):
"""Write down buffer to the file."""
if not self.is_output():
return
count = len(self._write_buff)
if count == 0:
return
encodeVarint(self._fd.write, count, True)
for obj in self._write_buff:
obj_str = obj.SerializeToString()
encodeVarint(self._fd.write, len(obj_str), True)
self._fd.write(obj_str)
self._write_buff = []
|
python
|
{
"resource": ""
}
|
q277589
|
Game.get_game_dir
|
test
|
def get_game_dir(self, username=False):
"""Returns joined game directory path relative to Steamapps"""
if not self.common and not username:
raise RuntimeError("Can't determine this game's directory without username")
if self.common:
subdir = "common"
        else:
            # Legacy (pre-Steampipe) games live under the user's account
            # directory, which the guard above guarantees was supplied.
            subdir = username
subsubdir = self.dir
if WIN32 or CYGWIN:
subsubdir = subsubdir.lower()
return os.path.join(subdir, subsubdir)
|
python
|
{
"resource": ""
}
|
q277590
|
TextInput.input_text_with_keyboard_emulation
|
test
|
def input_text_with_keyboard_emulation(self, text):
"""
Works around the problem of emulating user interactions with text inputs.
Emulates a key-down action on the first char of the input. This way, implementations which
require key-down event to trigger auto-suggest are testable.
Then the chains sends the rest of the text and releases the key.
"""
        # Press the first char down, send the rest of the text, then release the key.
        ActionChains(self.driver).key_down(text[0]).send_keys(text[1:]) \
            .key_up(text[0]).perform()
|
python
|
{
"resource": ""
}
|
q277591
|
make_fake_movie
|
test
|
def make_fake_movie(nframes, mask_shape=(64, 64), mask_center=None,
bg_intensity=0.1, mask_sigma=10, dt=0.02, rate=1.0,
tau=1., sigma=0.001, seed=None):
"""
Generate 2D fake fluorescence movie
Arguments:
---------------------------------------------------------------------------
nframes: number of timebins to simulate
mask_shape: tuple (nrows, ncols), shape of a single movie frame
mask_center: tuple (x, y), pixel coords of cell center
bg_intensity: scalar, amplitude of (static) baseline fluorescence
mask_sigma: scalar, standard deviation of Gaussian mask
dt: timestep (s)
rate: mean spike rate (Hz)
tau: time constant of decay in calcium concentration (s)
sigma: SD of additive noise on fluorescence
seed: Seed for RNG
Returns:
---------------------------------------------------------------------------
F: fluorescence [npixels, nframes]
c: calcium concentration [nframes,]
n: spike train [nframes,]
theta: tuple of true model parameters:
(sigma, alpha, beta, lambda, gamma)
"""
gen = np.random.RandomState(seed)
# poisson spikes
n = gen.poisson(rate * dt, size=nframes)
# internal calcium dynamics
gamma = np.exp(-dt / tau)
c = signal.lfilter(np.r_[1], np.r_[1, -gamma], n, axis=0)
# pixel weights (sum == 1)
nr, nc = mask_shape
npix = nr * nc
if mask_center is None:
mask_center = (nc // 2., nr // 2.)
a, b = mask_center
y, x = np.ogrid[:nr, :nc]
xs = (x - a) ** 2.
ys = (y - b) ** 2.
twoss = 2. * mask_sigma ** 2.
alpha = np.exp(-1 * ((xs / twoss) + (ys / twoss))).ravel()
alpha /= alpha.sum()
# background fluorescence
beta = gen.randn(npix) * bg_intensity
# firing rate (spike probability per sec)
lamb = rate
# spatially & temporally white noise
epsilon = gen.randn(npix, nframes) * sigma
# simulated fluorescence
F = c[None, :] * alpha[:, None] + beta[:, None] + epsilon
theta = (sigma, alpha, beta, lamb, gamma)
return F, c, n, theta
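
# Quick smoke test of the shapes documented above (assumes numpy as np and
# scipy.signal are imported at module level, as the function body implies).
F, c, n, theta = make_fake_movie(nframes=200, mask_shape=(32, 32), seed=0)
assert F.shape == (32 * 32, 200)          # fluorescence: [npixels, nframes]
assert c.shape == n.shape == (200,)       # calcium trace and spike counts
sigma, alpha, beta, lamb, gamma = theta
assert abs(alpha.sum() - 1.0) < 1e-9      # Gaussian mask weights sum to 1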
|
python
|
{
"resource": ""
}
|
q277592
|
ElementWithTraits.evaluate_traits
|
test
|
def evaluate_traits(self):
"""
        Evaluates traits and returns a list containing the descriptions of traits which are not true.
        Evaluation is lazy by default: only the first failing trait's description is returned.
        If `traits_eager_evaluation` is True, all traits are evaluated before returning;
        use this option only for debugging purposes.
"""
return_value = []
for trait in self.traits:
if not trait.condition():
if not self.traits_eager_evaluation:
return [trait.description]
else:
return_value.append(trait.description)
return return_value
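
# Illustration with a minimal stand-in object; the real trait class in this
# library may differ, so treat the namedtuple shape as an assumption.
from collections import namedtuple

Trait = namedtuple('Trait', ['condition', 'description'])

class _DemoElement:
    traits_eager_evaluation = True   # eager: collect every failing trait
    traits = [
        Trait(lambda: True, 'cart icon is visible'),
        Trait(lambda: False, 'pay button is enabled'),
        Trait(lambda: False, 'total is displayed'),
    ]

missing = ElementWithTraits.evaluate_traits(_DemoElement())
assert missing == ['pay button is enabled', 'total is displayed']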
|
python
|
{
"resource": ""
}
|
q277593
|
Wait.until_condition
|
test
|
def until_condition(self, condition, condition_description):
"""
        Waits until the condition is True or returns a non-None value.
        If the condition is still not met after the timeout, raises a TimeoutException.
"""
end_time = time.time() + self._timeout
count = 1
while True:
try:
if not hasattr(condition, '__call__'):
raise TypeError("condition is not callable")
value = condition()
if type(value) is bool and value is not False:
return value
elif type(value) is not bool and value is not None:
return value
else:
logger.debug("#" + str(count) + " - wait until " + condition_description) # pragma: no cover
except self._ignored_exceptions as ex:
logger.debug("Captured {0} : {1}".format(str(ex.__class__).replace("<type '", "").replace("'>", ""),
str(ex))) # pragma: no cover
time.sleep(self._poll)
count += 1
if time.time() > end_time: # pragma: no cover
break
raise TimeoutException(
msg="condition <" + condition_description + "> was not true after " + str(self._timeout) + " seconds.")
|
python
|
{
"resource": ""
}
|
q277594
|
Wait.until_traits_are_present
|
test
|
def until_traits_are_present(self, element_with_traits):
"""
Waits until all traits are present.
If any of the traits is still not present after timeout, raises a TimeoutException.
"""
end_time = time.time() + self._timeout
count = 1
missing_traits_descriptions = None
while True:
missing_traits_descriptions = []
try:
missing_traits_descriptions = element_with_traits.evaluate_traits()
if len(missing_traits_descriptions) == 0:
return True
else:
logger.debug("#{0} - wait until all traits are present: <{1}>".format(str(count), '> <'.join(
missing_traits_descriptions)))
except self._ignored_exceptions as ex: # pragma: no cover
logger.debug("Captured {0}: {1}".format(str(ex.__class__).replace("<type '", "").replace("'>", ""),
str(ex))) # pragma: no cover
pass # pragma: no cover
time.sleep(self._poll)
count += 1
if time.time() > end_time:
break
raise TimeoutException(
msg="conditions " + '<' + '> <'.join(missing_traits_descriptions) + '>' + " not true after " + str(
self._timeout) + " seconds.")
|
python
|
{
"resource": ""
}
|
q277595
|
Wait.with_ignored_exceptions
|
test
|
def with_ignored_exceptions(self, *ignored_exceptions):
"""
Set a list of exceptions that should be ignored inside the wait loop.
"""
for exception in ignored_exceptions:
self._ignored_exceptions = self._ignored_exceptions + (exception,)
return self
|
python
|
{
"resource": ""
}
|
q277596
|
NADReceiver.main_volume
|
test
|
def main_volume(self, operator, value=None):
"""
Execute Main.Volume.
        Returns the volume as an int, or None if the reply cannot be parsed.
"""
try:
res = int(self.exec_command('main', 'volume', operator, value))
return res
except (ValueError, TypeError):
pass
return None
|
python
|
{
"resource": ""
}
|
q277597
|
NADReceiver.main_source
|
test
|
def main_source(self, operator, value=None):
"""
Execute Main.Source.
        Returns the source as an int, or None if the reply cannot be parsed.
"""
try:
source = int(self.exec_command('main', 'source', operator, value))
return source
except (ValueError, TypeError):
pass
return None
|
python
|
{
"resource": ""
}
|
q277598
|
NADReceiverTCP._send
|
test
|
def _send(self, message, read_reply=False):
"""Send a command string to the amplifier."""
sock = None
for tries in range(0, 3):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self._host, self.PORT))
break
            except (ConnectionError, BrokenPipeError):
                # range(0, 3) yields 0..2, so the final attempt is tries == 2.
                if tries == 2:
                    print("socket connect failed.")
                    return
sleep(0.1)
sock.send(codecs.decode(message, 'hex_codec'))
if read_reply:
sleep(0.1)
reply = ''
tries = 0
max_tries = 20
while len(reply) < len(message) and tries < max_tries:
try:
reply += codecs.encode(sock.recv(self.BUFFERSIZE), 'hex')\
.decode("utf-8")
except (ConnectionError, BrokenPipeError):
pass
tries += 1
sock.close()
if tries >= max_tries:
return
return reply
sock.close()
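
# The transport sends raw bytes while commands are written as hex strings;
# this mirrors the decode-on-send / encode-on-receive pair used above.
# '0001020901' is a fabricated stand-in, not a real NAD command.
import codecs

message = '0001020901'
wire = codecs.decode(message, 'hex_codec')  # hex string -> bytes on the socket
assert codecs.encode(wire, 'hex').decode('utf-8') == message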
|
python
|
{
"resource": ""
}
|
q277599
|
NADReceiverTCP.status
|
test
|
def status(self):
"""
Return the status of the device.
Returns a dictionary with keys 'volume' (int 0-200) , 'power' (bool),
'muted' (bool) and 'source' (str).
"""
nad_reply = self._send(self.POLL_VOLUME +
self.POLL_POWER +
self.POLL_MUTED +
self.POLL_SOURCE, read_reply=True)
if nad_reply is None:
return
# split reply into parts of 10 characters
num_chars = 10
nad_status = [nad_reply[i:i + num_chars]
for i in range(0, len(nad_reply), num_chars)]
return {'volume': int(nad_status[0][-2:], 16),
'power': nad_status[1][-2:] == '01',
'muted': nad_status[2][-2:] == '01',
'source': self.SOURCES_REVERSED[nad_status[3][-2:]]}
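
# Decoding sketch for a poll reply; the hex string below is fabricated to
# match the 4 x 10-char layout that status() slices up, field by field.
nad_reply = '0000000264' + '0000000201' + '0000000200' + '0000000203'
num_chars = 10
fields = [nad_reply[i:i + num_chars]
          for i in range(0, len(nad_reply), num_chars)]
assert int(fields[0][-2:], 16) == 100     # volume: last byte of the field
assert fields[1][-2:] == '01'             # power on
assert fields[2][-2:] == '00'             # not muted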
|
python
|
{
"resource": ""
}
|