_id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 31-13.1k chars) | language (string, 1 class) | meta_information (dict)
---|---|---|---|---|---
q277500
|
validate
|
test
|
def validate(metric_class):
"""
Does basic Metric option validation.
"""
if not hasattr(metric_class, 'label'):
raise ImproperlyConfigured("No 'label' attribute found for metric %s." % metric_class.__name__)
if
|
python
|
{
"resource": ""
}
|
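The `validate` snippet above is truncated after its first check. Below is a minimal runnable sketch of how such an option validator could continue; the local `ImproperlyConfigured` stand-in and the extra `uid` requirement are assumptions, not taken from the dataset row.

```python
class ImproperlyConfigured(Exception):
    """Stand-in for django.core.exceptions.ImproperlyConfigured (assumption)."""


def validate(metric_class):
    """Does basic Metric option validation (illustrative sketch)."""
    # Each required attribute is checked in turn; a missing one raises immediately.
    for attr in ('label', 'uid'):  # 'uid' is an assumed requirement
        if not hasattr(metric_class, attr):
            raise ImproperlyConfigured(
                "No '%s' attribute found for metric %s." % (attr, metric_class.__name__)
            )


class PageViews(object):
    label = 'Page views'
    uid = 'page_views'


validate(PageViews)  # passes silently when all attributes are present
```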
q277501
|
get_statistic_by_name
|
test
|
def get_statistic_by_name(stat_name):
"""
Fetches a statistic based on the given class name. Does a look-up
in the gadgets' registered statistics to find the specified one.
"""
if stat_name == 'ALL':
return get_statistic_models()
for stat in get_statistic_models():
|
python
|
{
"resource": ""
}
|
q277502
|
calculate_statistics
|
test
|
def calculate_statistics(stat, frequencies):
"""
Calculates all of the metrics associated with the registered gadgets.
"""
stats = ensure_list(stat)
frequencies = ensure_list(frequencies)
for stat in stats:
for f in frequencies:
|
python
|
{
"resource": ""
}
|
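The loop body of `calculate_statistics` is not shown. The sketch below reproduces only the surrounding plumbing; `ensure_list` is reimplemented and the per-metric `calculate(frequency)` call is a hypothetical placeholder.

```python
def ensure_list(value):
    """Wrap a single value in a list; leave lists/tuples alone (sketch)."""
    if isinstance(value, (list, tuple)):
        return list(value)
    return [value]


def calculate_statistics(stat, frequencies):
    """Run every requested statistic at every requested frequency (illustrative sketch)."""
    stats = ensure_list(stat)
    frequencies = ensure_list(frequencies)
    for s in stats:
        for f in frequencies:
            # Hypothetical hook; the dataset row does not show what happens per (stat, frequency).
            s.calculate(f)
```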
q277503
|
autodiscover
|
test
|
def autodiscover():
"""
Auto-discover INSTALLED_APPS gadgets.py modules and fail silently when
not present. This forces an import on them to register any gadgets they
may want.
"""
from django.conf import settings
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
for app in settings.INSTALLED_APPS:
mod = import_module(app)
# Attempt to import the app's gadgets module.
try:
import_module('%s.gadgets' % app)
except:
|
python
|
{
"resource": ""
}
|
q277504
|
csv_dump
|
test
|
def csv_dump(request, uid):
"""
Returns a CSV dump of all of the specified metric's counts
and cumulative counts.
"""
metric = Metric.objects.get(uid=uid)
frequency = request.GET.get('frequency', settings.STATISTIC_FREQUENCY_DAILY)
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s%s.csv' % (uid, datetime.datetime.now().strftime("%Y%m%d-%H%M"))
writer =
|
python
|
{
"resource": ""
}
|
q277505
|
Command.handle
|
test
|
def handle(self, *args, **kwargs):
"""
Command handler for the "metrics" command.
"""
frequency = kwargs['frequency']
frequencies = settings.STATISTIC_FREQUENCY_ALL if frequency == 'a' else (frequency.split(',') if ',' in frequency else [frequency])
if kwargs['list']:
maintenance.list_statistics()
# if we're supposed to calculate the latest statistics
|
python
|
{
"resource": ""
}
|
q277506
|
get_GET_array
|
test
|
def get_GET_array(request, var_name, fail_silently=True):
"""
Returns the GET array's contents for the specified variable.
"""
vals = request.GET.getlist(var_name)
if not vals:
if fail_silently:
return []
|
python
|
{
"resource": ""
}
|
q277507
|
get_GET_bool
|
test
|
def get_GET_bool(request, var_name, default=True):
"""
Tries to extract a boolean variable from the specified request.
"""
val = request.GET.get(var_name, default)
|
python
|
{
"resource": ""
}
|
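`get_GET_bool` is cut off after the initial lookup. Here is a self-contained sketch of the likely pattern, using a plain dict in place of Django's `request.GET`; the string-to-bool mapping is an assumption.

```python
def get_get_bool(params, var_name, default=True):
    """Extract a boolean query-string variable from a dict of GET params (sketch)."""
    val = params.get(var_name, default)
    if isinstance(val, bool):
        return val
    # Query-string values arrive as strings; map common truthy spellings (assumed convention).
    return str(val).strip().lower() in ('1', 'true', 't', 'yes', 'y')


print(get_get_bool({'cumulative': 'false'}, 'cumulative'))  # False
print(get_get_bool({}, 'cumulative', default=True))         # True
```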
q277508
|
get_next_colour
|
test
|
def get_next_colour():
"""
Gets the next colour in the Geckoboard colour list.
"""
colour = settings.GECKOBOARD_COLOURS[get_next_colour.cur_colour]
get_next_colour.cur_colour += 1
|
python
|
{
"resource": ""
}
|
q277509
|
get_gecko_params
|
test
|
def get_gecko_params(request, uid=None, days_back=0, cumulative=True,
frequency=settings.STATISTIC_FREQUENCY_DAILY, min_val=0, max_val=100,
chart_type='standard', percentage='show', sort=False):
"""
Returns the default GET parameters for a particular Geckoboard
view request.
"""
return {
'days_back' : int(request.GET.get('daysback', days_back)),
'uid' : request.GET.get('uid', uid),
'uids' : get_GET_array(request, 'uids[]'),
'cumulative' : get_GET_bool(request, 'cumulative', cumulative),
'frequency' : request.GET.get('frequency', frequency),
'min'
|
python
|
{
"resource": ""
}
|
q277510
|
geckoboard_number_widget
|
test
|
def geckoboard_number_widget(request):
"""
Returns a number widget for the specified metric's cumulative total.
"""
params = get_gecko_params(request, days_back=7)
metric = Metric.objects.get(uid=params['uid'])
try:
latest_stat = metric.statistics.filter(frequency=params['frequency']).order_by('-date_time')[0]
except IndexError:
return (0, 0)
try:
prev_stat = metric.statistics.filter(frequency=params['frequency'],
|
python
|
{
"resource": ""
}
|
q277511
|
geckoboard_rag_widget
|
test
|
def geckoboard_rag_widget(request):
"""
Searches the GET variables for metric UIDs, and displays
them in a RAG widget.
"""
params = get_gecko_params(request)
print params['uids']
max_date = datetime.now()-timedelta(days=params['days_back'])
metrics = Metric.objects.filter(uid__in=params['uids'])
|
python
|
{
"resource": ""
}
|
q277512
|
geckoboard_line_chart
|
test
|
def geckoboard_line_chart(request):
"""
Returns the data for a line chart for the specified metric.
"""
params = get_gecko_params(request, cumulative=False, days_back=7)
metric = Metric.objects.get(uid=params['uid'])
start_date = datetime.now()-timedelta(days=params['days_back'])
stats = [s for s in metric.statistics.filter(frequency=params['frequency'],
date_time__gte=start_date).order_by('date_time')]
if len(stats) == 0:
raise Exception, _("No statistics for metric %(metric)s.") % {'metric': params['uid']}
dates = [stats[0].date_time]
# get up to 3 dates from the stats
if len(stats) >= 3:
|
python
|
{
"resource": ""
}
|
q277513
|
geckoboard_geckometer
|
test
|
def geckoboard_geckometer(request):
"""
Returns a Geck-o-Meter control for the specified metric.
"""
params = get_gecko_params(request, cumulative=True)
metric = Metric.objects.get(uid=params['uid'])
|
python
|
{
"resource": ""
}
|
q277514
|
geckoboard_funnel
|
test
|
def geckoboard_funnel(request, frequency=settings.STATISTIC_FREQUENCY_DAILY):
"""
Returns a funnel chart for the metrics specified in the GET variables.
"""
# get all the parameters for this function
params = get_gecko_params(request, cumulative=True)
metrics = Metric.objects.filter(uid__in=params['uids'])
items = [(metric.latest_count(frequency=params['frequency'], count=not params['cumulative'],
|
python
|
{
"resource": ""
}
|
q277515
|
AnalyticsView.get_active_stats
|
test
|
def get_active_stats(self):
"""
Returns all of the active statistics for the gadgets currently registered.
"""
stats = []
for gadget in self._registry.values():
|
python
|
{
"resource": ""
}
|
q277516
|
AnalyticsView.register
|
test
|
def register(self, gadget):
"""
Registers a gadget object.
If a gadget is already registered, this will raise AlreadyRegistered.
"""
|
python
|
{
"resource": ""
}
|
q277517
|
AnalyticsView.get_context_data
|
test
|
def get_context_data(self, **kwargs):
"""
Get the context for this view.
"""
#max_columns, max_rows = self.get_max_dimension()
context = {
'gadgets': self._registry,
'columns': self.columns,
|
python
|
{
"resource": ""
}
|
q277518
|
Command.error
|
test
|
def error(self, message, code=1):
"""
Print error and stop command
"""
|
python
|
{
"resource": ""
}
|
q277519
|
Service.valid
|
test
|
def valid(schema=None):
""" Validation data by specific validictory configuration """
def dec(fun):
@wraps(fun)
def d_func(self, ctx, data, *a, **kw):
try:
|
python
|
{
"resource": ""
}
|
q277520
|
long_input
|
test
|
def long_input(prompt='Multi-line input\n' + \
'Enter EOF on a blank line to end ' + \
'(ctrl-D in *nix, ctrl-Z in windows)',
maxlines = None, maxlength = None):
"""Get a multi-line string as input"""
lines = []
print(prompt)
lnum = 1
try:
while True:
if maxlines:
if lnum > maxlines:
break
else:
if maxlength:
lines.append(string_input('')[:maxlength])
else:
|
python
|
{
"resource": ""
}
|
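`long_input` is truncated inside its read loop. A stdlib-only sketch of the same idea follows, reading lines until EOF while honouring the optional `maxlines`/`maxlength` limits; using the built-in `input()` instead of the row's `string_input` helper is an assumption.

```python
def long_input(prompt='Multi-line input\nEnter EOF on a blank line to end',
               maxlines=None, maxlength=None):
    """Collect a multi-line string from stdin until EOF (illustrative sketch)."""
    lines = []
    print(prompt)
    try:
        while maxlines is None or len(lines) < maxlines:
            line = input()
            if maxlength is not None:
                line = line[:maxlength]
            lines.append(line)
    except EOFError:
        # Ctrl-D (*nix) / Ctrl-Z then Enter (Windows) ends the input.
        pass
    return '\n'.join(lines)
```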
q277521
|
list_input
|
test
|
def list_input(prompt='List input - enter each item on a separate line\n' + \
'Enter EOF on a blank line to end ' + \
'(ctrl-D in *nix, ctrl-Z in windows)',
maxitems=None, maxlength=None):
"""Get a list of strings as input"""
lines = []
print(prompt)
inum = 1
try:
while True:
if maxitems:
if inum > maxitems:
break
else:
if maxlength:
lines.append(string_input('')[:maxlength])
else:
|
python
|
{
"resource": ""
}
|
q277522
|
outfile_input
|
test
|
def outfile_input(extension=None):
"""Get an output file name as input"""
fileok = False
while not fileok:
filename = string_input('File name? ')
if extension:
if not filename.endswith(extension):
if extension.startswith('.'):
filename = filename + extension
else:
filename = filename + '.' + extension
if os.path.isfile(filename):
choice = choice_input(prompt=filename + \
' already exists. Overwrite?',
options=['y', 'n'])
if choice == 'y':
try:
nowtime = time.time()
with open(filename, 'a') as f:
os.utime(filename, (nowtime, nowtime))
fileok = True
except IOError:
print('Write permission denied on ' + filename + \
'. Try again.')
except PermissionError:
print('Write permission denied on ' + filename + \
'. Try again.')
except FileNotFoundError:
print(filename + ': directory not found. Try again.')
else:
choice = choice_input(
prompt=filename + ' does not exist. Create it?',
options=['y', 'n'])
if choice
|
python
|
{
"resource": ""
}
|
q277523
|
Team.schedule
|
test
|
def schedule(self, year):
"""Gets schedule information for a team-season.
:year: The year for which we want the schedule.
:returns: DataFrame of schedule information.
"""
|
python
|
{
"resource": ""
}
|
q277524
|
BoxScore.winner
|
test
|
def winner(self):
"""Returns the team ID of the winning team. Returns NaN if a tie."""
hmScore = self.home_score()
awScore = self.away_score()
if hmScore > awScore:
return self.home()
|
python
|
{
"resource": ""
}
|
q277525
|
BoxScore.season
|
test
|
def season(self):
"""
Returns the year ID of the season in which this game took place.
Useful for week 17 January games.
:returns: An int representing the year
|
python
|
{
"resource": ""
}
|
q277526
|
BoxScore.starters
|
test
|
def starters(self):
"""Returns a DataFrame where each row is an entry in the starters table
from PFR.
The columns are:
* player_id - the PFR player ID for the player (note that this column
is not necessarily all unique; that is, one player can be a starter in
multiple positions, in theory).
* playerName - the listed name of the player; this too is not
necessarily unique.
* position - the position at which the player started for their team.
* team - the team for which the player started.
* home - True if the player's team was at home, False if they were away
* offense - True if the player is starting on an offensive position,
False if defense.
:returns: A pandas DataFrame. See the description for details.
"""
doc = self.get_doc()
a = doc('table#vis_starters')
h = doc('table#home_starters')
data = []
for h, table in enumerate((a, h)):
team = self.home() if h else self.away()
|
python
|
{
"resource": ""
}
|
q277527
|
BoxScore.surface
|
test
|
def surface(self):
"""The playing surface on which the game was played.
:returns: string representing the type of surface. Returns np.nan if
not available.
"""
doc = self.get_doc()
|
python
|
{
"resource": ""
}
|
q277528
|
BoxScore.coin_toss
|
test
|
def coin_toss(self):
"""Gets information relating to the opening coin toss.
Keys are:
* wonToss - contains the ID of the team that won the toss
* deferred - bool whether the team that won the toss deferred it
:returns: Dictionary of coin toss-related info.
"""
doc =
|
python
|
{
"resource": ""
}
|
q277529
|
BoxScore.weather
|
test
|
def weather(self):
"""Returns a dictionary of weather-related info.
Keys of the returned dict:
* temp
* windChill
* relHumidity
* windMPH
:returns: Dict of weather data.
"""
doc = self.get_doc()
table = doc('table#game_info')
giTable = sportsref.utils.parse_info_table(table)
if 'weather' in giTable:
regex = (
r'(?:(?P<temp>\-?\d+) degrees )?'
r'(?:relative humidity (?P<relHumidity>\d+)%, )?'
r'(?:wind (?P<windMPH>\d+) mph, )?'
r'(?:wind chill (?P<windChill>\-?\d+))?'
)
m = re.match(regex, giTable['weather'])
d = m.groupdict()
# cast values to int
for k in d:
try:
d[k] = int(d[k])
|
python
|
{
"resource": ""
}
|
q277530
|
BoxScore.ref_info
|
test
|
def ref_info(self):
"""Gets a dictionary of ref positions and the ref IDs of the refs for
that game.
|
python
|
{
"resource": ""
}
|
q277531
|
Season.schedule
|
test
|
def schedule(self, kind='R'):
"""Returns a list of BoxScore IDs for every game in the season.
Only needs to handle 'R' or 'P' options because decorator handles 'B'.
:param kind: 'R' for regular season, 'P' for playoffs, 'B' for both.
Defaults to 'R'.
:returns: DataFrame of schedule information.
:rtype: pd.DataFrame
"""
kind = kind.upper()[0]
dfs = []
# get games from each month
for month in ('october', 'november', 'december', 'january', 'february',
'march', 'april', 'may', 'june'):
try:
doc = self.get_sub_doc('games-{}'.format(month))
except ValueError:
continue
table = doc('table#schedule')
df = sportsref.utils.parse_table(table)
dfs.append(df)
df = pd.concat(dfs).reset_index(drop=True)
# figure out how many regular season games
try:
sportsref.utils.get_html('{}/playoffs/NBA_{}.html'.format(
|
python
|
{
"resource": ""
}
|
q277532
|
Season.standings
|
test
|
def standings(self):
"""Returns a DataFrame containing standings information."""
doc = self.get_sub_doc('standings')
east_table = doc('table#divs_standings_E')
east_df = pd.DataFrame(sportsref.utils.parse_table(east_table))
east_df.sort_values('wins', ascending=False, inplace=True)
east_df['seed'] = range(1, len(east_df) + 1)
east_df['conference'] = 'E'
west_table = doc('table#divs_standings_W')
west_df = sportsref.utils.parse_table(west_table)
west_df.sort_values('wins', ascending=False, inplace=True)
west_df['seed'] = range(1, len(west_df) + 1)
west_df['conference'] = 'W'
full_df = pd.concat([east_df, west_df], axis=0).reset_index(drop=True)
full_df['team_id'] = full_df.team_id.str.extract(r'(\w+)\W*\(\d+\)', expand=False)
full_df['gb'] = [gb
|
python
|
{
"resource": ""
}
|
q277533
|
Season._get_team_stats_table
|
test
|
def _get_team_stats_table(self, selector):
"""Helper function for stats tables on season pages. Returns a
DataFrame."""
|
python
|
{
"resource": ""
}
|
q277534
|
Season.roy_voting
|
test
|
def roy_voting(self):
"""Returns a DataFrame containing information about ROY voting."""
url = '{}/awards/awards_{}.html'.format(sportsref.nba.BASE_URL, self.yr)
|
python
|
{
"resource": ""
}
|
q277535
|
BoxScore.linescore
|
test
|
def linescore(self):
"""Returns the linescore for the game as a DataFrame."""
doc = self.get_main_doc()
table = doc('table#line_score')
columns = [th.text() for th in table('tr.thead').items('th')]
columns[0] = 'team_id'
data = [
[sportsref.utils.flatten_links(td)
|
python
|
{
"resource": ""
}
|
q277536
|
BoxScore.season
|
test
|
def season(self):
"""
Returns the year ID of the season in which this game took place.
:returns: An int
|
python
|
{
"resource": ""
}
|
q277537
|
BoxScore._get_player_stats
|
test
|
def _get_player_stats(self, table_id_fmt):
"""Returns a DataFrame of player stats from the game (either basic or
advanced, depending on the argument).
:param table_id_fmt: Format string for str.format with a placeholder
for the team ID (e.g. 'box_{}_basic')
:returns: DataFrame of player stats
"""
# get data
doc = self.get_main_doc()
tms = self.away(), self.home()
tm_ids = [table_id_fmt.format(tm) for tm in tms]
tables = [doc('table#{}'.format(tm_id).lower()) for tm_id in tm_ids]
dfs = [sportsref.utils.parse_table(table) for table in tables]
# clean data and add features
for i, (tm, df) in enumerate(zip(tms, dfs)):
|
python
|
{
"resource": ""
}
|
q277538
|
switch_to_dir
|
test
|
def switch_to_dir(dirPath):
"""
Decorator that switches to given directory before executing function, and
then returning to the original directory.
"""
def decorator(func):
@funcutils.wraps(func)
def wrapper(*args, **kwargs):
orig_cwd = os.getcwd()
|
python
|
{
"resource": ""
}
|
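The `switch_to_dir` decorator above stops right after saving the current working directory. Below is a complete sketch of the pattern using the standard library (`functools.wraps` replaces the row's `funcutils` alias); the `finally`-based restore is an assumption about the intended behaviour.

```python
import functools
import os


def switch_to_dir(dir_path):
    """Decorator: run the wrapped function inside dir_path, then restore the original cwd (sketch)."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            orig_cwd = os.getcwd()
            os.chdir(dir_path)
            try:
                return func(*args, **kwargs)
            finally:
                # Always switch back, even if the wrapped function raises.
                os.chdir(orig_cwd)
        return wrapper
    return decorator


@switch_to_dir('/tmp')
def where_am_i():
    return os.getcwd()

# where_am_i() would return '/tmp' (or its resolved path) and leave the cwd untouched afterwards.
```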
q277539
|
cache
|
test
|
def cache(func):
"""Caches the HTML returned by the specified function `func`. Caches it in
the user cache determined by the appdirs package.
"""
CACHE_DIR = appdirs.user_cache_dir('sportsref', getpass.getuser())
if not os.path.isdir(CACHE_DIR):
os.makedirs(CACHE_DIR)
@funcutils.wraps(func)
def wrapper(url):
# hash based on the URL
file_hash = hashlib.md5()
encoded_url = url.encode(errors='replace')
file_hash.update(encoded_url)
file_hash = file_hash.hexdigest()
filename = '{}/{}'.format(CACHE_DIR, file_hash)
sport_id = None
for a_base_url, a_sport_id in sportsref.SITE_ABBREV.items():
if url.startswith(a_base_url):
sport_id = a_sport_id
break
else:
print('No sport ID found for {}, not able to check cache'.format(url))
# check whether cache is valid or stale
file_exists = os.path.isfile(filename)
if sport_id and file_exists:
cur_time = int(time.time())
mod_time = int(os.path.getmtime(filename))
days_since_mod = datetime.timedelta(seconds=(cur_time - mod_time)).days
|
python
|
{
"resource": ""
}
|
q277540
|
get_class_instance_key
|
test
|
def get_class_instance_key(cls, args, kwargs):
"""
Returns a unique identifier for a class instantiation.
"""
l = [id(cls)]
for arg
|
python
|
{
"resource": ""
}
|
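The row breaks off inside the loop over `args`. One plausible way to finish the idea is to build a hashable tuple from the class id, the positional arguments, and the sorted keyword arguments; the exact key shape is an assumption.

```python
def get_class_instance_key(cls, args, kwargs):
    """Return a hashable identifier for a class instantiation (illustrative sketch)."""
    parts = [id(cls)]
    for arg in args:
        parts.append(arg)
    # kwargs are order-insensitive, so sort them before adding.
    for key in sorted(kwargs):
        parts.append((key, kwargs[key]))
    return tuple(parts)


class Team(object):
    pass


key = get_class_instance_key(Team, ('nwe', 2014), {'kind': 'R'})
print(key)  # (<id of Team>, 'nwe', 2014, ('kind', 'R'))
```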
q277541
|
memoize
|
test
|
def memoize(fun):
"""A decorator for memoizing functions.
Only works on functions that take simple arguments - calls that pass
list-like or dict-like arguments will not be memoized, and this function
will raise a TypeError.
"""
@funcutils.wraps(fun)
def wrapper(*args, **kwargs):
do_memoization = sportsref.get_option('memoize')
if not do_memoization:
return fun(*args, **kwargs)
hash_args = tuple(args)
|
python
|
{
"resource": ""
}
|
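`memoize` is cut off right after it starts building the cache key. Here is a minimal self-contained version of the same decorator, caching on positional and keyword arguments and raising `TypeError` for unhashable inputs; the `sportsref.get_option('memoize')` switch is intentionally dropped.

```python
import functools


def memoize(fun):
    """Memoize a function of hashable arguments (illustrative sketch)."""
    cache = {}

    @functools.wraps(fun)
    def wrapper(*args, **kwargs):
        # Unhashable args (lists, dicts, ...) raise TypeError here, as the docstring warns.
        key = (args, tuple(sorted(kwargs.items())))
        if key not in cache:
            cache[key] = fun(*args, **kwargs)
        return cache[key]

    return wrapper


@memoize
def slow_square(x):
    print('computing', x)
    return x * x


slow_square(4)  # prints "computing 4"
slow_square(4)  # served from the cache, no print
```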
q277542
|
Player.age
|
test
|
def age(self, year, month=2, day=1):
"""Returns the age of the player on a given date.
:year: int representing the year.
:month: int representing the month (1-12).
:day: int representing the day within the month (1-31).
:returns: Age in years as a float.
"""
doc = self.get_main_doc()
date_string = doc('span[itemprop="birthDate"]').attr('data-birth')
|
python
|
{
"resource": ""
}
|
q277543
|
Player._get_stats_table
|
test
|
def _get_stats_table(self, table_id, kind='R', summary=False):
"""Gets a stats table from the player page; helper function that does
the work for per-game, per-100-poss, etc. stats.
:table_id: the ID of the HTML table.
:kind: specifies regular season, playoffs, or both. One of 'R', 'P',
'B'. Defaults to 'R'.
:returns: A DataFrame of stats.
"""
doc = self.get_main_doc()
table_id = 'table#{}{}'.format(
|
python
|
{
"resource": ""
}
|
q277544
|
Player.stats_per_game
|
test
|
def stats_per_game(self, kind='R', summary=False):
"""Returns a DataFrame of per-game box score stats."""
|
python
|
{
"resource": ""
}
|
q277545
|
Player.stats_totals
|
test
|
def stats_totals(self, kind='R', summary=False):
"""Returns a DataFrame of
|
python
|
{
"resource": ""
}
|
q277546
|
Player.stats_per36
|
test
|
def stats_per36(self, kind='R', summary=False):
"""Returns a DataFrame of per-36-minutes stats."""
|
python
|
{
"resource": ""
}
|
q277547
|
Player.stats_per100
|
test
|
def stats_per100(self, kind='R', summary=False):
"""Returns a DataFrame of per-100-possession stats."""
|
python
|
{
"resource": ""
}
|
q277548
|
Player.stats_advanced
|
test
|
def stats_advanced(self, kind='R', summary=False):
"""Returns a DataFrame of advanced stats."""
|
python
|
{
"resource": ""
}
|
q277549
|
Player.stats_shooting
|
test
|
def stats_shooting(self, kind='R', summary=False):
"""Returns a DataFrame of shooting stats."""
|
python
|
{
"resource": ""
}
|
q277550
|
Player.stats_pbp
|
test
|
def stats_pbp(self, kind='R', summary=False):
"""Returns a DataFrame of play-by-play stats."""
|
python
|
{
"resource": ""
}
|
q277551
|
Player.gamelog_basic
|
test
|
def gamelog_basic(self, year, kind='R'):
"""Returns a table of a player's basic game-by-game stats for a season.
:param year: The year representing the desired season.
:param kind: specifies regular season, playoffs, or both. One of 'R',
'P', 'B'. Defaults to 'R'.
|
python
|
{
"resource": ""
}
|
q277552
|
DeleteHandler.get
|
test
|
def get(self):
'''
Please don't do this in production environments.
'''
self.write("Memory Session Object Demo:")
if "sv" in self.session:
current_value = self.session["sv"]
self.write("current sv value is %s, and system will delete this value.<br/>" % self.session["sv"])
|
python
|
{
"resource": ""
}
|
q277553
|
expand_details
|
test
|
def expand_details(df, detailCol='detail'):
"""Expands the details column of the given dataframe and returns the
resulting DataFrame.
:df: The input DataFrame.
:detailCol: The detail column name.
:returns: Returns DataFrame with new columns from pbp parsing.
"""
df = copy.deepcopy(df)
df['detail'] = df[detailCol]
dicts = [sportsref.nfl.pbp.parse_play_details(detail) for detail in df['detail'].values]
# clean up unmatched details
cols = {c for d in dicts if d for c in d.keys()}
blankEntry = {c: np.nan for c in cols}
newDicts = [d if d else blankEntry for d in dicts]
# get details DataFrame and merge it with original to create main DataFrame
details = pd.DataFrame(newDicts)
df = pd.merge(df, details, left_index=True, right_index=True)
# add isError column
errors = [i for i,
|
python
|
{
"resource": ""
}
|
q277554
|
_add_team_columns
|
test
|
def _add_team_columns(features):
"""Function that adds 'team' and 'opp' columns to the features by iterating
through the rows in order. A precondition is that the features dicts are in
order in a continuous game sense and that all rows are from the same game.
:features: A DataFrame with each row representing each play (in order).
:returns: A similar DataFrame but with 'team' and 'opp' columns added.
"""
features = features.to_dict('records')
curTm = curOpp = None
playAfterKickoff = False
# fill in team and opp columns
for row in features:
# if it's a kickoff or the play after a kickoff,
# figure out who has possession manually
if row['isKickoff'] or playAfterKickoff:
|
python
|
{
"resource": ""
}
|
q277555
|
_add_team_features
|
test
|
def _add_team_features(df):
"""Adds extra convenience features based on teams with and without
possession, with the precondition that there are 'team' and 'opp'
specified in row.
:df: A DataFrame representing a game's play-by-play data after
_clean_features has been called and 'team' and 'opp' have been added by
_add_team_columns.
:returns: A dict with new features in addition to previous features.
"""
assert df.team.notnull().all()
homeOnOff = df['team'] == df['home']
# create column for distToGoal
df['distToGoal'] = np.where(df['team'] != df['fieldSide'],
df['ydLine'], 100 - df['ydLine'])
df['distToGoal'] = np.where(df['isXP'] | df['isTwoPoint'],
2, df['distToGoal'])
#
|
python
|
{
"resource": ""
}
|
q277556
|
initialWinProb
|
test
|
def initialWinProb(line):
"""Gets the initial win probability of a game given its Vegas line.
:line: The Vegas line from the home team's perspective (negative means
home team is favored).
:returns: A float in [0., 100.] that represents the win probability.
"""
line = float(line)
probWin = 1.
|
python
|
{
"resource": ""
}
|
q277557
|
Player.passing
|
test
|
def passing(self, kind='R'):
"""Gets yearly passing stats for the player.
:kind: One of 'R', 'P', or 'B'. Case-insensitive; defaults to 'R'.
:returns: Pandas DataFrame with passing stats.
"""
doc = self.get_doc()
table = (doc('table#passing')
|
python
|
{
"resource": ""
}
|
q277558
|
Player._simple_year_award
|
test
|
def _simple_year_award(self, award_id):
"""Template for simple award functions that simply list years, such as
pro bowls and first-team all pro.
:award_id: The div ID that is appended to "leaderboard_" in selecting
the table's div.
:returns: List of years for the award.
|
python
|
{
"resource": ""
}
|
q277559
|
Team.name
|
test
|
def name(self):
"""Returns the real name of the franchise given the team ID.
Examples:
'nwe' -> 'New England Patriots'
'sea' -> 'Seattle Seahawks'
:returns: A string corresponding to the team's full name.
"""
doc = self.get_main_doc()
|
python
|
{
"resource": ""
}
|
q277560
|
Team.boxscores
|
test
|
def boxscores(self, year):
"""Gets list of BoxScore objects corresponding to the box scores from
that year.
:year: The year for which we want the boxscores; defaults to current
year.
:returns: np.array of strings representing boxscore IDs.
"""
|
python
|
{
"resource": ""
}
|
q277561
|
Team._year_info_pq
|
test
|
def _year_info_pq(self, year, keyword):
"""Returns a PyQuery object containing the info from the meta div at
the top of the team year page with the given keyword.
:year: Int representing the season.
:keyword: A keyword to filter to a single p tag in the meta div.
:returns: A PyQuery object for the selected p element.
"""
doc = self.get_year_doc(year)
p_tags = doc('div#meta div:not(.logo) p')
texts = [p_tag.text_content().strip() for p_tag in p_tags]
try:
return next(
|
python
|
{
"resource": ""
}
|
q277562
|
Team.head_coaches_by_game
|
test
|
def head_coaches_by_game(self, year):
"""Returns head coach data by game.
:year: An int representing the season in question.
:returns: An array with an entry per game of the season that the team
played (including playoffs). Each entry is the head coach's ID for that
game in the season.
"""
coach_str = self._year_info_pq(year, 'Coach').text()
regex = r'(\S+?) \((\d+)-(\d+)-(\d+)\)'
coachAndTenure = []
m = True
while m:
m = re.search(regex, coach_str)
|
python
|
{
"resource": ""
}
|
q277563
|
Team.schedule
|
test
|
def schedule(self, year):
"""Returns a DataFrame with schedule information for the given year.
:year: The year for the season in question.
:returns: Pandas DataFrame with schedule information.
"""
doc = self.get_year_doc(year)
table = doc('table#games')
|
python
|
{
"resource": ""
}
|
q277564
|
Team.off_coordinator
|
test
|
def off_coordinator(self, year):
"""Returns the coach ID for the team's OC in a given year.
:year: An int representing the year.
:returns: A string containing the coach ID of the OC.
"""
try:
|
python
|
{
"resource": ""
}
|
q277565
|
Team.def_coordinator
|
test
|
def def_coordinator(self, year):
"""Returns the coach ID for the team's DC in a given year.
:year: An int representing the year.
:returns: A string containing the coach ID of the DC.
"""
try:
|
python
|
{
"resource": ""
}
|
q277566
|
Team.stadium
|
test
|
def stadium(self, year):
"""Returns the ID for the stadium in which the team played in a given
year.
:year: The year in question.
:returns: A string representing the stadium ID.
"""
anchor =
|
python
|
{
"resource": ""
}
|
q277567
|
Team.off_scheme
|
test
|
def off_scheme(self, year):
"""Returns the name of the offensive scheme the team ran in the given
year.
:year: Int representing the season year.
:returns: A string representing the offensive scheme.
"""
scheme_text = self._year_info_pq(year, 'Offensive Scheme').text()
m
|
python
|
{
"resource": ""
}
|
q277568
|
Team.def_alignment
|
test
|
def def_alignment(self, year):
"""Returns the name of the defensive alignment the team ran in the
given year.
:year: Int representing the season year.
:returns: A string representing the defensive alignment.
"""
scheme_text = self._year_info_pq(year, 'Defensive Alignment').text()
m
|
python
|
{
"resource": ""
}
|
q277569
|
Team.off_splits
|
test
|
def off_splits(self, year):
"""Returns a DataFrame of offensive team splits for a season.
:year: int representing the season.
:returns: Pandas DataFrame of split data.
"""
doc = self.get_year_doc('{}_splits'.format(year))
tables = doc('table.stats_table')
dfs = [sportsref.utils.parse_table(table) for table in tables.items()]
dfs =
|
python
|
{
"resource": ""
}
|
q277570
|
get_html
|
test
|
def get_html(url):
"""Gets the HTML for the given URL using a GET request.
:url: the absolute URL of the desired page.
:returns: a string of HTML.
"""
global last_request_time
with throttle_process_lock:
with throttle_thread_lock:
# sleep until THROTTLE_DELAY secs have passed since last request
wait_left = THROTTLE_DELAY - (time.time() - last_request_time.value)
if wait_left > 0:
time.sleep(wait_left)
# make request
response = requests.get(url)
|
python
|
{
"resource": ""
}
|
q277571
|
flatten_links
|
test
|
def flatten_links(td, _recurse=False):
"""Flattens relative URLs within text of a table cell to IDs and returns
the result.
:td: the PyQuery object for the HTML to convert
:returns: the string with the links flattened to IDs
"""
# helper function to flatten individual strings/links
def _flatten_node(c):
if isinstance(c, basestring):
return c.strip()
elif 'href' in c.attrib:
c_id = rel_url_to_id(c.attrib['href'])
return c_id if c_id else c.text_content().strip()
|
python
|
{
"resource": ""
}
|
q277572
|
rel_url_to_id
|
test
|
def rel_url_to_id(url):
"""Converts a relative URL to a unique ID.
Here, 'ID' refers generally to the unique ID for a given 'type' that a
given datum has. For example, 'BradTo00' is Tom Brady's player ID - this
corresponds to his relative URL, '/players/B/BradTo00.htm'. Similarly,
'201409070dal' refers to the boxscore of the SF @ DAL game on 09/07/14.
Supported types:
* player/...
* boxscores/...
* teams/...
* years/...
* leagues/...
* awards/...
* coaches/...
* officials/...
* schools/...
* schools/high_schools.cgi?id=...
:returns: ID associated with the given relative URL.
"""
yearRegex = r'.*/years/(\d{4}).*|.*/gamelog/(\d{4}).*'
playerRegex = r'.*/players/(?:\w/)?(.+?)(?:/|\.html?)'
boxscoresRegex = r'.*/boxscores/(.+?)\.html?'
teamRegex = r'.*/teams/(\w{3})/.*'
coachRegex = r'.*/coaches/(.+?)\.html?'
stadiumRegex = r'.*/stadiums/(.+?)\.html?'
refRegex = r'.*/officials/(.+?r)\.html?'
collegeRegex = r'.*/schools/(\S+?)/.*|.*college=([^&]+)'
hsRegex = r'.*/schools/high_schools\.cgi\?id=([^\&]{8})'
bsDateRegex = r'.*/boxscores/index\.f?cgi\?(month=\d+&day=\d+&year=\d+)'
leagueRegex = r'.*/leagues/(.*_\d{4}).*'
awardRegex = r'.*/awards/(.+)\.htm'
regexes = [
|
python
|
{
"resource": ""
}
|
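The row stops at `regexes = [`. Given the patterns defined above it, a natural completion tries each regex in turn and returns the first captured group; the sketch below demonstrates that idea with a subset of the patterns, and the ordering is an assumption.

```python
import re


def rel_url_to_id(url):
    """Map a relative sports-reference URL to its ID (illustrative sketch, subset of patterns)."""
    player_regex = r'.*/players/(?:\w/)?(.+?)(?:/|\.html?)'
    boxscores_regex = r'.*/boxscores/(.+?)\.html?'
    team_regex = r'.*/teams/(\w{3})/.*'
    for regex in (player_regex, boxscores_regex, team_regex):
        match = re.match(regex, url)
        if match:
            # Return the first non-empty captured group.
            return next(g for g in match.groups() if g)
    return ''


print(rel_url_to_id('/players/B/BradTo00.htm'))      # BradTo00
print(rel_url_to_id('/boxscores/201409070dal.htm'))  # 201409070dal
```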
q277573
|
_kwargs_to_qs
|
test
|
def _kwargs_to_qs(**kwargs):
"""Converts kwargs given to PSF to a querystring.
:returns: the querystring.
"""
# start with defaults
inpOptDef = inputs_options_defaults()
opts = {
name: dct['value']
for name, dct in inpOptDef.items()
}
# clean up keys and values
for k, v in kwargs.items():
del kwargs[k]
# bool => 'Y'|'N'
if isinstance(v, bool):
kwargs[k] = 'Y' if v else 'N'
# tm, team => team_id
elif k.lower() in ('tm', 'team'):
kwargs['team_id'] = v
# yr, year, yrs, years => year_min, year_max
elif k.lower() in ('yr', 'year', 'yrs', 'years'):
if isinstance(v, collections.Iterable):
lst = list(v)
kwargs['year_min'] = min(lst)
kwargs['year_max'] = max(lst)
elif isinstance(v, basestring):
v = list(map(int, v.split(',')))
kwargs['year_min'] = min(v)
kwargs['year_max'] = max(v)
else:
kwargs['year_min'] = v
kwargs['year_max'] = v
# pos, position, positions => pos[]
elif k.lower() in ('pos', 'position', 'positions'):
if isinstance(v, basestring):
v = v.split(',')
elif not isinstance(v, collections.Iterable):
v = [v]
kwargs['pos[]'] = v
# draft_pos, ... => draft_pos[]
elif k.lower() in (
'draft_pos', 'draftpos', 'draftposition', 'draftpositions',
'draft_position', 'draft_positions'
):
if isinstance(v, basestring):
v = v.split(',')
elif not isinstance(v, collections.Iterable):
v = [v]
kwargs['draft_pos[]'] = v
# if not one of these cases, put it back in kwargs
else:
|
python
|
{
"resource": ""
}
|
q277574
|
_Streamer__read_process
|
test
|
def _Streamer__read_process(self, path, read_size, cbuf, stop, barrier, cyclic, offset, read_skip, sync):
"""
Main function for the processes that read from the HDF5 file.
:param self: A reference to the streamer object that created these processes.
:param path: The HDF5 path to the node to be read from.
:param read_size: The length of the block along the outer dimension to read.
:param cbuf: The circular buffer to place read elements into.
:param stop: The Event that signals the process to stop reading.
:param barrier: The Barrier that synchronises read cycles.
:param cyclic: True if the process should read cyclically.
:param offset: Offset into the dataset that this process should start reading at.
:param read_skip: How many elements to skip on each iteration.
:param sync: GuardSynchronizer to order writes to the buffer.
:return: Nothing
"""
# Multi-process access to HDF5 seems to behave better when there are no top-level imports of PyTables.
import tables as tb
h5_file = tb.open_file(self.filename, 'r', **self.h5_kw_args)
ary = h5_file.get_node(path)
i = offset
while not stop.is_set():
vals = ary[i:i + read_size]
# If the read goes off the end of the dataset, then wrap to the start.
if i + read_size > len(ary):
vals = np.concatenate([vals, ary[0:read_size - len(vals)]])
if sync is None:
# If no ordering is requested, then just write to the next available space in the buffer.
|
python
|
{
"resource": ""
}
|
q277575
|
SharedCircBuf.put_direct
|
test
|
def put_direct(self):
"""
Allows direct access to the buffer element.
Blocks until there is room to write into the buffer.
:return: A guard object that returns the buffer element.
"""
# Once the guard
|
python
|
{
"resource": ""
}
|
q277576
|
SharedCircBuf.get_direct
|
test
|
def get_direct(self):
"""
Allows direct access to the buffer element.
Blocks until there is data that can be read.
:return: A guard object that returns the buffer element.
"""
read_idx = self.__get_idx()
if read_idx is QueueClosed:
|
python
|
{
"resource": ""
}
|
q277577
|
SharedCircBuf.close
|
test
|
def close(self):
"""Close the queue, signalling that no more data can be put into the queue."""
|
python
|
{
"resource": ""
}
|
q277578
|
Streamer.__get_batch
|
test
|
def __get_batch(self, path, length, last=False):
"""
Get a block of data from the node at path.
:param path: The path to the node to read from.
:param length: The length along the outer dimension to read.
:param last: True if the remainder elements should be read.
:return: A copy of the requested block of data as a numpy array.
"""
import tables
h5_file = tables.open_file(self.filename, 'r')
h5_node = h5_file.get_node(path)
if len(h5_node) == 0:
raise
|
python
|
{
"resource": ""
}
|
q277579
|
Streamer.get_remainder
|
test
|
def get_remainder(self, path, block_size):
"""
Get the remainder elements. These elements will not be read in the direct queue access cyclic=False mode.
:param path: The HDF5 path to the dataset to be read.
:param block_size: The block size is used to calculate which elements will remain.
|
python
|
{
"resource": ""
}
|
q277580
|
Streamer.get_queue
|
test
|
def get_queue(self, path, n_procs=4, read_ahead=None, cyclic=False, block_size=None, ordered=False):
"""
Get a queue that allows direct access to the internal buffer. If the dataset to be read is chunked, the
block_size should be a multiple of the chunk size to maximise performance. In this case it is best to leave it
to the default. When cyclic=False, and block_size does not divide the dataset evenly, the remainder elements
will not be returned by the queue. When cyclic=True, the remainder elements will be part of a block that wraps
around the end and includes elements from the beginning of the dataset. By default, blocks are returned in the
order in which they become available. The ordered option will force blocks to be returned in on-disk order.
:param path: The HDF5 path to the dataset that should be read.
:param n_procs: The number of background processes used to read the dataset in parallel.
:param read_ahead: The number of blocks to allocate in the internal buffer.
:param cyclic: True if the queue should wrap at the end of the dataset.
:param block_size: The size along the outer dimension of the blocks to be read. Defaults to a multiple of
the chunk size, or to a 128KB sized block if the dataset is not chunked.
:param ordered: Force the reader to return data in on-disk order. May result in a performance penalty.
:return: A queue object that allows access to the internal buffer.
"""
# Get a block_size length of elements from the dataset to serve as a template for creating the buffer.
# If block_size=None, then get_batch calculates an appropriate block size.
example = self.__get_batch(path, block_size)
block_size = example.shape[0]
if read_ahead is None:
# 2x No. of processes for writing, 1 extra for reading.
read_ahead = 2*n_procs + 1
cbuf = SharedCircBuf(read_ahead, example)
stop = multiprocessing.Event()
barrier = Barrier(n_procs)
# If ordering has been requested, create a synchronizer.
sync = GuardSynchronizer() if ordered else None
procs = []
for i in range(n_procs):
# Each process is offset in the dataset by i*block_size
# The
|
python
|
{
"resource": ""
}
|
q277581
|
Streamer.get_generator
|
test
|
def get_generator(self, path, *args, **kw_args):
"""
Get a generator that allows convenient access to the streamed data.
Elements from the dataset are returned from the generator one row at a time.
Unlike the direct access queue, this generator also returns the remainder elements.
Additional arguments are forwarded to get_queue.
See the get_queue method for documentation of these parameters.
:param path:
:return: A generator that iterates over the rows in the dataset.
"""
q = self.get_queue(path=path, *args, **kw_args)
try:
|
python
|
{
"resource": ""
}
|
q277582
|
parse
|
test
|
def parse(ifp, pb_cls, **kwargs):
"""Parse a stream.
Args:
ifp (string or file-like object): input stream.
pb_cls (protobuf.message.Message.__class__): The class object of
the protobuf message type encoded in the stream.
"""
mode = 'rb'
if isinstance(ifp, str):
istream = open(ifp, mode=mode,
|
python
|
{
"resource": ""
}
|
q277583
|
dump
|
test
|
def dump(ofp, *pb_objs, **kwargs):
"""Write to a stream.
Args:
ofp (string or file-like object): output stream.
pb_objs (*protobuf.message.Message): list of protobuf message objects
to be written.
"""
mode = 'wb'
if isinstance(ofp,
|
python
|
{
"resource": ""
}
|
q277584
|
Stream._read_varint
|
test
|
def _read_varint(self):
"""Read a varint from file, parse it, and return the decoded integer.
"""
buff = self._fd.read(1)
if buff == b'':
return 0
while (bytearray(buff)[-1] & 0x80) >> 7 == 1: # while the MSB is 1
new_byte = self._fd.read(1)
|
python
|
{
"resource": ""
}
|
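The varint reader is truncated inside its continuation-bit loop. Here is a self-contained sketch of base-128 varint decoding over any binary file-like object, written independently of the row's `Stream` class; returning 0 at end-of-file mirrors the visible part of the snippet.

```python
import io


def read_varint(fd):
    """Decode one little-endian base-128 varint from a binary stream (illustrative sketch)."""
    result = 0
    shift = 0
    while True:
        buff = fd.read(1)
        if buff == b'':
            return 0  # end of file, mirroring the snippet's behaviour
        byte = bytearray(buff)[0]
        # The lower 7 bits carry payload; the MSB says whether another byte follows.
        result |= (byte & 0x7F) << shift
        if not byte & 0x80:
            return result
        shift += 7


print(read_varint(io.BytesIO(b'\xac\x02')))  # 300
```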
q277585
|
Stream._get_objs
|
test
|
def _get_objs(self):
"""A generator yielding all protobuf object data in the file. It is the
main parser of the stream encoding.
"""
while True:
count = self._read_varint()
if count == 0:
break
# Read a group containing `count` number of objects.
for _ in range(count):
size = self._read_varint()
if size == 0:
|
python
|
{
"resource": ""
}
|
q277586
|
Stream.close
|
test
|
def close(self):
"""Close the stream."""
self.flush()
if self._myfd is not None:
|
python
|
{
"resource": ""
}
|
q277587
|
Stream.write
|
test
|
def write(self, *pb2_obj):
"""Write a group of one or more protobuf objects to the file. Multiple
object groups can be written by calling this method several times
before closing stream or exiting the runtime context.
The input protobuf objects get buffered and will be written down when
the number of buffered objects exceed the `self._buffer_size`.
Args:
pb2_obj (*protobuf.message.Message): list of protobuf messages.
"""
base = len(self._write_buff)
for idx, obj in enumerate(pb2_obj):
|
python
|
{
"resource": ""
}
|
q277588
|
Stream.flush
|
test
|
def flush(self):
"""Write down buffer to the file."""
if not self.is_output():
return
count = len(self._write_buff)
if count == 0:
return
encodeVarint(self._fd.write, count, True)
for obj in self._write_buff:
|
python
|
{
"resource": ""
}
|
q277589
|
Game.get_game_dir
|
test
|
def get_game_dir(self, username=False):
"""Returns joined game directory path relative to Steamapps"""
if not self.common and not username:
raise RuntimeError("Can't determine this game's directory without username")
if self.common:
subdir = "common"
|
python
|
{
"resource": ""
}
|
q277590
|
TextInput.input_text_with_keyboard_emulation
|
test
|
def input_text_with_keyboard_emulation(self, text):
"""
Works around the problem of emulating user interactions with text inputs.
Emulates a key-down action on the first char of the input. This way, implementations which
require a key-down event to trigger auto-suggest are testable.
|
python
|
{
"resource": ""
}
|
q277591
|
make_fake_movie
|
test
|
def make_fake_movie(nframes, mask_shape=(64, 64), mask_center=None,
bg_intensity=0.1, mask_sigma=10, dt=0.02, rate=1.0,
tau=1., sigma=0.001, seed=None):
"""
Generate 2D fake fluorescence movie
Arguments:
---------------------------------------------------------------------------
nframes: number of timebins to simulate
mask_shape: tuple (nrows, ncols), shape of a single movie frame
mask_center: tuple (x, y), pixel coords of cell center
bg_intensity: scalar, amplitude of (static) baseline fluorescence
mask_sigma: scalar, standard deviation of Gaussian mask
dt: timestep (s)
rate: mean spike rate (Hz)
tau: time constant of decay in calcium concentration (s)
sigma: SD of additive noise on fluorescence
seed: Seed for RNG
Returns:
---------------------------------------------------------------------------
F: fluorescence [npixels, nframes]
c: calcium concentration [nframes,]
n: spike train [nframes,]
theta: tuple of true model parameters:
(sigma, alpha, beta, lambda, gamma)
"""
gen = np.random.RandomState(seed)
|
python
|
{
"resource": ""
}
|
q277592
|
ElementWithTraits.evaluate_traits
|
test
|
def evaluate_traits(self):
"""
Evaluates traits and returns a list containing the description of traits which are not true.
Notice that if LAZY_EVALUATION is set to False all traits are evaluated before returning. Use this option
only for debugging purposes.
"""
return_value = []
for trait in self.traits:
if not trait.condition():
|
python
|
{
"resource": ""
}
|
q277593
|
Wait.until_condition
|
test
|
def until_condition(self, condition, condition_description):
"""
Waits until the condition is True or returns a non-None value.
If any of the trait is still not present after timeout, raises a TimeoutException.
"""
end_time = time.time() + self._timeout
count = 1
while True:
try:
if not hasattr(condition, '__call__'):
raise TypeError("condition is not callable")
value = condition()
if type(value) is bool and value is not False:
return value
elif type(value) is not bool and value is not None:
return value
else:
logger.debug("#" + str(count) + " - wait until " + condition_description) # pragma: no cover
except self._ignored_exceptions as ex:
|
python
|
{
"resource": ""
}
|
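`until_condition` is cut off inside its exception handler. Below is a standalone sketch of the same polling pattern: call the condition repeatedly, return as soon as it yields True or a non-None non-bool value, and give up after a timeout; the built-in `TimeoutError` and the 0.5 s poll interval are assumptions.

```python
import time


def until_condition(condition, description, timeout=10.0, poll=0.5):
    """Poll `condition` until it yields True or a non-None value, else raise (sketch)."""
    if not callable(condition):
        raise TypeError("condition is not callable")
    end_time = time.time() + timeout
    while time.time() < end_time:
        value = condition()
        # Booleans must be True; any other type just needs to be non-None.
        if value is True or (not isinstance(value, bool) and value is not None):
            return value
        time.sleep(poll)
    raise TimeoutError("timed out waiting until " + description)


print(until_condition(lambda: 42, "the answer is computed"))  # 42
```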
q277594
|
Wait.until_traits_are_present
|
test
|
def until_traits_are_present(self, element_with_traits):
"""
Waits until all traits are present.
If any of the traits is still not present after timeout, raises a TimeoutException.
"""
end_time = time.time() + self._timeout
count = 1
missing_traits_descriptions = None
while True:
missing_traits_descriptions = []
try:
missing_traits_descriptions = element_with_traits.evaluate_traits()
if len(missing_traits_descriptions) == 0:
return True
else:
logger.debug("#{0} - wait until all traits are present: <{1}>".format(str(count), '> <'.join(
missing_traits_descriptions)))
except self._ignored_exceptions as ex: # pragma: no cover
logger.debug("Captured {0}: {1}".format(str(ex.__class__).replace("<type '", "").replace("'>", ""),
|
python
|
{
"resource": ""
}
|
q277595
|
Wait.with_ignored_exceptions
|
test
|
def with_ignored_exceptions(self, *ignored_exceptions):
"""
Set a list of exceptions that should be ignored inside the
|
python
|
{
"resource": ""
}
|
q277596
|
NADReceiver.main_volume
|
test
|
def main_volume(self, operator, value=None):
"""
Execute Main.Volume.
Returns int
"""
|
python
|
{
"resource": ""
}
|
q277597
|
NADReceiver.main_source
|
test
|
def main_source(self, operator, value=None):
"""
Execute Main.Source.
Returns int
"""
try:
source = int(self.exec_command('main', 'source',
|
python
|
{
"resource": ""
}
|
q277598
|
NADReceiverTCP._send
|
test
|
def _send(self, message, read_reply=False):
"""Send a command string to the amplifier."""
sock = None
for tries in range(0, 3):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self._host, self.PORT))
break
except (ConnectionError, BrokenPipeError):
if tries == 3:
print("socket connect failed.")
return
sleep(0.1)
sock.send(codecs.decode(message, 'hex_codec'))
if read_reply:
sleep(0.1)
reply = ''
tries = 0
max_tries = 20
while len(reply) < len(message) and tries < max_tries:
|
python
|
{
"resource": ""
}
|
q277599
|
NADReceiverTCP.status
|
test
|
def status(self):
"""
Return the status of the device.
Returns a dictionary with keys 'volume' (int 0-200) , 'power' (bool),
'muted' (bool) and 'source' (str).
"""
nad_reply = self._send(self.POLL_VOLUME +
self.POLL_POWER +
self.POLL_MUTED +
self.POLL_SOURCE, read_reply=True)
if nad_reply is None:
return
# split reply into parts of 10 characters
|
python
|
{
"resource": ""
}
|