Column                   Type            Range
body_hash                stringlengths   64 to 64
body                     stringlengths   23 to 109k
docstring                stringlengths   1 to 57k
path                     stringlengths   4 to 198
name                     stringlengths   1 to 115
repository_name          stringlengths   7 to 111
repository_stars         float64         0 to 191k
lang                     stringclasses   1 value
body_without_docstring   stringlengths   14 to 108k
unified                  stringlengths   45 to 133k
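Each row below is one record with these columns. As a minimal sketch of how the schema might be sanity-checked, assuming the records have been exported to a local JSON Lines file (the file name below is hypothetical and not part of the dataset):

import pandas as pd

# Hypothetical local export of the rows shown below, one JSON object per line.
df = pd.read_json("code_docstring_rows.jsonl", lines=True)

# Spot-check the ranges listed in the schema above.
assert (df["body_hash"].str.len() == 64).all()   # 64-character hex digests
assert df["lang"].nunique() == 1                  # single language class ("python")
print(df[["name", "path", "repository_name", "repository_stars"]].head())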
4aa1b80cadb82ba229ab290bebcc4d8986d1218fe2c95eeeb9d265c94de6bbd2
def data_sorting(fname, keyword, limit=10, output_file=False, line_plot=False, bar_plot=False, save_fig=False): "\n Sorting out the total sale of a certain type (keyword) video games each year.\n Only top 'limit' video games are listed in the file and picture.\n \n Args:\n :param fname: string\n :param keyword: 'Genre', 'ESRB_Rating', 'Platform', 'Publisher', 'Developer'\n :param limit: integer, only show top 'limit' number of data\n Return:\n A sorted dataframe\n " assert isinstance(fname, str), 'fname is not a string' assert isinstance(keyword, str), 'keyword is not a string' assert isinstance(limit, int), 'limit is not a integer' df = pd.read_csv(fname, delimiter=',') (nrow, ncol) = df.shape keyword_type = list(df[keyword].value_counts().index) print(('There are %d keyword types' % len(keyword_type))) year_range = list(df['Year'].value_counts().index) print(('There are %d years' % len(year_range))) output = pd.DataFrame(0, index=year_range, columns=keyword_type).sort_index(axis=0) for i in range(nrow): output.loc[(df['Year'][i], df[keyword][i])] += df['Total_Shipped'][i] output['total'] = output.sum(axis=1) output = output.append(pd.Series(output.sum(axis=0), name='total')) output = output.sort_values(by='total', axis=1, ascending=False) output = output.drop(list(output)[(limit + 2):], axis=1) output['total'] = output.drop('total', axis=1).sum(axis=1) output = output.round(2) if output_file: output.to_csv(('../../../conf/video_games/output/vgsales-%s-year.csv' % keyword)) output.drop('total', axis=1, inplace=True) output.drop('total', axis=0, inplace=True) output.drop(output.columns[0], axis=1, inplace=True) ind = list(range(2004, 2019)) plt.rcParams.update({'font.size': 20}) if line_plot: (fig, ax) = plt.subplots(figsize=(12, 6)) [plt.plot(output[i][:(- 2)], label=i, linewidth=5) for i in output.columns.values] plt.legend(bbox_to_anchor=(1, 1), prop={'size': 15}, frameon=False) plt.grid() plt.ylabel('Total Sales (millions)', fontsize=25) plt.xticks(ind, rotation=45) plt.yticks(fontsize=25) plt.xlim(min(ind), max(ind)) plt.ylim(0, output.max().max()) ax.xaxis.set_major_formatter(FormatStrFormatter('%d')) if save_fig: plt.savefig(f'../../../saved_plots/vgsales-{keyword}-year_line.png', bbox_inches='tight') elif bar_plot: (fig, ax) = plt.subplots(figsize=(12, 6)) axes = [] agg_sum = np.zeros(len(ind)) for i in list(output.columns.values): axes.append(plt.bar(ind, output[i][:(- 2)], label=i, edgecolor='none', bottom=agg_sum, zorder=3)) agg_sum += output[i].values[:(- 2)] plt.legend(bbox_to_anchor=(1, 1), prop={'size': 15}, frameon=False) plt.grid(axis='y', zorder=0) plt.ylabel('Total Sales (millions)', fontsize=25) plt.xticks(ind, rotation=45) plt.yticks(fontsize=20) plt.xlim((min(ind) - 1), (max(ind) + 1)) ax.xaxis.set_major_formatter(FormatStrFormatter('%d')) if save_fig: plt.savefig(f'../../../saved_plots/vgsales-{keyword}-year_bar.png', bbox_inches='tight') return output
Sorting out the total sale of a certain type (keyword) video games each year. Only top 'limit' video games are listed in the file and picture. Args: :param fname: string :param keyword: 'Genre', 'ESRB_Rating', 'Platform', 'Publisher', 'Developer' :param limit: integer, only show top 'limit' number of data Return: A sorted dataframe
src/analytics/video_games/data_preprocessing.py
data_sorting
manjotms10/google-trends-analytics
6
python
def data_sorting(fname, keyword, limit=10, output_file=False, line_plot=False, bar_plot=False, save_fig=False): "\n Sorting out the total sale of a certain type (keyword) video games each year.\n Only top 'limit' video games are listed in the file and picture.\n \n Args:\n :param fname: string\n :param keyword: 'Genre', 'ESRB_Rating', 'Platform', 'Publisher', 'Developer'\n :param limit: integer, only show top 'limit' number of data\n Return:\n A sorted dataframe\n " assert isinstance(fname, str), 'fname is not a string' assert isinstance(keyword, str), 'keyword is not a string' assert isinstance(limit, int), 'limit is not a integer' df = pd.read_csv(fname, delimiter=',') (nrow, ncol) = df.shape keyword_type = list(df[keyword].value_counts().index) print(('There are %d keyword types' % len(keyword_type))) year_range = list(df['Year'].value_counts().index) print(('There are %d years' % len(year_range))) output = pd.DataFrame(0, index=year_range, columns=keyword_type).sort_index(axis=0) for i in range(nrow): output.loc[(df['Year'][i], df[keyword][i])] += df['Total_Shipped'][i] output['total'] = output.sum(axis=1) output = output.append(pd.Series(output.sum(axis=0), name='total')) output = output.sort_values(by='total', axis=1, ascending=False) output = output.drop(list(output)[(limit + 2):], axis=1) output['total'] = output.drop('total', axis=1).sum(axis=1) output = output.round(2) if output_file: output.to_csv(('../../../conf/video_games/output/vgsales-%s-year.csv' % keyword)) output.drop('total', axis=1, inplace=True) output.drop('total', axis=0, inplace=True) output.drop(output.columns[0], axis=1, inplace=True) ind = list(range(2004, 2019)) plt.rcParams.update({'font.size': 20}) if line_plot: (fig, ax) = plt.subplots(figsize=(12, 6)) [plt.plot(output[i][:(- 2)], label=i, linewidth=5) for i in output.columns.values] plt.legend(bbox_to_anchor=(1, 1), prop={'size': 15}, frameon=False) plt.grid() plt.ylabel('Total Sales (millions)', fontsize=25) plt.xticks(ind, rotation=45) plt.yticks(fontsize=25) plt.xlim(min(ind), max(ind)) plt.ylim(0, output.max().max()) ax.xaxis.set_major_formatter(FormatStrFormatter('%d')) if save_fig: plt.savefig(f'../../../saved_plots/vgsales-{keyword}-year_line.png', bbox_inches='tight') elif bar_plot: (fig, ax) = plt.subplots(figsize=(12, 6)) axes = [] agg_sum = np.zeros(len(ind)) for i in list(output.columns.values): axes.append(plt.bar(ind, output[i][:(- 2)], label=i, edgecolor='none', bottom=agg_sum, zorder=3)) agg_sum += output[i].values[:(- 2)] plt.legend(bbox_to_anchor=(1, 1), prop={'size': 15}, frameon=False) plt.grid(axis='y', zorder=0) plt.ylabel('Total Sales (millions)', fontsize=25) plt.xticks(ind, rotation=45) plt.yticks(fontsize=20) plt.xlim((min(ind) - 1), (max(ind) + 1)) ax.xaxis.set_major_formatter(FormatStrFormatter('%d')) if save_fig: plt.savefig(f'../../../saved_plots/vgsales-{keyword}-year_bar.png', bbox_inches='tight') return output
def data_sorting(fname, keyword, limit=10, output_file=False, line_plot=False, bar_plot=False, save_fig=False): "\n Sorting out the total sale of a certain type (keyword) video games each year.\n Only top 'limit' video games are listed in the file and picture.\n \n Args:\n :param fname: string\n :param keyword: 'Genre', 'ESRB_Rating', 'Platform', 'Publisher', 'Developer'\n :param limit: integer, only show top 'limit' number of data\n Return:\n A sorted dataframe\n " assert isinstance(fname, str), 'fname is not a string' assert isinstance(keyword, str), 'keyword is not a string' assert isinstance(limit, int), 'limit is not a integer' df = pd.read_csv(fname, delimiter=',') (nrow, ncol) = df.shape keyword_type = list(df[keyword].value_counts().index) print(('There are %d keyword types' % len(keyword_type))) year_range = list(df['Year'].value_counts().index) print(('There are %d years' % len(year_range))) output = pd.DataFrame(0, index=year_range, columns=keyword_type).sort_index(axis=0) for i in range(nrow): output.loc[(df['Year'][i], df[keyword][i])] += df['Total_Shipped'][i] output['total'] = output.sum(axis=1) output = output.append(pd.Series(output.sum(axis=0), name='total')) output = output.sort_values(by='total', axis=1, ascending=False) output = output.drop(list(output)[(limit + 2):], axis=1) output['total'] = output.drop('total', axis=1).sum(axis=1) output = output.round(2) if output_file: output.to_csv(('../../../conf/video_games/output/vgsales-%s-year.csv' % keyword)) output.drop('total', axis=1, inplace=True) output.drop('total', axis=0, inplace=True) output.drop(output.columns[0], axis=1, inplace=True) ind = list(range(2004, 2019)) plt.rcParams.update({'font.size': 20}) if line_plot: (fig, ax) = plt.subplots(figsize=(12, 6)) [plt.plot(output[i][:(- 2)], label=i, linewidth=5) for i in output.columns.values] plt.legend(bbox_to_anchor=(1, 1), prop={'size': 15}, frameon=False) plt.grid() plt.ylabel('Total Sales (millions)', fontsize=25) plt.xticks(ind, rotation=45) plt.yticks(fontsize=25) plt.xlim(min(ind), max(ind)) plt.ylim(0, output.max().max()) ax.xaxis.set_major_formatter(FormatStrFormatter('%d')) if save_fig: plt.savefig(f'../../../saved_plots/vgsales-{keyword}-year_line.png', bbox_inches='tight') elif bar_plot: (fig, ax) = plt.subplots(figsize=(12, 6)) axes = [] agg_sum = np.zeros(len(ind)) for i in list(output.columns.values): axes.append(plt.bar(ind, output[i][:(- 2)], label=i, edgecolor='none', bottom=agg_sum, zorder=3)) agg_sum += output[i].values[:(- 2)] plt.legend(bbox_to_anchor=(1, 1), prop={'size': 15}, frameon=False) plt.grid(axis='y', zorder=0) plt.ylabel('Total Sales (millions)', fontsize=25) plt.xticks(ind, rotation=45) plt.yticks(fontsize=20) plt.xlim((min(ind) - 1), (max(ind) + 1)) ax.xaxis.set_major_formatter(FormatStrFormatter('%d')) if save_fig: plt.savefig(f'../../../saved_plots/vgsales-{keyword}-year_bar.png', bbox_inches='tight') return output<|docstring|>Sorting out the total sale of a certain type (keyword) video games each year. Only top 'limit' video games are listed in the file and picture. Args: :param fname: string :param keyword: 'Genre', 'ESRB_Rating', 'Platform', 'Publisher', 'Developer' :param limit: integer, only show top 'limit' number of data Return: A sorted dataframe<|endoftext|>
64b89f36af6954de712982338449e06bdae43b3fcb6f4b37993b400d92007397
def sale_history(fname, limit=10, month_aft=5, plot=False): "\n Returns sale history of top number (<='limit') of games from the data file. \n The sale history of selective games will be output to csv file and plotted.\n \n Args:\n :param fname: string\n :param limit: integer, output sale history of top 'limit' number of games\n :param month_aft: the specified number of months after release, including the release month\n :param plot: bool, if True, line plot is produced and saved\n Return:\n A dataframe that contains monthly sales of games\n " assert isinstance(fname, str), 'fname is not a string' assert isinstance(month_aft, int), 'month_aft is not a integer' assert isinstance(limit, int), 'limit is not a integer' df = pd.read_csv(fname, delimiter=',') week_aft = (month_aft * 4) df = df.loc[(df['rank of the week'] <= 30)] game_list = df.name.tolist() game_list = list(set(game_list)) msale_hist = pd.DataFrame(index=list(range((month_aft + 1)))) for game in game_list: wsale_hist = df.loc[(df['name'] == game)] wsale_hist = wsale_hist.iloc[::(- 1)] wsale_hist.reset_index(inplace=True, drop=True) temp = wsale_hist['week after release'] if ((len(temp) >= week_aft) and all((temp[:20] == list(range(1, 21))))): j = 0 msale_hist[game] = 0 for i in range((month_aft * 4)): if ((i % 4) == 0): j += 1 week_sale = int(wsale_hist['weekly sales'][i].replace(',', '')) msale_hist[game][j] += week_sale if (len(msale_hist.columns.to_list()) > limit): msale_hist = msale_hist.iloc[:, :limit] msale_hist.swapaxes('index', 'columns').to_csv('../../../conf/video_games/output/vgsales-game-sale-history.csv') print(msale_hist) if plot: plt.rcParams.update({'font.size': 18}) plt.figure(figsize=(12, 6)) [plt.plot(msale_hist[game][:(month_aft + 1)], label=game) for game in msale_hist.columns.to_list()] plt.legend(bbox_to_anchor=(1, 1), fontsize=12) plt.grid() plt.xlabel('Months after release') plt.ylabel('Monthly sales') plt.xticks(np.arange(6)) plt.savefig(f'../../../saved_plots/vgsales-game-sale-history.png', bbox_inches='tight') return msale_hist
Returns sale history of top number (<='limit') of games from the data file. The sale history of selective games will be output to csv file and plotted. Args: :param fname: string :param limit: integer, output sale history of top 'limit' number of games :param month_aft: the specified number of months after release, including the release month :param plot: bool, if True, line plot is produced and saved Return: A dataframe that contains monthly sales of games
src/analytics/video_games/data_preprocessing.py
sale_history
manjotms10/google-trends-analytics
6
python
def sale_history(fname, limit=10, month_aft=5, plot=False): "\n Returns sale history of top number (<='limit') of games from the data file. \n The sale history of selective games will be output to csv file and plotted.\n \n Args:\n :param fname: string\n :param limit: integer, output sale history of top 'limit' number of games\n :param month_aft: the specified number of months after release, including the release month\n :param plot: bool, if True, line plot is produced and saved\n Return:\n A dataframe that contains monthly sales of games\n " assert isinstance(fname, str), 'fname is not a string' assert isinstance(month_aft, int), 'month_aft is not a integer' assert isinstance(limit, int), 'limit is not a integer' df = pd.read_csv(fname, delimiter=',') week_aft = (month_aft * 4) df = df.loc[(df['rank of the week'] <= 30)] game_list = df.name.tolist() game_list = list(set(game_list)) msale_hist = pd.DataFrame(index=list(range((month_aft + 1)))) for game in game_list: wsale_hist = df.loc[(df['name'] == game)] wsale_hist = wsale_hist.iloc[::(- 1)] wsale_hist.reset_index(inplace=True, drop=True) temp = wsale_hist['week after release'] if ((len(temp) >= week_aft) and all((temp[:20] == list(range(1, 21))))): j = 0 msale_hist[game] = 0 for i in range((month_aft * 4)): if ((i % 4) == 0): j += 1 week_sale = int(wsale_hist['weekly sales'][i].replace(',', '')) msale_hist[game][j] += week_sale if (len(msale_hist.columns.to_list()) > limit): msale_hist = msale_hist.iloc[:, :limit] msale_hist.swapaxes('index', 'columns').to_csv('../../../conf/video_games/output/vgsales-game-sale-history.csv') print(msale_hist) if plot: plt.rcParams.update({'font.size': 18}) plt.figure(figsize=(12, 6)) [plt.plot(msale_hist[game][:(month_aft + 1)], label=game) for game in msale_hist.columns.to_list()] plt.legend(bbox_to_anchor=(1, 1), fontsize=12) plt.grid() plt.xlabel('Months after release') plt.ylabel('Monthly sales') plt.xticks(np.arange(6)) plt.savefig(f'../../../saved_plots/vgsales-game-sale-history.png', bbox_inches='tight') return msale_hist
def sale_history(fname, limit=10, month_aft=5, plot=False): "\n Returns sale history of top number (<='limit') of games from the data file. \n The sale history of selective games will be output to csv file and plotted.\n \n Args:\n :param fname: string\n :param limit: integer, output sale history of top 'limit' number of games\n :param month_aft: the specified number of months after release, including the release month\n :param plot: bool, if True, line plot is produced and saved\n Return:\n A dataframe that contains monthly sales of games\n " assert isinstance(fname, str), 'fname is not a string' assert isinstance(month_aft, int), 'month_aft is not a integer' assert isinstance(limit, int), 'limit is not a integer' df = pd.read_csv(fname, delimiter=',') week_aft = (month_aft * 4) df = df.loc[(df['rank of the week'] <= 30)] game_list = df.name.tolist() game_list = list(set(game_list)) msale_hist = pd.DataFrame(index=list(range((month_aft + 1)))) for game in game_list: wsale_hist = df.loc[(df['name'] == game)] wsale_hist = wsale_hist.iloc[::(- 1)] wsale_hist.reset_index(inplace=True, drop=True) temp = wsale_hist['week after release'] if ((len(temp) >= week_aft) and all((temp[:20] == list(range(1, 21))))): j = 0 msale_hist[game] = 0 for i in range((month_aft * 4)): if ((i % 4) == 0): j += 1 week_sale = int(wsale_hist['weekly sales'][i].replace(',', '')) msale_hist[game][j] += week_sale if (len(msale_hist.columns.to_list()) > limit): msale_hist = msale_hist.iloc[:, :limit] msale_hist.swapaxes('index', 'columns').to_csv('../../../conf/video_games/output/vgsales-game-sale-history.csv') print(msale_hist) if plot: plt.rcParams.update({'font.size': 18}) plt.figure(figsize=(12, 6)) [plt.plot(msale_hist[game][:(month_aft + 1)], label=game) for game in msale_hist.columns.to_list()] plt.legend(bbox_to_anchor=(1, 1), fontsize=12) plt.grid() plt.xlabel('Months after release') plt.ylabel('Monthly sales') plt.xticks(np.arange(6)) plt.savefig(f'../../../saved_plots/vgsales-game-sale-history.png', bbox_inches='tight') return msale_hist<|docstring|>Returns sale history of top number (<='limit') of games from the data file. The sale history of selective games will be output to csv file and plotted. Args: :param fname: string :param limit: integer, output sale history of top 'limit' number of games :param month_aft: the specified number of months after release, including the release month :param plot: bool, if True, line plot is produced and saved Return: A dataframe that contains monthly sales of games<|endoftext|>
cb3e830424ebfec6f82a5e218b2dc0e3941c9700f1c07e0bceecdbc7f919f7d2
def keyword_data_sorting(fname, year=[], genre=[], esrb_rating=[], platform=[], publisher=[], developer=[], top=1): "\n Sorting out the total sale of a certain type (keyword) video games each year.\n Only top 'top' video games are listed in the file and plots.\n \n Args:\n :param fname: string\n :param year: list of years (int)\n :param genre: list of genres (string)\n :param esrb_rating: list of esrb_rating (string)\n :param platform: list of platforms (string)\n :param publisher: list of publishers (string)\n :param developer: list of developers (string)\n :param top: integer, only show top 'limit' number of data\n Retrun:\n A dataframe sorted by specified keywords\n " assert isinstance(fname, str), 'fname is not a string' assert isinstance(year, list), 'year is not a list' assert isinstance(genre, list), 'genre is not a list' assert isinstance(esrb_rating, list), 'esrb_rating is not a list' assert isinstance(platform, list), 'platform is not a list' assert isinstance(publisher, list), 'publisher is not a list' assert isinstance(developer, list), 'developer is not a list' assert isinstance(top, int), 'top is not a int' for (i, j) in enumerate(year): assert isinstance(j, int), f'{i} component in year is not integer' for (i, j) in enumerate(genre): assert isinstance(j, str), f'{i} component in genre is not string' for (i, j) in enumerate(esrb_rating): assert isinstance(j, str), f'{i} component in esrb_rating is not string' for (i, j) in enumerate(platform): assert isinstance(j, str), f'{i} component in platform is not string' for (i, j) in enumerate(publisher): assert isinstance(j, str), f'{i} component in publisher is not string' for (i, j) in enumerate(developer): assert isinstance(j, str), f'{i} component in developer is not string' df = pd.read_csv(fname, delimiter=',') (nrow, ncol) = df.shape df['Year'] = df['Year'].astype('int') for i in range(nrow): df.loc[(i, 'Name')] = df.loc[(i, 'Name')].translate(str.maketrans('', '', string.punctuation)) for i in range(nrow): if (year and (df['Year'][i] not in year)): df.drop(index=i, inplace=True) elif (genre and (df['Genre'][i] not in genre)): df.drop(index=i, inplace=True) elif (esrb_rating and (df['ESRB_Rating'][i] not in esrb_rating)): df.drop(index=i, inplace=True) elif (platform and (df['Platform'][i] not in platform)): df.drop(index=i, inplace=True) elif (publisher and (df['Publisher'][i] not in publisher)): df.drop(index=i, inplace=True) elif (developer and (df['Developer'][i] not in developer)): df.drop(index=i, inplace=True) assert (not df.empty), 'No video game satisfy this criteria' output_df = pd.DataFrame(index=list(set(df['Name'])), columns=['Normalized Sales Volume']) output_df['Normalized Sales Volume'] = 0 (nrow, ncol) = df.shape for i in range(nrow): output_df.loc[(df.iloc[(i, 1)], 'Normalized Sales Volume')] += df.iloc[(i, 9)] output_df.sort_values(by='Normalized Sales Volume', ascending=False, inplace=True) (nrow, ncol) = output_df.shape assert (nrow >= top), ('Only %d video game satisfy this criteria, please check input "top"' % nrow) output_df.drop(index=output_df.index[top:], inplace=True) max_sale = max(output_df['Normalized Sales Volume']) for i in range(top): output_df.iloc[(i, 0)] = ((output_df.iloc[(i, 0)] / max_sale) * 100) return output_df
Sorting out the total sale of a certain type (keyword) video games each year. Only top 'top' video games are listed in the file and plots. Args: :param fname: string :param year: list of years (int) :param genre: list of genres (string) :param esrb_rating: list of esrb_rating (string) :param platform: list of platforms (string) :param publisher: list of publishers (string) :param developer: list of developers (string) :param top: integer, only show top 'limit' number of data Retrun: A dataframe sorted by specified keywords
src/analytics/video_games/data_preprocessing.py
keyword_data_sorting
manjotms10/google-trends-analytics
6
python
def keyword_data_sorting(fname, year=[], genre=[], esrb_rating=[], platform=[], publisher=[], developer=[], top=1): "\n Sorting out the total sale of a certain type (keyword) video games each year.\n Only top 'top' video games are listed in the file and plots.\n \n Args:\n :param fname: string\n :param year: list of years (int)\n :param genre: list of genres (string)\n :param esrb_rating: list of esrb_rating (string)\n :param platform: list of platforms (string)\n :param publisher: list of publishers (string)\n :param developer: list of developers (string)\n :param top: integer, only show top 'limit' number of data\n Retrun:\n A dataframe sorted by specified keywords\n " assert isinstance(fname, str), 'fname is not a string' assert isinstance(year, list), 'year is not a list' assert isinstance(genre, list), 'genre is not a list' assert isinstance(esrb_rating, list), 'esrb_rating is not a list' assert isinstance(platform, list), 'platform is not a list' assert isinstance(publisher, list), 'publisher is not a list' assert isinstance(developer, list), 'developer is not a list' assert isinstance(top, int), 'top is not a int' for (i, j) in enumerate(year): assert isinstance(j, int), f'{i} component in year is not integer' for (i, j) in enumerate(genre): assert isinstance(j, str), f'{i} component in genre is not string' for (i, j) in enumerate(esrb_rating): assert isinstance(j, str), f'{i} component in esrb_rating is not string' for (i, j) in enumerate(platform): assert isinstance(j, str), f'{i} component in platform is not string' for (i, j) in enumerate(publisher): assert isinstance(j, str), f'{i} component in publisher is not string' for (i, j) in enumerate(developer): assert isinstance(j, str), f'{i} component in developer is not string' df = pd.read_csv(fname, delimiter=',') (nrow, ncol) = df.shape df['Year'] = df['Year'].astype('int') for i in range(nrow): df.loc[(i, 'Name')] = df.loc[(i, 'Name')].translate(str.maketrans('', '', string.punctuation)) for i in range(nrow): if (year and (df['Year'][i] not in year)): df.drop(index=i, inplace=True) elif (genre and (df['Genre'][i] not in genre)): df.drop(index=i, inplace=True) elif (esrb_rating and (df['ESRB_Rating'][i] not in esrb_rating)): df.drop(index=i, inplace=True) elif (platform and (df['Platform'][i] not in platform)): df.drop(index=i, inplace=True) elif (publisher and (df['Publisher'][i] not in publisher)): df.drop(index=i, inplace=True) elif (developer and (df['Developer'][i] not in developer)): df.drop(index=i, inplace=True) assert (not df.empty), 'No video game satisfy this criteria' output_df = pd.DataFrame(index=list(set(df['Name'])), columns=['Normalized Sales Volume']) output_df['Normalized Sales Volume'] = 0 (nrow, ncol) = df.shape for i in range(nrow): output_df.loc[(df.iloc[(i, 1)], 'Normalized Sales Volume')] += df.iloc[(i, 9)] output_df.sort_values(by='Normalized Sales Volume', ascending=False, inplace=True) (nrow, ncol) = output_df.shape assert (nrow >= top), ('Only %d video game satisfy this criteria, please check input "top"' % nrow) output_df.drop(index=output_df.index[top:], inplace=True) max_sale = max(output_df['Normalized Sales Volume']) for i in range(top): output_df.iloc[(i, 0)] = ((output_df.iloc[(i, 0)] / max_sale) * 100) return output_df
def keyword_data_sorting(fname, year=[], genre=[], esrb_rating=[], platform=[], publisher=[], developer=[], top=1): "\n Sorting out the total sale of a certain type (keyword) video games each year.\n Only top 'top' video games are listed in the file and plots.\n \n Args:\n :param fname: string\n :param year: list of years (int)\n :param genre: list of genres (string)\n :param esrb_rating: list of esrb_rating (string)\n :param platform: list of platforms (string)\n :param publisher: list of publishers (string)\n :param developer: list of developers (string)\n :param top: integer, only show top 'limit' number of data\n Retrun:\n A dataframe sorted by specified keywords\n " assert isinstance(fname, str), 'fname is not a string' assert isinstance(year, list), 'year is not a list' assert isinstance(genre, list), 'genre is not a list' assert isinstance(esrb_rating, list), 'esrb_rating is not a list' assert isinstance(platform, list), 'platform is not a list' assert isinstance(publisher, list), 'publisher is not a list' assert isinstance(developer, list), 'developer is not a list' assert isinstance(top, int), 'top is not a int' for (i, j) in enumerate(year): assert isinstance(j, int), f'{i} component in year is not integer' for (i, j) in enumerate(genre): assert isinstance(j, str), f'{i} component in genre is not string' for (i, j) in enumerate(esrb_rating): assert isinstance(j, str), f'{i} component in esrb_rating is not string' for (i, j) in enumerate(platform): assert isinstance(j, str), f'{i} component in platform is not string' for (i, j) in enumerate(publisher): assert isinstance(j, str), f'{i} component in publisher is not string' for (i, j) in enumerate(developer): assert isinstance(j, str), f'{i} component in developer is not string' df = pd.read_csv(fname, delimiter=',') (nrow, ncol) = df.shape df['Year'] = df['Year'].astype('int') for i in range(nrow): df.loc[(i, 'Name')] = df.loc[(i, 'Name')].translate(str.maketrans('', '', string.punctuation)) for i in range(nrow): if (year and (df['Year'][i] not in year)): df.drop(index=i, inplace=True) elif (genre and (df['Genre'][i] not in genre)): df.drop(index=i, inplace=True) elif (esrb_rating and (df['ESRB_Rating'][i] not in esrb_rating)): df.drop(index=i, inplace=True) elif (platform and (df['Platform'][i] not in platform)): df.drop(index=i, inplace=True) elif (publisher and (df['Publisher'][i] not in publisher)): df.drop(index=i, inplace=True) elif (developer and (df['Developer'][i] not in developer)): df.drop(index=i, inplace=True) assert (not df.empty), 'No video game satisfy this criteria' output_df = pd.DataFrame(index=list(set(df['Name'])), columns=['Normalized Sales Volume']) output_df['Normalized Sales Volume'] = 0 (nrow, ncol) = df.shape for i in range(nrow): output_df.loc[(df.iloc[(i, 1)], 'Normalized Sales Volume')] += df.iloc[(i, 9)] output_df.sort_values(by='Normalized Sales Volume', ascending=False, inplace=True) (nrow, ncol) = output_df.shape assert (nrow >= top), ('Only %d video game satisfy this criteria, please check input "top"' % nrow) output_df.drop(index=output_df.index[top:], inplace=True) max_sale = max(output_df['Normalized Sales Volume']) for i in range(top): output_df.iloc[(i, 0)] = ((output_df.iloc[(i, 0)] / max_sale) * 100) return output_df<|docstring|>Sorting out the total sale of a certain type (keyword) video games each year. Only top 'top' video games are listed in the file and plots. Args: :param fname: string :param year: list of years (int) :param genre: list of genres (string) :param esrb_rating: list of esrb_rating (string) :param platform: list of platforms (string) :param publisher: list of publishers (string) :param developer: list of developers (string) :param top: integer, only show top 'limit' number of data Retrun: A dataframe sorted by specified keywords<|endoftext|>
7c6f9e6baa4b778bd10505c1cb981fab1314b96eafe8faae0ebfcb5d426225d9
@pytest.fixture(scope='session') def tasks_just_a_few(): 'All summaries and owners are unique.' return (Task('Write some code', 'Brian', True), Task("Code review Brian's code", 'Katie', False), Task('Fix what Brian did', 'Anna', False))
All summaries and owners are unique.
master/bopytest-code/code/ch5/d/tasks_proj/tests/conftest.py
tasks_just_a_few
AlexRogalskiy/DevArtifacts
4
python
@pytest.fixture(scope='session') def tasks_just_a_few(): return (Task('Write some code', 'Brian', True), Task("Code review Brian's code", 'Katie', False), Task('Fix what Brian did', 'Anna', False))
@pytest.fixture(scope='session') def tasks_just_a_few(): return (Task('Write some code', 'Brian', True), Task("Code review Brian's code", 'Katie', False), Task('Fix what Brian did', 'Anna', False))<|docstring|>All summaries and owners are unique.<|endoftext|>
6c09a2d98def6da9db3748071d8839262a42804f650fe11b5f59045cec0e0271
@pytest.fixture(scope='session') def tasks_mult_per_owner(): 'Several owners with several tasks each.' return (Task('Make a cookie', 'Raphael'), Task('Use an emoji', 'Raphael'), Task('Move to Berlin', 'Raphael'), Task('Teach people', 'Carrie'), Task('Make some videos', 'Carrie'), Task('Inspire', 'Carrie'), Task('Do a handstand', 'Daniel'), Task('Write some books', 'Daniel'), Task('Eat ice cream', 'Daniel'))
Several owners with several tasks each.
master/bopytest-code/code/ch5/d/tasks_proj/tests/conftest.py
tasks_mult_per_owner
AlexRogalskiy/DevArtifacts
4
python
@pytest.fixture(scope='session') def tasks_mult_per_owner(): return (Task('Make a cookie', 'Raphael'), Task('Use an emoji', 'Raphael'), Task('Move to Berlin', 'Raphael'), Task('Teach people', 'Carrie'), Task('Make some videos', 'Carrie'), Task('Inspire', 'Carrie'), Task('Do a handstand', 'Daniel'), Task('Write some books', 'Daniel'), Task('Eat ice cream', 'Daniel'))
@pytest.fixture(scope='session') def tasks_mult_per_owner(): return (Task('Make a cookie', 'Raphael'), Task('Use an emoji', 'Raphael'), Task('Move to Berlin', 'Raphael'), Task('Teach people', 'Carrie'), Task('Make some videos', 'Carrie'), Task('Inspire', 'Carrie'), Task('Do a handstand', 'Daniel'), Task('Write some books', 'Daniel'), Task('Eat ice cream', 'Daniel'))<|docstring|>Several owners with several tasks each.<|endoftext|>
3e823a7031e4398575fa8d0a77e2a35be2212b9a9916bea60365e12d63fa04c7
@pytest.fixture() def tasks_db(tasks_db_session): 'an empty tasks db' tasks.delete_all()
an empty tasks db
master/bopytest-code/code/ch5/d/tasks_proj/tests/conftest.py
tasks_db
AlexRogalskiy/DevArtifacts
4
python
@pytest.fixture() def tasks_db(tasks_db_session): tasks.delete_all()
@pytest.fixture() def tasks_db(tasks_db_session): tasks.delete_all()<|docstring|>an empty tasks db<|endoftext|>
ea7bee143a6bbfb04be4644ec8c3dd22b013486564e8f9a36a214c0146e73006
@pytest.fixture() def db_with_3_tasks(tasks_db, tasks_just_a_few): 'tasks db with 3 tasks, all unique' for t in tasks_just_a_few: tasks.add(t)
tasks db with 3 tasks, all unique
master/bopytest-code/code/ch5/d/tasks_proj/tests/conftest.py
db_with_3_tasks
AlexRogalskiy/DevArtifacts
4
python
@pytest.fixture() def db_with_3_tasks(tasks_db, tasks_just_a_few): for t in tasks_just_a_few: tasks.add(t)
@pytest.fixture() def db_with_3_tasks(tasks_db, tasks_just_a_few): for t in tasks_just_a_few: tasks.add(t)<|docstring|>tasks db with 3 tasks, all unique<|endoftext|>
c2f35727fa5cf3ef951b86c1fc11ac6a59c7a98c7f4a75d639eaa7585764d200
@pytest.fixture() def db_with_multi_per_owner(tasks_db, tasks_mult_per_owner): 'tasks db 3 owners, all with 3 tasks' for t in tasks_mult_per_owner: tasks.add(t)
tasks db 3 owners, all with 3 tasks
master/bopytest-code/code/ch5/d/tasks_proj/tests/conftest.py
db_with_multi_per_owner
AlexRogalskiy/DevArtifacts
4
python
@pytest.fixture() def db_with_multi_per_owner(tasks_db, tasks_mult_per_owner): for t in tasks_mult_per_owner: tasks.add(t)
@pytest.fixture() def db_with_multi_per_owner(tasks_db, tasks_mult_per_owner): for t in tasks_mult_per_owner: tasks.add(t)<|docstring|>tasks db 3 owners, all with 3 tasks<|endoftext|>
c073bd091fac59eb185a4565f69011618d69273d5f4e371d8f63cba8689a704c
@pytest.mark.parametrize('callback_cls', get_cbs_and_marks(callbacks=True)) def test_logged_data_is_json_serializable(callback_cls: Type[Callback]): 'Test that all logged data is json serializable, which is a requirement to use wandb.' pytest.importorskip('wandb', reason='wandb is optional') from wandb.sdk.data_types.base_types.wb_value import WBValue callback_kwargs = get_cb_kwargs(callback_cls) callback = callback_cls(**callback_kwargs) logger = InMemoryLogger() trainer = Trainer(model=SimpleModel(), train_dataloader=DataLoader(RandomClassificationDataset()), train_subset_num_batches=2, max_duration='1ep', callbacks=callback, loggers=logger, compute_training_metrics=True) trainer.fit() for log_calls in logger.data.values(): for (timestamp, log_level, data) in log_calls: del timestamp, log_level if isinstance(data, (WBValue, torch.Tensor)): continue json.dumps(data)
Test that all logged data is json serializable, which is a requirement to use wandb.
tests/loggers/test_wandb_logger.py
test_logged_data_is_json_serializable
growlix/composer
0
python
@pytest.mark.parametrize('callback_cls', get_cbs_and_marks(callbacks=True)) def test_logged_data_is_json_serializable(callback_cls: Type[Callback]): pytest.importorskip('wandb', reason='wandb is optional') from wandb.sdk.data_types.base_types.wb_value import WBValue callback_kwargs = get_cb_kwargs(callback_cls) callback = callback_cls(**callback_kwargs) logger = InMemoryLogger() trainer = Trainer(model=SimpleModel(), train_dataloader=DataLoader(RandomClassificationDataset()), train_subset_num_batches=2, max_duration='1ep', callbacks=callback, loggers=logger, compute_training_metrics=True) trainer.fit() for log_calls in logger.data.values(): for (timestamp, log_level, data) in log_calls: del timestamp, log_level if isinstance(data, (WBValue, torch.Tensor)): continue json.dumps(data)
@pytest.mark.parametrize('callback_cls', get_cbs_and_marks(callbacks=True)) def test_logged_data_is_json_serializable(callback_cls: Type[Callback]): pytest.importorskip('wandb', reason='wandb is optional') from wandb.sdk.data_types.base_types.wb_value import WBValue callback_kwargs = get_cb_kwargs(callback_cls) callback = callback_cls(**callback_kwargs) logger = InMemoryLogger() trainer = Trainer(model=SimpleModel(), train_dataloader=DataLoader(RandomClassificationDataset()), train_subset_num_batches=2, max_duration='1ep', callbacks=callback, loggers=logger, compute_training_metrics=True) trainer.fit() for log_calls in logger.data.values(): for (timestamp, log_level, data) in log_calls: del timestamp, log_level if isinstance(data, (WBValue, torch.Tensor)): continue json.dumps(data)<|docstring|>Test that all logged data is json serializable, which is a requirement to use wandb.<|endoftext|>
9f4381653e7c45d024a0329c0c9ca5a5538ec3deefb1e3e33a5ba1339ff3dfb7
def testJson(self): 'Load in a json string' s = '{"name": "test_job", "range": "1-10", "layers": [{"name": "layer_1", "module": "outline.modules.shell.Shell", "env": {"LAYER_KEY1": "LAYER_VALUE1"}, "command": ["/bin/ls"]}]}' ol = outline.load_json(s) self.assertEqual('test_job', ol.get_name()) self.assertEqual('1-10', ol.get_frame_range()) self.assertEqual('LAYER_VALUE1', ol.get_layer('layer_1').get_env('LAYER_KEY1')) ol.get_layer('layer_1').set_env('LAYER_KEY2', 'LAYER_VALUE2') l = outline.cuerun.OutlineLauncher(ol) root = Et.fromstring(l.serialize()) env1 = root.find('job/layers/layer/env/key[@name="LAYER_KEY1"]') self.assertEqual('LAYER_VALUE1', env1.text) env2 = root.find('job/layers/layer/env/key[@name="LAYER_KEY2"]') self.assertEqual('LAYER_VALUE2', env2.text)
Load in a json string
pyoutline/tests/json_test.py
testJson
jkellefiel4/OpenCue
334
python
def testJson(self): s = '{"name": "test_job", "range": "1-10", "layers": [{"name": "layer_1", "module": "outline.modules.shell.Shell", "env": {"LAYER_KEY1": "LAYER_VALUE1"}, "command": ["/bin/ls"]}]}' ol = outline.load_json(s) self.assertEqual('test_job', ol.get_name()) self.assertEqual('1-10', ol.get_frame_range()) self.assertEqual('LAYER_VALUE1', ol.get_layer('layer_1').get_env('LAYER_KEY1')) ol.get_layer('layer_1').set_env('LAYER_KEY2', 'LAYER_VALUE2') l = outline.cuerun.OutlineLauncher(ol) root = Et.fromstring(l.serialize()) env1 = root.find('job/layers/layer/env/key[@name="LAYER_KEY1"]') self.assertEqual('LAYER_VALUE1', env1.text) env2 = root.find('job/layers/layer/env/key[@name="LAYER_KEY2"]') self.assertEqual('LAYER_VALUE2', env2.text)
def testJson(self): s = '{"name": "test_job", "range": "1-10", "layers": [{"name": "layer_1", "module": "outline.modules.shell.Shell", "env": {"LAYER_KEY1": "LAYER_VALUE1"}, "command": ["/bin/ls"]}]}' ol = outline.load_json(s) self.assertEqual('test_job', ol.get_name()) self.assertEqual('1-10', ol.get_frame_range()) self.assertEqual('LAYER_VALUE1', ol.get_layer('layer_1').get_env('LAYER_KEY1')) ol.get_layer('layer_1').set_env('LAYER_KEY2', 'LAYER_VALUE2') l = outline.cuerun.OutlineLauncher(ol) root = Et.fromstring(l.serialize()) env1 = root.find('job/layers/layer/env/key[@name="LAYER_KEY1"]') self.assertEqual('LAYER_VALUE1', env1.text) env2 = root.find('job/layers/layer/env/key[@name="LAYER_KEY2"]') self.assertEqual('LAYER_VALUE2', env2.text)<|docstring|>Load in a json string<|endoftext|>
9893a2f416853501e7af40eff4e1af49758c6da4d3f664833139662161ce519b
@mock.patch('outline.layer.Layer.system') @mock.patch.dict(os.environ, {}, clear=True) def testJsonFile(self, systemMock): 'Load JSON from a file' with open(os.path.join(JSON_DIR, 'shell.outline')) as fp: ol = outline.load_json(fp.read()) with test_utils.TemporarySessionDirectory(): ol.setup() layer = ol.get_layer('shell_layer') self.assertEqual('LAYER_VALUE', layer.get_env('LAYER_KEY')) layer.execute('1000') systemMock.assert_has_calls([mock.call(['/bin/ls'], frame=1000)]) self.assertEqual('LAYER_VALUE', os.environ['LAYER_KEY'])
Load JSON from a file
pyoutline/tests/json_test.py
testJsonFile
jkellefiel4/OpenCue
334
python
@mock.patch('outline.layer.Layer.system') @mock.patch.dict(os.environ, {}, clear=True) def testJsonFile(self, systemMock): with open(os.path.join(JSON_DIR, 'shell.outline')) as fp: ol = outline.load_json(fp.read()) with test_utils.TemporarySessionDirectory(): ol.setup() layer = ol.get_layer('shell_layer') self.assertEqual('LAYER_VALUE', layer.get_env('LAYER_KEY')) layer.execute('1000') systemMock.assert_has_calls([mock.call(['/bin/ls'], frame=1000)]) self.assertEqual('LAYER_VALUE', os.environ['LAYER_KEY'])
@mock.patch('outline.layer.Layer.system') @mock.patch.dict(os.environ, {}, clear=True) def testJsonFile(self, systemMock): with open(os.path.join(JSON_DIR, 'shell.outline')) as fp: ol = outline.load_json(fp.read()) with test_utils.TemporarySessionDirectory(): ol.setup() layer = ol.get_layer('shell_layer') self.assertEqual('LAYER_VALUE', layer.get_env('LAYER_KEY')) layer.execute('1000') systemMock.assert_has_calls([mock.call(['/bin/ls'], frame=1000)]) self.assertEqual('LAYER_VALUE', os.environ['LAYER_KEY'])<|docstring|>Load JSON from a file<|endoftext|>
eb7520768fd99a428477de94988504c04444afd53ded7b940b07426fa3193851
def testFacility(self): 'Test facility from JSON' with open(os.path.join(JSON_DIR, 'facility.json')) as fp: ol = outline.load_json(fp.read()) self.assertEqual('test_facility', ol.get_facility())
Test facility from JSON
pyoutline/tests/json_test.py
testFacility
jkellefiel4/OpenCue
334
python
def testFacility(self): with open(os.path.join(JSON_DIR, 'facility.json')) as fp: ol = outline.load_json(fp.read()) self.assertEqual('test_facility', ol.get_facility())
def testFacility(self): with open(os.path.join(JSON_DIR, 'facility.json')) as fp: ol = outline.load_json(fp.read()) self.assertEqual('test_facility', ol.get_facility())<|docstring|>Test facility from JSON<|endoftext|>
fd54ea6ec722fb48c60af271a9212fd63496348fa82204e6274d1aabf5ac0020
async def watch_config(self, data_id, group, tenant): '监听nacos中的配置' while 1: res = (await self.__post_config_check(data_id, group, tenant)) if res: (await self.__get_config(data_id, group, tenant))
监听nacos中的配置
aio_nacos/nacos_config.py
watch_config
zqsc/aio-nacos
2
python
async def watch_config(self, data_id, group, tenant): while 1: res = (await self.__post_config_check(data_id, group, tenant)) if res: (await self.__get_config(data_id, group, tenant))
async def watch_config(self, data_id, group, tenant): while 1: res = (await self.__post_config_check(data_id, group, tenant)) if res: (await self.__get_config(data_id, group, tenant))<|docstring|>监听nacos中的配置<|endoftext|>
906d84db20fcbe9ea506651187501c94bed9e47f274503f5ee54027aac811625
async def __post_config_check(self, data_id, group, tenant): '检查md5, nacos又返回值 则配置有更新' headers = {'Long-Pulling-Timeout': '3000'} if (tenant and (tenant != 'public')): data = {'Listening-Configs': f'{data_id}{group}{self.config_pool.get(data_id).md5}{tenant}'} else: data = {'Listening-Configs': f'{data_id}{group}{self.config_pool.get(data_id).md5}'} url = (self.nacos_client.nacos_addr + '/nacos/v1/cs/configs/listener') url = self.nacos_client.add_url_auth(url) async with self.nacos_client.session.post(url, data=data, headers=headers, proxy=self.nacos_client.proxy) as response: check_status(response.status) res = (await response.read()) return res
检查md5, nacos又返回值 则配置有更新
aio_nacos/nacos_config.py
__post_config_check
zqsc/aio-nacos
2
python
async def __post_config_check(self, data_id, group, tenant): headers = {'Long-Pulling-Timeout': '3000'} if (tenant and (tenant != 'public')): data = {'Listening-Configs': f'{data_id}{group}{self.config_pool.get(data_id).md5}{tenant}'} else: data = {'Listening-Configs': f'{data_id}{group}{self.config_pool.get(data_id).md5}'} url = (self.nacos_client.nacos_addr + '/nacos/v1/cs/configs/listener') url = self.nacos_client.add_url_auth(url) async with self.nacos_client.session.post(url, data=data, headers=headers, proxy=self.nacos_client.proxy) as response: check_status(response.status) res = (await response.read()) return res
async def __post_config_check(self, data_id, group, tenant): headers = {'Long-Pulling-Timeout': '3000'} if (tenant and (tenant != 'public')): data = {'Listening-Configs': f'{data_id}{group}{self.config_pool.get(data_id).md5}{tenant}'} else: data = {'Listening-Configs': f'{data_id}{group}{self.config_pool.get(data_id).md5}'} url = (self.nacos_client.nacos_addr + '/nacos/v1/cs/configs/listener') url = self.nacos_client.add_url_auth(url) async with self.nacos_client.session.post(url, data=data, headers=headers, proxy=self.nacos_client.proxy) as response: check_status(response.status) res = (await response.read()) return res<|docstring|>检查md5, nacos又返回值 则配置有更新<|endoftext|>
d689e1ec675c74e980f8ce7273883ff0e0a4972a76b5f9aedb50a4b36ec239ed
async def __get_config(self, data_id, group, tenant): '获得配置配置, 并写入配置池中' self.logger.info(('从nacos中更新配置-data_id:%s; grout:%s; tenant:%s' % (data_id, group, tenant))) params = {'dataId': data_id, 'group': group} if (tenant and (tenant != 'public')): params['tenant'] = tenant url = (self.nacos_client.nacos_addr + '/nacos/v1/cs/configs') url = self.nacos_client.add_url_auth(url) async with self.nacos_client.session.get(url=url, params=params, proxy=self.nacos_client.proxy) as response: check_status(response.status) conf_md5 = response.headers.getone('Content-MD5') conf_type = response.headers.getone('Config-Type') res = (await response.read()) if (conf_type == 'json'): conf = Config(conf_md5) conf.__dict__.update(json.loads(res)) self.config_pool[data_id] = conf
获得配置配置, 并写入配置池中
aio_nacos/nacos_config.py
__get_config
zqsc/aio-nacos
2
python
async def __get_config(self, data_id, group, tenant): self.logger.info(('从nacos中更新配置-data_id:%s; grout:%s; tenant:%s' % (data_id, group, tenant))) params = {'dataId': data_id, 'group': group} if (tenant and (tenant != 'public')): params['tenant'] = tenant url = (self.nacos_client.nacos_addr + '/nacos/v1/cs/configs') url = self.nacos_client.add_url_auth(url) async with self.nacos_client.session.get(url=url, params=params, proxy=self.nacos_client.proxy) as response: check_status(response.status) conf_md5 = response.headers.getone('Content-MD5') conf_type = response.headers.getone('Config-Type') res = (await response.read()) if (conf_type == 'json'): conf = Config(conf_md5) conf.__dict__.update(json.loads(res)) self.config_pool[data_id] = conf
async def __get_config(self, data_id, group, tenant): self.logger.info(('从nacos中更新配置-data_id:%s; grout:%s; tenant:%s' % (data_id, group, tenant))) params = {'dataId': data_id, 'group': group} if (tenant and (tenant != 'public')): params['tenant'] = tenant url = (self.nacos_client.nacos_addr + '/nacos/v1/cs/configs') url = self.nacos_client.add_url_auth(url) async with self.nacos_client.session.get(url=url, params=params, proxy=self.nacos_client.proxy) as response: check_status(response.status) conf_md5 = response.headers.getone('Content-MD5') conf_type = response.headers.getone('Config-Type') res = (await response.read()) if (conf_type == 'json'): conf = Config(conf_md5) conf.__dict__.update(json.loads(res)) self.config_pool[data_id] = conf<|docstring|>获得配置配置, 并写入配置池中<|endoftext|>
d8d8e19e52e979e9779557cc60658957707c66968e90d7c8ff4ebaad33ca3add
def make_trending_cache_key(time_range, genre, version=DEFAULT_TRENDING_VERSIONS[TrendingType.TRACKS]): 'Makes a cache key resembling `generated-trending:week:electronic`' version_name = (f':{version.name}' if (version != DEFAULT_TRENDING_VERSIONS[TrendingType.TRACKS]) else '') return f"generated-trending{version_name}:{time_range}:{(genre.lower() if genre else '')}"
Makes a cache key resembling `generated-trending:week:electronic`
discovery-provider/src/queries/get_trending_tracks.py
make_trending_cache_key
lucylow/audius-protocol
1
python
def make_trending_cache_key(time_range, genre, version=DEFAULT_TRENDING_VERSIONS[TrendingType.TRACKS]): version_name = (f':{version.name}' if (version != DEFAULT_TRENDING_VERSIONS[TrendingType.TRACKS]) else '') return f"generated-trending{version_name}:{time_range}:{(genre.lower() if genre else '')}"
def make_trending_cache_key(time_range, genre, version=DEFAULT_TRENDING_VERSIONS[TrendingType.TRACKS]): version_name = (f':{version.name}' if (version != DEFAULT_TRENDING_VERSIONS[TrendingType.TRACKS]) else '') return f"generated-trending{version_name}:{time_range}:{(genre.lower() if genre else '')}"<|docstring|>Makes a cache key resembling `generated-trending:week:electronic`<|endoftext|>
127b04b641ccca5de4073cfb5c159429cf2708824a8d933a509f67b4357c0048
def make_generate_unpopulated_trending(session, genre, time_range, strategy): 'Wraps a call to `generate_unpopulated_trending` for use in `use_redis_cache`, which\n expects to be passed a function with no arguments.' def wrapped(): if strategy.use_mat_view: return generate_unpopulated_trending_from_mat_views(session, genre, time_range, strategy) return generate_unpopulated_trending(session, genre, time_range, strategy) return wrapped
Wraps a call to `generate_unpopulated_trending` for use in `use_redis_cache`, which expects to be passed a function with no arguments.
discovery-provider/src/queries/get_trending_tracks.py
make_generate_unpopulated_trending
lucylow/audius-protocol
1
python
def make_generate_unpopulated_trending(session, genre, time_range, strategy): 'Wraps a call to `generate_unpopulated_trending` for use in `use_redis_cache`, which\n expects to be passed a function with no arguments.' def wrapped(): if strategy.use_mat_view: return generate_unpopulated_trending_from_mat_views(session, genre, time_range, strategy) return generate_unpopulated_trending(session, genre, time_range, strategy) return wrapped
def make_generate_unpopulated_trending(session, genre, time_range, strategy): 'Wraps a call to `generate_unpopulated_trending` for use in `use_redis_cache`, which\n expects to be passed a function with no arguments.' def wrapped(): if strategy.use_mat_view: return generate_unpopulated_trending_from_mat_views(session, genre, time_range, strategy) return generate_unpopulated_trending(session, genre, time_range, strategy) return wrapped<|docstring|>Wraps a call to `generate_unpopulated_trending` for use in `use_redis_cache`, which expects to be passed a function with no arguments.<|endoftext|>
38209f940b946b865cffd08e4846b980df178b90266c26b2d7afb347da660107
def get_trending_tracks(args: GetTrendingTracksArgs, strategy: BaseTrendingStrategy): 'Gets trending by getting the currently cached tracks and then populating them.' db = get_db_read_replica() with db.scoped_session() as session: return _get_trending_tracks_with_session(session, args, strategy)
Gets trending by getting the currently cached tracks and then populating them.
discovery-provider/src/queries/get_trending_tracks.py
get_trending_tracks
lucylow/audius-protocol
1
python
def get_trending_tracks(args: GetTrendingTracksArgs, strategy: BaseTrendingStrategy): db = get_db_read_replica() with db.scoped_session() as session: return _get_trending_tracks_with_session(session, args, strategy)
def get_trending_tracks(args: GetTrendingTracksArgs, strategy: BaseTrendingStrategy): db = get_db_read_replica() with db.scoped_session() as session: return _get_trending_tracks_with_session(session, args, strategy)<|docstring|>Gets trending by getting the currently cached tracks and then populating them.<|endoftext|>
a71f6df6ba8dacea95c77b6ae352c69173be8bafc19570bbabf15ac233c32dcc
def create_ec2_instance(stack, name, ami, subnetid, keyname, instance_profile='', instance_type='t1.micro', security_groups=(), user_data=''): 'Add EC2 Instance Resource.' return stack.stack.add_resource(Instance('{0}'.format(name), ImageId=ami, InstanceType=instance_type, KeyName=keyname, SecurityGroupIds=list(security_groups), SubnetId=subnetid, Tags=Tags(Name=name), UserData=Base64(user_data), IamInstanceProfile=instance_profile))
Add EC2 Instance Resource.
tropohelper/instances.py
create_ec2_instance
devblueray/tropohelper
0
python
def create_ec2_instance(stack, name, ami, subnetid, keyname, instance_profile='', instance_type='t1.micro', security_groups=(), user_data=''): return stack.stack.add_resource(Instance('{0}'.format(name), ImageId=ami, InstanceType=instance_type, KeyName=keyname, SecurityGroupIds=list(security_groups), SubnetId=subnetid, Tags=Tags(Name=name), UserData=Base64(user_data), IamInstanceProfile=instance_profile))
def create_ec2_instance(stack, name, ami, subnetid, keyname, instance_profile='', instance_type='t1.micro', security_groups=(), user_data=''): return stack.stack.add_resource(Instance('{0}'.format(name), ImageId=ami, InstanceType=instance_type, KeyName=keyname, SecurityGroupIds=list(security_groups), SubnetId=subnetid, Tags=Tags(Name=name), UserData=Base64(user_data), IamInstanceProfile=instance_profile))<|docstring|>Add EC2 Instance Resource.<|endoftext|>
75e60cfc98ae60758f99a82c96cee80f8cfc8bc325e288bea0fc91d32018333c
def create_launch_config(stack, name, ami, security_group, instance_type, profile, block_devices=[], user_data=''): 'Add EC2 LaunchConfiguration Resource.' return stack.stack.add_resource(LaunchConfiguration('{0}{1}LC'.format(stack.env, name.replace('_', '')), ImageId=ami, KeyName=Ref(stack.ssh_key_param), SecurityGroups=security_group, InstanceType=instance_type, IamInstanceProfile=profile, UserData=Base64(user_data), BlockDeviceMappings=block_devices))
Add EC2 LaunchConfiguration Resource.
tropohelper/instances.py
create_launch_config
devblueray/tropohelper
0
python
def create_launch_config(stack, name, ami, security_group, instance_type, profile, block_devices=[], user_data=''): return stack.stack.add_resource(LaunchConfiguration('{0}{1}LC'.format(stack.env, name.replace('_', '')), ImageId=ami, KeyName=Ref(stack.ssh_key_param), SecurityGroups=security_group, InstanceType=instance_type, IamInstanceProfile=profile, UserData=Base64(user_data), BlockDeviceMappings=block_devices))
def create_launch_config(stack, name, ami, security_group, instance_type, profile, block_devices=[], user_data=''): return stack.stack.add_resource(LaunchConfiguration('{0}{1}LC'.format(stack.env, name.replace('_', '')), ImageId=ami, KeyName=Ref(stack.ssh_key_param), SecurityGroups=security_group, InstanceType=instance_type, IamInstanceProfile=profile, UserData=Base64(user_data), BlockDeviceMappings=block_devices))<|docstring|>Add EC2 LaunchConfiguration Resource.<|endoftext|>
ee0a271262af3d03ee5437363b754158deff12565b839a6cd6c6f73fa1220d05
def create_autoscale_group(stack, name, launch_con, vpc_zones, elbs=[], target_groups=[]): 'Add EC2 AutoScalingGroup Resource.' return stack.stack.add_resource(AutoScalingGroup('{0}{1}ASG'.format(stack.env, name.replace('_', '')), LaunchConfigurationName=Ref(launch_con), MinSize='0', MaxSize='5', HealthCheckType='EC2', VPCZoneIdentifier=vpc_zones, TerminationPolicies=['OldestInstance'], LoadBalancerNames=elbs, TargetGroupARNs=target_groups))
Add EC2 AutoScalingGroup Resource.
tropohelper/instances.py
create_autoscale_group
devblueray/tropohelper
0
python
def create_autoscale_group(stack, name, launch_con, vpc_zones, elbs=[], target_groups=[]): return stack.stack.add_resource(AutoScalingGroup('{0}{1}ASG'.format(stack.env, name.replace('_', '')), LaunchConfigurationName=Ref(launch_con), MinSize='0', MaxSize='5', HealthCheckType='EC2', VPCZoneIdentifier=vpc_zones, TerminationPolicies=['OldestInstance'], LoadBalancerNames=elbs, TargetGroupARNs=target_groups))
def create_autoscale_group(stack, name, launch_con, vpc_zones, elbs=[], target_groups=[]): return stack.stack.add_resource(AutoScalingGroup('{0}{1}ASG'.format(stack.env, name.replace('_', '')), LaunchConfigurationName=Ref(launch_con), MinSize='0', MaxSize='5', HealthCheckType='EC2', VPCZoneIdentifier=vpc_zones, TerminationPolicies=['OldestInstance'], LoadBalancerNames=elbs, TargetGroupARNs=target_groups))<|docstring|>Add EC2 AutoScalingGroup Resource.<|endoftext|>
3dd18c1663647b79dbd1cd183c7df6842c02aacfe2aca2e05261f490d85433d1
def create_db_param_group(stack, name, description, family, parameters={}): 'Create a DB Parameter Group' return stack.stack.add_resource(DBParameterGroup('{0}DBParamGroup'.format(name), Description='{0} Parameter Group'.format(description), Family=family, Parameters=parameters))
Create a DB Parameter Group
tropohelper/instances.py
create_db_param_group
devblueray/tropohelper
0
python
def create_db_param_group(stack, name, description, family, parameters={}): return stack.stack.add_resource(DBParameterGroup('{0}DBParamGroup'.format(name), Description='{0} Parameter Group'.format(description), Family=family, Parameters=parameters))
def create_db_param_group(stack, name, description, family, parameters={}): return stack.stack.add_resource(DBParameterGroup('{0}DBParamGroup'.format(name), Description='{0} Parameter Group'.format(description), Family=family, Parameters=parameters))<|docstring|>Create a DB Parameter Group<|endoftext|>
125561f9178de364db51c2aa9770718ab1449767fedb16f003ca16d4be66d423
def create_rds_instance(stack, db_instance_identifier, db_name, db_instance_class, db_username, db_password, db_subnet_group, db_security_groups, vpc_security_groups, db_param_group, allocated_storage='20', engine='MySQL', engine_version='5.7.17', storage_encrypted='True', deletion_policy='Retain', multi_az=False): 'Add RDS Instance Resource.' return stack.stack.add_resource(DBInstance('RDSInstance', DBInstanceIdentifier=db_instance_identifier, DBName=db_name, DBInstanceClass=db_instance_class, AllocatedStorage=allocated_storage, Engine=engine, EngineVersion=engine_version, MasterUsername=db_username, MasterUserPassword=db_password, DBSubnetGroupName=db_subnet_group, DBSecurityGroups=list(db_security_groups), VPCSecurityGroups=list(vpc_security_groups), DBParameterGroupName=db_param_group, StorageEncrypted=storage_encrypted, DeletionPolicy=deletion_policy, MultiAZ=multi_az))
Add RDS Instance Resource.
tropohelper/instances.py
create_rds_instance
devblueray/tropohelper
0
python
def create_rds_instance(stack, db_instance_identifier, db_name, db_instance_class, db_username, db_password, db_subnet_group, db_security_groups, vpc_security_groups, db_param_group, allocated_storage='20', engine='MySQL', engine_version='5.7.17', storage_encrypted='True', deletion_policy='Retain', multi_az=False): return stack.stack.add_resource(DBInstance('RDSInstance', DBInstanceIdentifier=db_instance_identifier, DBName=db_name, DBInstanceClass=db_instance_class, AllocatedStorage=allocated_storage, Engine=engine, EngineVersion=engine_version, MasterUsername=db_username, MasterUserPassword=db_password, DBSubnetGroupName=db_subnet_group, DBSecurityGroups=list(db_security_groups), VPCSecurityGroups=list(vpc_security_groups), DBParameterGroupName=db_param_group, StorageEncrypted=storage_encrypted, DeletionPolicy=deletion_policy, MultiAZ=multi_az))
def create_rds_instance(stack, db_instance_identifier, db_name, db_instance_class, db_username, db_password, db_subnet_group, db_security_groups, vpc_security_groups, db_param_group, allocated_storage='20', engine='MySQL', engine_version='5.7.17', storage_encrypted='True', deletion_policy='Retain', multi_az=False): return stack.stack.add_resource(DBInstance('RDSInstance', DBInstanceIdentifier=db_instance_identifier, DBName=db_name, DBInstanceClass=db_instance_class, AllocatedStorage=allocated_storage, Engine=engine, EngineVersion=engine_version, MasterUsername=db_username, MasterUserPassword=db_password, DBSubnetGroupName=db_subnet_group, DBSecurityGroups=list(db_security_groups), VPCSecurityGroups=list(vpc_security_groups), DBParameterGroupName=db_param_group, StorageEncrypted=storage_encrypted, DeletionPolicy=deletion_policy, MultiAZ=multi_az))<|docstring|>Add RDS Instance Resource.<|endoftext|>
8182611ce14b8db3493b0347586e67b207f1a89d92495e93aec445b1a3616b19
def mymake_blob_url(container_name, blob_name): "\n Creates the url to access a blob.\n container_name: Name of container.\n blob_name: Name of blob.\n account_name:\n Name of the storage account. If not specified, uses the account\n specified when BlobService was initialized.\n protocol:\n Protocol to use: 'http' or 'https'. If not specified, uses the\n protocol specified when BlobService was initialized.\n host_base:\n Live host base url. If not specified, uses the host base specified\n when BlobService was initialized.\n " return '{0}://{1}{2}/{3}/{4}'.format(settings.AZURE_PROTOCOL, settings.AZURE_STORAGE_ACCOUNT, settings.AZURE_HOST_BASE, container_name, blob_name)
Creates the url to access a blob. container_name: Name of container. blob_name: Name of blob. account_name: Name of the storage account. If not specified, uses the account specified when BlobService was initialized. protocol: Protocol to use: 'http' or 'https'. If not specified, uses the protocol specified when BlobService was initialized. host_base: Live host base url. If not specified, uses the host base specified when BlobService was initialized.
web/survey/views.py
mymake_blob_url
sbreslav/mimic
3
python
def mymake_blob_url(container_name, blob_name): "\n Creates the url to access a blob.\n container_name: Name of container.\n blob_name: Name of blob.\n account_name:\n Name of the storage account. If not specified, uses the account\n specified when BlobService was initialized.\n protocol:\n Protocol to use: 'http' or 'https'. If not specified, uses the\n protocol specified when BlobService was initialized.\n host_base:\n Live host base url. If not specified, uses the host base specified\n when BlobService was initialized.\n " return '{0}://{1}{2}/{3}/{4}'.format(settings.AZURE_PROTOCOL, settings.AZURE_STORAGE_ACCOUNT, settings.AZURE_HOST_BASE, container_name, blob_name)
def mymake_blob_url(container_name, blob_name): "\n Creates the url to access a blob.\n container_name: Name of container.\n blob_name: Name of blob.\n account_name:\n Name of the storage account. If not specified, uses the account\n specified when BlobService was initialized.\n protocol:\n Protocol to use: 'http' or 'https'. If not specified, uses the\n protocol specified when BlobService was initialized.\n host_base:\n Live host base url. If not specified, uses the host base specified\n when BlobService was initialized.\n " return '{0}://{1}{2}/{3}/{4}'.format(settings.AZURE_PROTOCOL, settings.AZURE_STORAGE_ACCOUNT, settings.AZURE_HOST_BASE, container_name, blob_name)<|docstring|>Creates the url to access a blob. container_name: Name of container. blob_name: Name of blob. account_name: Name of the storage account. If not specified, uses the account specified when BlobService was initialized. protocol: Protocol to use: 'http' or 'https'. If not specified, uses the protocol specified when BlobService was initialized. host_base: Live host base url. If not specified, uses the host base specified when BlobService was initialized.<|endoftext|>
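A worked example of the URL template above, with hypothetical values standing in for the Django settings the function reads; the host base shown is the conventional Azure blob endpoint suffix.

# Assumed settings values for illustration only.
AZURE_PROTOCOL = 'https'
AZURE_STORAGE_ACCOUNT = 'myaccount'
AZURE_HOST_BASE = '.blob.core.windows.net'

url = '{0}://{1}{2}/{3}/{4}'.format(AZURE_PROTOCOL, AZURE_STORAGE_ACCOUNT,
                                    AZURE_HOST_BASE, 'uploads', 'photo.png')
print(url)   # https://myaccount.blob.core.windows.net/uploads/photo.png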
f095b52f7ef4933307c4e15a5545f1ecb02ea121de8125e6305b1b92e54e321e
def find_FWHM(max_time_index, freq_index): '\n    Find full width at half maximum in time-direction for a given time and freq\n    index. The time index is assumed to be the index where the energy is maximal\n    for this frequency.\n\n    Returns (center time in seconds, FWHM in seconds)\n    ' max_value = energy_values[(max_time_index, freq_index)] min_time = times_in_seconds[max_time_index] max_time = times_in_seconds[max_time_index] for i in range((max_time_index + 1), energy_values.shape[0]): value = energy_values[(i, freq_index)] if (value < (max_value / 2)): max_time = ((max_time + times_in_seconds[i]) / 2) break max_time = times_in_seconds[i] for i in range((max_time_index - 1), 0, (- 1)): value = energy_values[(i, freq_index)] if (value < (max_value / 2)): min_time = ((min_time + times_in_seconds[i]) / 2) break min_time = times_in_seconds[i] avg = ((min_time + max_time) / 2) return (avg, (max_time - min_time))
Find full width at half maximum in time-direction for a given time and freq index. The time index is assumed to be the index where the energy is maximal for this frequency. Returns (center time in seconds, FWHM in seconds)
exp1-ligo/analyze.py
find_FWHM
rluhtaru/jlab
0
python
def find_FWHM(max_time_index, freq_index): '\n    Find full width at half maximum in time-direction for a given time and freq\n    index. The time index is assumed to be the index where the energy is maximal\n    for this frequency.\n\n    Returns (center time in seconds, FWHM in seconds)\n    ' max_value = energy_values[(max_time_index, freq_index)] min_time = times_in_seconds[max_time_index] max_time = times_in_seconds[max_time_index] for i in range((max_time_index + 1), energy_values.shape[0]): value = energy_values[(i, freq_index)] if (value < (max_value / 2)): max_time = ((max_time + times_in_seconds[i]) / 2) break max_time = times_in_seconds[i] for i in range((max_time_index - 1), 0, (- 1)): value = energy_values[(i, freq_index)] if (value < (max_value / 2)): min_time = ((min_time + times_in_seconds[i]) / 2) break min_time = times_in_seconds[i] avg = ((min_time + max_time) / 2) return (avg, (max_time - min_time))
def find_FWHM(max_time_index, freq_index): '\n    Find full width at half maximum in time-direction for a given time and freq\n    index. The time index is assumed to be the index where the energy is maximal\n    for this frequency.\n\n    Returns (center time in seconds, FWHM in seconds)\n    ' max_value = energy_values[(max_time_index, freq_index)] min_time = times_in_seconds[max_time_index] max_time = times_in_seconds[max_time_index] for i in range((max_time_index + 1), energy_values.shape[0]): value = energy_values[(i, freq_index)] if (value < (max_value / 2)): max_time = ((max_time + times_in_seconds[i]) / 2) break max_time = times_in_seconds[i] for i in range((max_time_index - 1), 0, (- 1)): value = energy_values[(i, freq_index)] if (value < (max_value / 2)): min_time = ((min_time + times_in_seconds[i]) / 2) break min_time = times_in_seconds[i] avg = ((min_time + max_time) / 2) return (avg, (max_time - min_time))<|docstring|>Find full width at half maximum in time-direction for a given time and freq index. The time index is assumed to be the index where the energy is maximal for this frequency. Returns (center time in seconds, FWHM in seconds)<|endoftext|>
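A self-contained sketch of the same half-maximum crossing idea on a synthetic pulse. The function above relies on module-level energy_values and times_in_seconds arrays, so the Gaussian test signal here is made up; for a Gaussian of width sigma the exact FWHM is 2*sqrt(2*ln 2)*sigma, which the crossing search should roughly recover.

import numpy as np

t = np.linspace(0.0, 10.0, 2001)          # time axis in seconds
sigma = 0.5
signal = np.exp(-0.5 * ((t - 5.0) / sigma) ** 2)

peak = int(np.argmax(signal))
half = signal[peak] / 2.0
# Walk outwards from the peak until the signal drops below half maximum.
left = peak
while left > 0 and signal[left] >= half:
    left -= 1
right = peak
while right < len(signal) - 1 and signal[right] >= half:
    right += 1
fwhm = t[right] - t[left]
print(fwhm, 2.0 * np.sqrt(2.0 * np.log(2.0)) * sigma)   # both ~1.18 s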
bafc607b0f02c36f0077f3bbb819f9e55d5180f2210a2c5ff5572ab8b3e179d0
def gwfreq(t, t0, M): '\n Frequency (not angular frequency!) model for gravitational waves.\n t - time in seconds\n t0 - event time in seconds\n M - chirp mass in sun masses\n Returns f in Hz.\n ' const = (((1 / (2 * np.pi)) * 948.5) * np.power((1 / M), (5 / 8))) TIME_CUTOFF = 1e-05 return (const * np.power(np.maximum((t0 - t), TIME_CUTOFF), ((- 3) / 8)))
Frequency (not angular frequency!) model for gravitational waves. t - time in seconds t0 - event time in seconds M - chirp mass in sun masses Returns f in Hz.
exp1-ligo/analyze.py
gwfreq
rluhtaru/jlab
0
python
def gwfreq(t, t0, M): '\n Frequency (not angular frequency!) model for gravitational waves.\n t - time in seconds\n t0 - event time in seconds\n M - chirp mass in sun masses\n Returns f in Hz.\n ' const = (((1 / (2 * np.pi)) * 948.5) * np.power((1 / M), (5 / 8))) TIME_CUTOFF = 1e-05 return (const * np.power(np.maximum((t0 - t), TIME_CUTOFF), ((- 3) / 8)))
def gwfreq(t, t0, M): '\n Frequency (not angular frequency!) model for gravitational waves.\n t - time in seconds\n t0 - event time in seconds\n M - chirp mass in sun masses\n Returns f in Hz.\n ' const = (((1 / (2 * np.pi)) * 948.5) * np.power((1 / M), (5 / 8))) TIME_CUTOFF = 1e-05 return (const * np.power(np.maximum((t0 - t), TIME_CUTOFF), ((- 3) / 8)))<|docstring|>Frequency (not angular frequency!) model for gravitational waves. t - time in seconds t0 - event time in seconds M - chirp mass in sun masses Returns f in Hz.<|endoftext|>
aa396d5774ff5f6392d1e1ead36fbb0daf4474b3ddac86b362d98e0cc788770b
def inv_gwfreq(f, t0, M): '\n Inverse of gwfreq.\n ' return (t0 - ((((948.5 / (2 * np.pi)) ** (8 / 3)) * np.power((1 / M), (5 / 3))) * np.power(f, ((- 8) / 3))))
Inverse of gwfreq.
exp1-ligo/analyze.py
inv_gwfreq
rluhtaru/jlab
0
python
def inv_gwfreq(f, t0, M): '\n \n ' return (t0 - ((((948.5 / (2 * np.pi)) ** (8 / 3)) * np.power((1 / M), (5 / 3))) * np.power(f, ((- 8) / 3))))
def inv_gwfreq(f, t0, M): '\n \n ' return (t0 - ((((948.5 / (2 * np.pi)) ** (8 / 3)) * np.power((1 / M), (5 / 3))) * np.power(f, ((- 8) / 3))))<|docstring|>Inverse of gwfreq.<|endoftext|>
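A quick numerical consistency check between the two formulas above: away from the small-time cutoff, inv_gwfreq undoes gwfreq exactly. The chirp mass and event time below are arbitrary test values, and the functions are restated here so the check is self-contained.

import numpy as np

def gwfreq(t, t0, M):
    # Chirp frequency in Hz, as in the snippet above (cutoff at 1e-05 s).
    const = (1 / (2 * np.pi)) * 948.5 * (1 / M) ** (5 / 8)
    return const * np.maximum(t0 - t, 1e-05) ** (-3 / 8)

def inv_gwfreq(f, t0, M):
    # Inverse mapping from frequency back to time.
    return t0 - (948.5 / (2 * np.pi)) ** (8 / 3) * (1 / M) ** (5 / 3) * f ** (-8 / 3)

t0, M = 10.0, 30.0                      # seconds, solar masses
t = np.array([5.0, 9.0, 9.9])
print(np.allclose(inv_gwfreq(gwfreq(t, t0, M), t0, M), t))   # True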
ac159b40dc981b71e48421be003cde88b67a0540a43e376d82f5807d81563a18
@mock.patch('requests.post', side_effect=mocked_requests_post) def test_report_for_unknown_app(self, mock_post): 'When the app is unknown scout will return an HTTP 404 but the report function should just act normally' scout = Scout(app='unknown', version='0.1.0', install_id=install_id) resp = scout.report() logging.debug(('SR: %s' % resp)) self.assertEqual(resp, {'latest_version': '0.1.0'})
When the app is unknown scout will return an HTTP 404 but the report function should just act normally
scout/test_scout.py
test_report_for_unknown_app
datawire/scout-py
0
python
@mock.patch('requests.post', side_effect=mocked_requests_post) def test_report_for_unknown_app(self, mock_post): scout = Scout(app='unknown', version='0.1.0', install_id=install_id) resp = scout.report() logging.debug(('SR: %s' % resp)) self.assertEqual(resp, {'latest_version': '0.1.0'})
@mock.patch('requests.post', side_effect=mocked_requests_post) def test_report_for_unknown_app(self, mock_post): scout = Scout(app='unknown', version='0.1.0', install_id=install_id) resp = scout.report() logging.debug(('SR: %s' % resp)) self.assertEqual(resp, {'latest_version': '0.1.0'})<|docstring|>When the app is unknown scout will return an HTTP 404 but the report function should just act normally<|endoftext|>
5616f40a847bb8f31e9774f5d850ee0653781830168eb0757178510d0c6bb21b
@mock.patch('requests.post', side_effect=mocked_requests_post) def test_report(self, mock_post): 'Scout backend returns the latest version. The scout client returns this to the caller.' scout = Scout(app='foshizzolator', version='0.1.0', install_id=install_id) resp = scout.report() self.assertEqual(resp, {'latest_version': '0.2.0'})
Scout backend returns the latest version. The scout client returns this to the caller.
scout/test_scout.py
test_report
datawire/scout-py
0
python
@mock.patch('requests.post', side_effect=mocked_requests_post) def test_report(self, mock_post): scout = Scout(app='foshizzolator', version='0.1.0', install_id=install_id) resp = scout.report() self.assertEqual(resp, {'latest_version': '0.2.0'})
@mock.patch('requests.post', side_effect=mocked_requests_post) def test_report(self, mock_post): scout = Scout(app='foshizzolator', version='0.1.0', install_id=install_id) resp = scout.report() self.assertEqual(resp, {'latest_version': '0.2.0'})<|docstring|>Scout backend returns the latest version. The scout client returns this to the caller.<|endoftext|>
b3900a532c2e55607c4e78f6f8a33ed97959f360c73eba5c14975419c7509411
@mock.patch('requests.post', side_effect=mocked_requests_post) def test_plugin(self, mock_post): 'Scout install-id plugin should set the install_id and requisite metadata.' scout = Scout(app='foshizzolator', version='0.1.0', id_plugin=install_id_plugin, id_plugin_args={'hello': 'world'}) self.assertEqual(scout.install_id, PLUGIN_UUID) self.assertEqual(scout.metadata['new_install'], True) self.assertEqual(scout.metadata['swallow_speed'], 42) self.assertEqual(scout.metadata['hello'], 'world')
Scout install-id plugin should set the install_id and requisite metadata.
scout/test_scout.py
test_plugin
datawire/scout-py
0
python
@mock.patch('requests.post', side_effect=mocked_requests_post) def test_plugin(self, mock_post): scout = Scout(app='foshizzolator', version='0.1.0', id_plugin=install_id_plugin, id_plugin_args={'hello': 'world'}) self.assertEqual(scout.install_id, PLUGIN_UUID) self.assertEqual(scout.metadata['new_install'], True) self.assertEqual(scout.metadata['swallow_speed'], 42) self.assertEqual(scout.metadata['hello'], 'world')
@mock.patch('requests.post', side_effect=mocked_requests_post) def test_plugin(self, mock_post): scout = Scout(app='foshizzolator', version='0.1.0', id_plugin=install_id_plugin, id_plugin_args={'hello': 'world'}) self.assertEqual(scout.install_id, PLUGIN_UUID) self.assertEqual(scout.metadata['new_install'], True) self.assertEqual(scout.metadata['swallow_speed'], 42) self.assertEqual(scout.metadata['hello'], 'world')<|docstring|>Scout install-id plugin should set the install_id and requisite metadata.<|endoftext|>
ce656cf81bd49658b22312d9d717b01515a3c9bdc07372f6e9926676ac3e0c51
def build_mnist_mlp_net(model, input_blob_name): 'Create a feedforward neural network composed of fully connected layers.\n    A final softmax layer is used to get probabilities for the 10 numbers.' fc_layer_0_input_dims = MNIST_IMG_PIXEL_NUM fc_layer_0_output_dims = (MNIST_IMG_PIXEL_NUM * 2) fc_layer_0 = brew.fc(model, input_blob_name, 'fc_layer_0', dim_in=fc_layer_0_input_dims, dim_out=fc_layer_0_output_dims) relu_layer_0 = brew.relu(model, fc_layer_0, 'relu_layer_0') fc_layer_1_input_dims = fc_layer_0_output_dims fc_layer_1_output_dims = (MNIST_IMG_PIXEL_NUM * 2) fc_layer_1 = brew.fc(model, relu_layer_0, 'fc_layer_1', dim_in=fc_layer_1_input_dims, dim_out=fc_layer_1_output_dims) relu_layer_1 = brew.relu(model, fc_layer_1, 'relu_layer_1') fc_layer_2_input_dims = fc_layer_1_output_dims fc_layer_2_output_dims = MNIST_IMG_PIXEL_NUM fc_layer_2 = brew.fc(model, relu_layer_1, 'fc_layer_2', dim_in=fc_layer_2_input_dims, dim_out=fc_layer_2_output_dims) relu_layer_2 = brew.relu(model, fc_layer_2, 'relu_layer_2') softmax_layer = brew.softmax(model, relu_layer_2, 'softmax_layer') return softmax_layer
Create a feedforward neural network composed of fully connected layers. A final softmax layer is used to get probabilities for the 10 numbers.
Chapter02/infer_mnist_mlp.py
build_mnist_mlp_net
PacktPublishing/Caffe2-Quick-Start-Guide
8
python
def build_mnist_mlp_net(model, input_blob_name): 'Create a feedforward neural network composed of fully connected layers.\n    A final softmax layer is used to get probabilities for the 10 numbers.' fc_layer_0_input_dims = MNIST_IMG_PIXEL_NUM fc_layer_0_output_dims = (MNIST_IMG_PIXEL_NUM * 2) fc_layer_0 = brew.fc(model, input_blob_name, 'fc_layer_0', dim_in=fc_layer_0_input_dims, dim_out=fc_layer_0_output_dims) relu_layer_0 = brew.relu(model, fc_layer_0, 'relu_layer_0') fc_layer_1_input_dims = fc_layer_0_output_dims fc_layer_1_output_dims = (MNIST_IMG_PIXEL_NUM * 2) fc_layer_1 = brew.fc(model, relu_layer_0, 'fc_layer_1', dim_in=fc_layer_1_input_dims, dim_out=fc_layer_1_output_dims) relu_layer_1 = brew.relu(model, fc_layer_1, 'relu_layer_1') fc_layer_2_input_dims = fc_layer_1_output_dims fc_layer_2_output_dims = MNIST_IMG_PIXEL_NUM fc_layer_2 = brew.fc(model, relu_layer_1, 'fc_layer_2', dim_in=fc_layer_2_input_dims, dim_out=fc_layer_2_output_dims) relu_layer_2 = brew.relu(model, fc_layer_2, 'relu_layer_2') softmax_layer = brew.softmax(model, relu_layer_2, 'softmax_layer') return softmax_layer
def build_mnist_mlp_net(model, input_blob_name): 'Create a feedforward neural network composed of fully connected layers.\n    A final softmax layer is used to get probabilities for the 10 numbers.' fc_layer_0_input_dims = MNIST_IMG_PIXEL_NUM fc_layer_0_output_dims = (MNIST_IMG_PIXEL_NUM * 2) fc_layer_0 = brew.fc(model, input_blob_name, 'fc_layer_0', dim_in=fc_layer_0_input_dims, dim_out=fc_layer_0_output_dims) relu_layer_0 = brew.relu(model, fc_layer_0, 'relu_layer_0') fc_layer_1_input_dims = fc_layer_0_output_dims fc_layer_1_output_dims = (MNIST_IMG_PIXEL_NUM * 2) fc_layer_1 = brew.fc(model, relu_layer_0, 'fc_layer_1', dim_in=fc_layer_1_input_dims, dim_out=fc_layer_1_output_dims) relu_layer_1 = brew.relu(model, fc_layer_1, 'relu_layer_1') fc_layer_2_input_dims = fc_layer_1_output_dims fc_layer_2_output_dims = MNIST_IMG_PIXEL_NUM fc_layer_2 = brew.fc(model, relu_layer_1, 'fc_layer_2', dim_in=fc_layer_2_input_dims, dim_out=fc_layer_2_output_dims) relu_layer_2 = brew.relu(model, fc_layer_2, 'relu_layer_2') softmax_layer = brew.softmax(model, relu_layer_2, 'softmax_layer') return softmax_layer<|docstring|>Create a feedforward neural network composed of fully connected layers. A final softmax layer is used to get probabilities for the 10 numbers.<|endoftext|>
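Dimension bookkeeping for the network above, assuming MNIST_IMG_PIXEL_NUM is the flattened 28x28 image size (784) defined elsewhere in the original script. Note that, as written, the final fully connected layer is also MNIST_IMG_PIXEL_NUM wide, so the softmax runs over 784 values rather than the 10 digit classes the docstring mentions.

MNIST_IMG_PIXEL_NUM = 28 * 28                 # 784, assumed definition
layer_widths = [MNIST_IMG_PIXEL_NUM,          # input blob
                MNIST_IMG_PIXEL_NUM * 2,      # fc_layer_0 / relu_layer_0
                MNIST_IMG_PIXEL_NUM * 2,      # fc_layer_1 / relu_layer_1
                MNIST_IMG_PIXEL_NUM]          # fc_layer_2 / relu_layer_2 / softmax
print(layer_widths)                           # [784, 1568, 1568, 784]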
db1b8139b67e13911f0799d9081d802032693a7ca8a03daad31b0870e44f0a95
def set_model_weights(inference_model): 'Set the weights of the fully connected layers in the inference network.\n Weights are pre-trained and are read from NumPy files on disk.' for (i, layer_blob_name) in enumerate(inference_model.params): layer_weights_filepath = 'mnist_mlp_weights/{}.npy'.format(str(i)) layer_weights = np.load(layer_weights_filepath, allow_pickle=False) workspace.FeedBlob(layer_blob_name, layer_weights)
Set the weights of the fully connected layers in the inference network. Weights are pre-trained and are read from NumPy files on disk.
Chapter02/infer_mnist_mlp.py
set_model_weights
PacktPublishing/Caffe2-Quick-Start-Guide
8
python
def set_model_weights(inference_model): 'Set the weights of the fully connected layers in the inference network.\n Weights are pre-trained and are read from NumPy files on disk.' for (i, layer_blob_name) in enumerate(inference_model.params): layer_weights_filepath = 'mnist_mlp_weights/{}.npy'.format(str(i)) layer_weights = np.load(layer_weights_filepath, allow_pickle=False) workspace.FeedBlob(layer_blob_name, layer_weights)
def set_model_weights(inference_model): 'Set the weights of the fully connected layers in the inference network.\n Weights are pre-trained and are read from NumPy files on disk.' for (i, layer_blob_name) in enumerate(inference_model.params): layer_weights_filepath = 'mnist_mlp_weights/{}.npy'.format(str(i)) layer_weights = np.load(layer_weights_filepath, allow_pickle=False) workspace.FeedBlob(layer_blob_name, layer_weights)<|docstring|>Set the weights of the fully connected layers in the inference network. Weights are pre-trained and are read from NumPy files on disk.<|endoftext|>
1f3cb5ac142c0a92158b9e5ad6697edb4f8e87fdaa0018adc4fde7bf691b1051
def backoff(self, item): 'Get the backoff time for an item (in seconds)' exp = self._failures[item] self._failures[item] = (exp + 1) backoff = min((self.base_delay * (2 ** exp)), self.max_delay) return backoff
Get the backoff time for an item (in seconds)
dask-gateway-server/dask_gateway_server/workqueue.py
backoff
CrispyCrafter/dask-gateway
69
python
def backoff(self, item): exp = self._failures[item] self._failures[item] = (exp + 1) backoff = min((self.base_delay * (2 ** exp)), self.max_delay) return backoff
def backoff(self, item): exp = self._failures[item] self._failures[item] = (exp + 1) backoff = min((self.base_delay * (2 ** exp)), self.max_delay) return backoff<|docstring|>Get the backoff time for an item (in seconds)<|endoftext|>
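A standalone sketch of the same capped exponential backoff rule, min(base_delay * 2**failures, max_delay); the class name and the base/cap values below are illustrative, not the defaults used by dask-gateway.

from collections import defaultdict

class Backoff:
    def __init__(self, base_delay=1.0, max_delay=60.0):
        self.base_delay = base_delay
        self.max_delay = max_delay
        self._failures = defaultdict(int)   # failure count per item

    def backoff(self, item):
        # Each call records one more failure and returns the capped delay.
        exp = self._failures[item]
        self._failures[item] = exp + 1
        return min(self.base_delay * 2 ** exp, self.max_delay)

b = Backoff(base_delay=1, max_delay=60)
print([b.backoff('task') for _ in range(8)])   # [1, 2, 4, 8, 16, 32, 60, 60]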
60ad761840782561d14880de2b709953ee6601d749b176c38aacfb19d4baaae7
def failures(self, item): 'Get the number of failures seen for an item' return self._failures.get(item, 0)
Get the number of failures seen for an item
dask-gateway-server/dask_gateway_server/workqueue.py
failures
CrispyCrafter/dask-gateway
69
python
def failures(self, item): return self._failures.get(item, 0)
def failures(self, item): return self._failures.get(item, 0)<|docstring|>Get the number of failures seen for an item<|endoftext|>
614af9cc7e2e0a4d714b4a565827bc2b6fe926dbad39b86169a63918582c68fe
def reset(self, item): 'Reset the backoff for an item' self._failures.pop(item, None)
Reset the backoff for an item
dask-gateway-server/dask_gateway_server/workqueue.py
reset
CrispyCrafter/dask-gateway
69
python
def reset(self, item): self._failures.pop(item, None)
def reset(self, item): self._failures.pop(item, None)<|docstring|>Reset the backoff for an item<|endoftext|>
369139195261de49070f70b0b9245afb24bd6539657302d41705f4c7ebe613f9
def is_empty(self): 'True if there are no items queued' return (not self._dirty)
True if there are no items queued
dask-gateway-server/dask_gateway_server/workqueue.py
is_empty
CrispyCrafter/dask-gateway
69
python
def is_empty(self): return (not self._dirty)
def is_empty(self): return (not self._dirty)<|docstring|>True if there are no items queued<|endoftext|>
a84614db1aa8c711d556185e69f1bcfb7a2e964609b9329bb8ffb08946962d07
def put(self, item): 'Put an item in the queue' self._put(item) self._wakeup_next()
Put an item in the queue
dask-gateway-server/dask_gateway_server/workqueue.py
put
CrispyCrafter/dask-gateway
69
python
def put(self, item): self._put(item) self._wakeup_next()
def put(self, item): self._put(item) self._wakeup_next()<|docstring|>Put an item in the queue<|endoftext|>
dee4a6d42c1a1e825b1e5b8cc8230fae995749c8d4def2900f1a69289bc96f01
def put_after(self, item, delay): 'Schedule an item to be put in the queue after a delay.\n\n If the item is already scheduled, it will be rescheduled only if the\n delay would enqueue it sooner than the existing schedule.\n ' when = (self._loop.time() + delay) existing = self._timers.get(item, None) if ((existing is None) or (existing[1] > when)): if (existing is not None): existing[0].cancel() if (delay > 0): self._timers[item] = (self._loop.call_at(when, self._put_delayed, item), when) else: self._timers.pop(item, None) self.put(item)
Schedule an item to be put in the queue after a delay. If the item is already scheduled, it will be rescheduled only if the delay would enqueue it sooner than the existing schedule.
dask-gateway-server/dask_gateway_server/workqueue.py
put_after
CrispyCrafter/dask-gateway
69
python
def put_after(self, item, delay): 'Schedule an item to be put in the queue after a delay.\n\n If the item is already scheduled, it will be rescheduled only if the\n delay would enqueue it sooner than the existing schedule.\n ' when = (self._loop.time() + delay) existing = self._timers.get(item, None) if ((existing is None) or (existing[1] > when)): if (existing is not None): existing[0].cancel() if (delay > 0): self._timers[item] = (self._loop.call_at(when, self._put_delayed, item), when) else: self._timers.pop(item, None) self.put(item)
def put_after(self, item, delay): 'Schedule an item to be put in the queue after a delay.\n\n If the item is already scheduled, it will be rescheduled only if the\n delay would enqueue it sooner than the existing schedule.\n ' when = (self._loop.time() + delay) existing = self._timers.get(item, None) if ((existing is None) or (existing[1] > when)): if (existing is not None): existing[0].cancel() if (delay > 0): self._timers[item] = (self._loop.call_at(when, self._put_delayed, item), when) else: self._timers.pop(item, None) self.put(item)<|docstring|>Schedule an item to be put in the queue after a delay. If the item is already scheduled, it will be rescheduled only if the delay would enqueue it sooner than the existing schedule.<|endoftext|>
1f601cb60adeee6849374b66720cfa21f4db034e5bd0fc31b22bb9a69d943b14
def put_backoff(self, item): 'Schedule an item to be put in the queue after a backoff.\n\n If the item is already scheduled, it will be rescheduled only if the\n delay would enqueue it sooner than the existing schedule.\n ' self.put_after(item, self.backoff.backoff(item))
Schedule an item to be put in the queue after a backoff. If the item is already scheduled, it will be rescheduled only if the delay would enqueue it sooner than the existing schedule.
dask-gateway-server/dask_gateway_server/workqueue.py
put_backoff
CrispyCrafter/dask-gateway
69
python
def put_backoff(self, item): 'Schedule an item to be put in the queue after a backoff.\n\n If the item is already scheduled, it will be rescheduled only if the\n delay would enqueue it sooner than the existing schedule.\n ' self.put_after(item, self.backoff.backoff(item))
def put_backoff(self, item): 'Schedule an item to be put in the queue after a backoff.\n\n If the item is already scheduled, it will be rescheduled only if the\n delay would enqueue it sooner than the existing schedule.\n ' self.put_after(item, self.backoff.backoff(item))<|docstring|>Schedule an item to be put in the queue after a backoff. If the item is already scheduled, it will be rescheduled only if the delay would enqueue it sooner than the existing schedule.<|endoftext|>
faae193dce3aeeea95166094ff32c46d5b9fd5904ea1dbed96aa29e6f0a74b0c
def failures(self, item): 'Get the number of failures seen for this item' return self.backoff.failures(item)
Get the number of failures seen for this item
dask-gateway-server/dask_gateway_server/workqueue.py
failures
CrispyCrafter/dask-gateway
69
python
def failures(self, item): return self.backoff.failures(item)
def failures(self, item): return self.backoff.failures(item)<|docstring|>Get the number of failures seen for this item<|endoftext|>
eb577479d514a5af70e885e94151b40c02ddd9bb587eb94ae7bfe9bcd318f31e
def reset_backoff(self, item): 'Reset the backoff for an item' self.backoff.reset(item)
Reset the backoff for an item
dask-gateway-server/dask_gateway_server/workqueue.py
reset_backoff
CrispyCrafter/dask-gateway
69
python
def reset_backoff(self, item): self.backoff.reset(item)
def reset_backoff(self, item): self.backoff.reset(item)<|docstring|>Reset the backoff for an item<|endoftext|>
24e01784088d0ecd21356896cd8b7bd6f7e131586bcc83e3f3818bbe4ddb4f39
async def get(self): 'Get an item from the queue.' while (not self._queue): if self.closed: raise WorkQueueClosed waiter = self._loop.create_future() self._waiting.append(waiter) try: (await waiter) except asyncio.CancelledError: try: self._waiting.remove(waiter) except ValueError: pass raise return self._get()
Get an item from the queue.
dask-gateway-server/dask_gateway_server/workqueue.py
get
CrispyCrafter/dask-gateway
69
python
async def get(self): while (not self._queue): if self.closed: raise WorkQueueClosed waiter = self._loop.create_future() self._waiting.append(waiter) try: (await waiter) except asyncio.CancelledError: try: self._waiting.remove(waiter) except ValueError: pass raise return self._get()
async def get(self): while (not self._queue): if self.closed: raise WorkQueueClosed waiter = self._loop.create_future() self._waiting.append(waiter) try: (await waiter) except asyncio.CancelledError: try: self._waiting.remove(waiter) except ValueError: pass raise return self._get()<|docstring|>Get an item from the queue.<|endoftext|>
a70e0df38e8534f1be3c201842c6f73b6f3aa11a3447a80e81f5c63be48cedf4
def task_done(self, item): 'Mark a task as done.\n\n This *must* be done before the item can be processed again.\n ' self._processing.discard(item) if (item in self._dirty): self._queue.append(item) self._wakeup_next()
Mark a task as done. This *must* be done before the item can be processed again.
dask-gateway-server/dask_gateway_server/workqueue.py
task_done
CrispyCrafter/dask-gateway
69
python
def task_done(self, item): 'Mark a task as done.\n\n This *must* be done before the item can be processed again.\n ' self._processing.discard(item) if (item in self._dirty): self._queue.append(item) self._wakeup_next()
def task_done(self, item): 'Mark a task as done.\n\n This *must* be done before the item can be processed again.\n ' self._processing.discard(item) if (item in self._dirty): self._queue.append(item) self._wakeup_next()<|docstring|>Mark a task as done. This *must* be done before the item can be processed again.<|endoftext|>
d5c2c903c031ce524c9cf47b4469c12ff016fce3920e5942add12c7d7f267bda
def close(self): 'Close the queue.\n\n Future calls to ``WorkQueue.get`` will raise ``WorkQueueClosed``\n ' self.closed = True self._wakeup_all()
Close the queue. Future calls to ``WorkQueue.get`` will raise ``WorkQueueClosed``
dask-gateway-server/dask_gateway_server/workqueue.py
close
CrispyCrafter/dask-gateway
69
python
def close(self): 'Close the queue.\n\n Future calls to ``WorkQueue.get`` will raise ``WorkQueueClosed``\n ' self.closed = True self._wakeup_all()
def close(self): 'Close the queue.\n\n Future calls to ``WorkQueue.get`` will raise ``WorkQueueClosed``\n ' self.closed = True self._wakeup_all()<|docstring|>Close the queue. Future calls to ``WorkQueue.get`` will raise ``WorkQueueClosed``<|endoftext|>
5fd7f03ad715ada996cae20c5e472a598b0512abd392b7cbeff88a1e8a3fcfb1
@functools.lru_cache() def get_attribute_classes() -> Dict[(str, Attribute)]: '\n Lookup all builtin Attribute subclasses, load them, and return a dict of\n attribute name -> class.\n ' attribute_children = pkgutil.iter_modules(importlib.import_module('lawu.attributes').__path__, prefix='lawu.attributes.') result = {} for (_, name, _) in attribute_children: classes = inspect.getmembers(importlib.import_module(name), (lambda c: (inspect.isclass(c) and issubclass(c, Attribute) and (c is not Attribute)))) for (class_name, class_) in classes: attribute_name = getattr(class_, 'ATTRIBUTE_NAME', class_name[:(- 9)]) result[attribute_name.lower()] = class_ return result
Lookup all builtin Attribute subclasses, load them, and return a dict of attribute name -> class.
lawu/attribute.py
get_attribute_classes
nickelpro/Lawu
31
python
@functools.lru_cache() def get_attribute_classes() -> Dict[(str, Attribute)]: '\n Lookup all builtin Attribute subclasses, load them, and return a dict of\n attribute name -> class.\n ' attribute_children = pkgutil.iter_modules(importlib.import_module('lawu.attributes').__path__, prefix='lawu.attributes.') result = {} for (_, name, _) in attribute_children: classes = inspect.getmembers(importlib.import_module(name), (lambda c: (inspect.isclass(c) and issubclass(c, Attribute) and (c is not Attribute)))) for (class_name, class_) in classes: attribute_name = getattr(class_, 'ATTRIBUTE_NAME', class_name[:(- 9)]) result[attribute_name.lower()] = class_ return result
@functools.lru_cache() def get_attribute_classes() -> Dict[(str, Attribute)]: '\n Lookup all builtin Attribute subclasses, load them, and return a dict of\n attribute name -> class.\n ' attribute_children = pkgutil.iter_modules(importlib.import_module('lawu.attributes').__path__, prefix='lawu.attributes.') result = {} for (_, name, _) in attribute_children: classes = inspect.getmembers(importlib.import_module(name), (lambda c: (inspect.isclass(c) and issubclass(c, Attribute) and (c is not Attribute)))) for (class_name, class_) in classes: attribute_name = getattr(class_, 'ATTRIBUTE_NAME', class_name[:(- 9)]) result[attribute_name.lower()] = class_ return result<|docstring|>Lookup all builtin Attribute subclasses, load them, and return a dict of attribute name -> class.<|endoftext|>
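A small sketch of the naming convention used above: a subclass named e.g. CodeAttribute maps to the key 'code' (the trailing 'Attribute', nine characters, is sliced off) unless it defines ATTRIBUTE_NAME. The classes here are made up for illustration; only the lookup logic mirrors the snippet.

class Attribute:
    pass

class CodeAttribute(Attribute):           # hypothetical subclass
    pass

class SourceFileAttribute(Attribute):     # hypothetical subclass with an override
    ATTRIBUTE_NAME = 'SourceFile'

result = {}
for class_ in (CodeAttribute, SourceFileAttribute):
    attribute_name = getattr(class_, 'ATTRIBUTE_NAME', class_.__name__[:-9])
    result[attribute_name.lower()] = class_
print(sorted(result))                     # ['code', 'sourcefile']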
e284d3b289d2d9e8f8cccbde03c99abe5e764bce80390ad3c14bc91b771fcdbc
@staticmethod def from_binary(pool, source): 'Called when converting a ClassFile into an AST.' raise NotImplementedError()
Called when converting a ClassFile into an AST.
lawu/attribute.py
from_binary
nickelpro/Lawu
31
python
@staticmethod def from_binary(pool, source): raise NotImplementedError()
@staticmethod def from_binary(pool, source): raise NotImplementedError()<|docstring|>Called when converting a ClassFile into an AST.<|endoftext|>
fb3b9d3f7df7dea711129824f63de323bde393629e99030c738f7c240ef2bf93
def update(self): 'Update ticks' local = time.localtime(time.time()) self.timeSprite.add(Message((time.strftime('%H:%M:%S', local),), vector=(0, 0), fontsize=90, align='left', padding=0, fgcolor=(0, 0, 255))) surfaceRect = self.image.get_rect() self.timeSprite.sprite.rect.midbottom = surfaceRect.center self.timeSprite.draw(self.baseImage) self.dateSprite.add(Message((time.strftime('%Y-%m-%d', local),), vector=(0, 0), fontsize=25, align='left', padding=0, fgcolor=(0, 0, 255))) self.dateSprite.sprite.rect.midtop = self.timeSprite.sprite.rect.midbottom self.dateSprite.draw(self.baseImage)
Update ticks
faces/digitalclock.py
update
khan-git/pialarmclock
1
python
def update(self): local = time.localtime(time.time()) self.timeSprite.add(Message((time.strftime('%H:%M:%S', local),), vector=(0, 0), fontsize=90, align='left', padding=0, fgcolor=(0, 0, 255))) surfaceRect = self.image.get_rect() self.timeSprite.sprite.rect.midbottom = surfaceRect.center self.timeSprite.draw(self.baseImage) self.dateSprite.add(Message((time.strftime('%Y-%m-%d', local),), vector=(0, 0), fontsize=25, align='left', padding=0, fgcolor=(0, 0, 255))) self.dateSprite.sprite.rect.midtop = self.timeSprite.sprite.rect.midbottom self.dateSprite.draw(self.baseImage)
def update(self): local = time.localtime(time.time()) self.timeSprite.add(Message((time.strftime('%H:%M:%S', local),), vector=(0, 0), fontsize=90, align='left', padding=0, fgcolor=(0, 0, 255))) surfaceRect = self.image.get_rect() self.timeSprite.sprite.rect.midbottom = surfaceRect.center self.timeSprite.draw(self.baseImage) self.dateSprite.add(Message((time.strftime('%Y-%m-%d', local),), vector=(0, 0), fontsize=25, align='left', padding=0, fgcolor=(0, 0, 255))) self.dateSprite.sprite.rect.midtop = self.timeSprite.sprite.rect.midbottom self.dateSprite.draw(self.baseImage)<|docstring|>Update ticks<|endoftext|>
42d2440e1a3a098909a5d6c5fa5e44e3ee64f3b2b92026f8faef37b04453b817
def Diff_mat_r(Nr, r): "\n Args:\n Nr : number of points\n r : list of r's\n Returns:\n Dr_1d : d/dr\n rDr_1d : 1/r * d/dr\n D2r_1d : d^2/dr^2\n " Dr_1d = sp.diags([(- 1), 1], [(- 1), 1], shape=(Nr, Nr)) Dr_1d = sp.lil_matrix(Dr_1d) Dr_1d[(0, [0, 1, 2])] = [(- 3), 4, (- 1)] Dr_1d[((Nr - 1), [(Nr - 3), (Nr - 2), (Nr - 1)])] = [1, (- 4), 3] rDr_1d = Dr_1d.T.multiply((1 / r)).T D2r_1d = sp.diags([1, (- 2), 1], [(- 1), 0, 1], shape=(Nr, Nr)) D2r_1d = sp.lil_matrix(D2r_1d) D2r_1d[(0, [0, 1, 2, 3])] = [2, (- 5), 4, (- 1)] D2r_1d[((Nr - 1), [(Nr - 4), (Nr - 3), (Nr - 2), (Nr - 1)])] = [(- 1), 4, (- 5), 2] return (Dr_1d, rDr_1d, D2r_1d)
Args: Nr : number of points r : list of r's Returns: Dr_1d : d/dr rDr_1d : 1/r * d/dr D2r_1d : d^2/dr^2
diff_matrices_polar.py
Diff_mat_r
itrosen/hall-solver
0
python
def Diff_mat_r(Nr, r): "\n Args:\n Nr : number of points\n r : list of r's\n Returns:\n Dr_1d : d/dr\n rDr_1d : 1/r * d/dr\n D2r_1d : d^2/dr^2\n " Dr_1d = sp.diags([(- 1), 1], [(- 1), 1], shape=(Nr, Nr)) Dr_1d = sp.lil_matrix(Dr_1d) Dr_1d[(0, [0, 1, 2])] = [(- 3), 4, (- 1)] Dr_1d[((Nr - 1), [(Nr - 3), (Nr - 2), (Nr - 1)])] = [1, (- 4), 3] rDr_1d = Dr_1d.T.multiply((1 / r)).T D2r_1d = sp.diags([1, (- 2), 1], [(- 1), 0, 1], shape=(Nr, Nr)) D2r_1d = sp.lil_matrix(D2r_1d) D2r_1d[(0, [0, 1, 2, 3])] = [2, (- 5), 4, (- 1)] D2r_1d[((Nr - 1), [(Nr - 4), (Nr - 3), (Nr - 2), (Nr - 1)])] = [(- 1), 4, (- 5), 2] return (Dr_1d, rDr_1d, D2r_1d)
def Diff_mat_r(Nr, r): "\n Args:\n Nr : number of points\n r : list of r's\n Returns:\n Dr_1d : d/dr\n rDr_1d : 1/r * d/dr\n D2r_1d : d^2/dr^2\n " Dr_1d = sp.diags([(- 1), 1], [(- 1), 1], shape=(Nr, Nr)) Dr_1d = sp.lil_matrix(Dr_1d) Dr_1d[(0, [0, 1, 2])] = [(- 3), 4, (- 1)] Dr_1d[((Nr - 1), [(Nr - 3), (Nr - 2), (Nr - 1)])] = [1, (- 4), 3] rDr_1d = Dr_1d.T.multiply((1 / r)).T D2r_1d = sp.diags([1, (- 2), 1], [(- 1), 0, 1], shape=(Nr, Nr)) D2r_1d = sp.lil_matrix(D2r_1d) D2r_1d[(0, [0, 1, 2, 3])] = [2, (- 5), 4, (- 1)] D2r_1d[((Nr - 1), [(Nr - 4), (Nr - 3), (Nr - 2), (Nr - 1)])] = [(- 1), 4, (- 5), 2] return (Dr_1d, rDr_1d, D2r_1d)<|docstring|>Args: Nr : number of points r : list of r's Returns: Dr_1d : d/dr rDr_1d : 1/r * d/dr D2r_1d : d^2/dr^2<|endoftext|>
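A quick check of the stencils above, with Diff_mat_r from the snippet in scope and scipy.sparse imported as sp (as the original module assumes). Applied to f(r) = r**2 on a unit-spaced grid, the first-derivative stencil returns 2*h*f' (so dividing by 2 gives 2*r exactly for a quadratic, including the one-sided boundary rows) and the second-derivative stencil returns h**2*f'' = 2 in every row; the 1/h and 1/h**2 factors are left to the caller.

import numpy as np
import scipy.sparse as sp   # alias assumed by Diff_mat_r above

Nr = 8
r = np.arange(1.0, Nr + 1.0)            # unit spacing, r > 0
Dr_1d, rDr_1d, D2r_1d = Diff_mat_r(Nr, r)

f = r ** 2
print((Dr_1d @ f) / 2.0)                # == 2*r (the stencil spans 2*h, h = 1)
print(D2r_1d @ f)                       # == 2 in every row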
e4257d9d74dffc9aaa83bbd0df131180b5df2d9df837be77cb4f07518479f63a
def Diff_mat_t(Nt): '\n    Args:\n        Nt : number of points\n    Returns:\n        Dt_1d : d/dt\n        D2t_1d : d^2/dt^2\n    ' Dt_1d = sp.diags([(- 1), 1], [(- 1), 1], shape=(Nt, Nt)) Dt_1d = sp.lil_matrix(Dt_1d) Dt_1d[(0, (- 1))] = [(- 1)] Dt_1d[((- 1), 0)] = [1] D2t_1d = sp.diags([1, (- 2), 1], [(- 1), 0, 1], shape=(Nt, Nt)) D2t_1d = sp.lil_matrix(D2t_1d) D2t_1d[(0, (- 1))] = [1] D2t_1d[((- 1), 0)] = [1] return (Dt_1d, D2t_1d)
Args: Nt : number of points Returns: Dt_1d : d/dt D2t_1d : d^2/dt^2
diff_matrices_polar.py
Diff_mat_t
itrosen/hall-solver
0
python
def Diff_mat_t(Nt): '\n    Args:\n        Nt : number of points\n    Returns:\n        Dt_1d : d/dt\n        D2t_1d : d^2/dt^2\n    ' Dt_1d = sp.diags([(- 1), 1], [(- 1), 1], shape=(Nt, Nt)) Dt_1d = sp.lil_matrix(Dt_1d) Dt_1d[(0, (- 1))] = [(- 1)] Dt_1d[((- 1), 0)] = [1] D2t_1d = sp.diags([1, (- 2), 1], [(- 1), 0, 1], shape=(Nt, Nt)) D2t_1d = sp.lil_matrix(D2t_1d) D2t_1d[(0, (- 1))] = [1] D2t_1d[((- 1), 0)] = [1] return (Dt_1d, D2t_1d)
def Diff_mat_t(Nt): '\n    Args:\n        Nt : number of points\n    Returns:\n        Dt_1d : d/dt\n        D2t_1d : d^2/dt^2\n    ' Dt_1d = sp.diags([(- 1), 1], [(- 1), 1], shape=(Nt, Nt)) Dt_1d = sp.lil_matrix(Dt_1d) Dt_1d[(0, (- 1))] = [(- 1)] Dt_1d[((- 1), 0)] = [1] D2t_1d = sp.diags([1, (- 2), 1], [(- 1), 0, 1], shape=(Nt, Nt)) D2t_1d = sp.lil_matrix(D2t_1d) D2t_1d[(0, (- 1))] = [1] D2t_1d[((- 1), 0)] = [1] return (Dt_1d, D2t_1d)<|docstring|>Args: Nt : number of points Returns: Dt_1d : d/dt D2t_1d : d^2/dt^2<|endoftext|>
03ff09f717870f334a50c314f5b41ef586a1e7feeebd47bae6c661a9790f5979
def Diff_mat_2D_polar(Nr, Nt, r): '\n Args:\n Nr : number of points in radial coordinate\n Nt : number of points in theta coordinate\n r : radial points\n Returns: \n Finite element matrices for the 2D space, in sparse format\n Dr_2d : d/dr\n rDr_2d : 1/r * d/dr\n d2r_2d : d^2/dr^2\n rDt_2d : 1/r * d/dt\n r2D2t_2d : 1/r^2 * d^2/dt^2\n ' (Dr_1d, rDr_1d, D2r_1d) = Diff_mat_r(Nr, r) (Dt_1d, D2t_1d) = Diff_mat_t(Nt) Ir = sp.eye(Nr) It = sp.eye(Nt) Rr = sp.spdiags([(1 / r)], [0], Nr, Nr) R2r = sp.spdiags([(1 / (r ** 2))], [0], Nr, Nr) Dr_2d = sp.kron(It, Dr_1d) rDr_2d = sp.kron(It, rDr_1d) D2r_2d = sp.kron(It, D2r_1d) rDt_2d = sp.kron(Dt_1d, Rr) r2D2t_2d = sp.kron(D2t_1d, R2r) return (Dr_2d.tocsr(), rDr_2d.tocsr(), D2r_2d.tocsr(), rDt_2d.tocsr(), r2D2t_2d.tocsr())
Args: Nr : number of points in radial coordinate Nt : number of points in theta coordinate r : radial points Returns: Finite element matrices for the 2D space, in sparse format Dr_2d : d/dr rDr_2d : 1/r * d/dr d2r_2d : d^2/dr^2 rDt_2d : 1/r * d/dt r2D2t_2d : 1/r^2 * d^2/dt^2
diff_matrices_polar.py
Diff_mat_2D_polar
itrosen/hall-solver
0
python
def Diff_mat_2D_polar(Nr, Nt, r): '\n Args:\n Nr : number of points in radial coordinate\n Nt : number of points in theta coordinate\n r : radial points\n Returns: \n Finite element matrices for the 2D space, in sparse format\n Dr_2d : d/dr\n rDr_2d : 1/r * d/dr\n d2r_2d : d^2/dr^2\n rDt_2d : 1/r * d/dt\n r2D2t_2d : 1/r^2 * d^2/dt^2\n ' (Dr_1d, rDr_1d, D2r_1d) = Diff_mat_r(Nr, r) (Dt_1d, D2t_1d) = Diff_mat_t(Nt) Ir = sp.eye(Nr) It = sp.eye(Nt) Rr = sp.spdiags([(1 / r)], [0], Nr, Nr) R2r = sp.spdiags([(1 / (r ** 2))], [0], Nr, Nr) Dr_2d = sp.kron(It, Dr_1d) rDr_2d = sp.kron(It, rDr_1d) D2r_2d = sp.kron(It, D2r_1d) rDt_2d = sp.kron(Dt_1d, Rr) r2D2t_2d = sp.kron(D2t_1d, R2r) return (Dr_2d.tocsr(), rDr_2d.tocsr(), D2r_2d.tocsr(), rDt_2d.tocsr(), r2D2t_2d.tocsr())
def Diff_mat_2D_polar(Nr, Nt, r): '\n Args:\n Nr : number of points in radial coordinate\n Nt : number of points in theta coordinate\n r : radial points\n Returns: \n Finite element matrices for the 2D space, in sparse format\n Dr_2d : d/dr\n rDr_2d : 1/r * d/dr\n d2r_2d : d^2/dr^2\n rDt_2d : 1/r * d/dt\n r2D2t_2d : 1/r^2 * d^2/dt^2\n ' (Dr_1d, rDr_1d, D2r_1d) = Diff_mat_r(Nr, r) (Dt_1d, D2t_1d) = Diff_mat_t(Nt) Ir = sp.eye(Nr) It = sp.eye(Nt) Rr = sp.spdiags([(1 / r)], [0], Nr, Nr) R2r = sp.spdiags([(1 / (r ** 2))], [0], Nr, Nr) Dr_2d = sp.kron(It, Dr_1d) rDr_2d = sp.kron(It, rDr_1d) D2r_2d = sp.kron(It, D2r_1d) rDt_2d = sp.kron(Dt_1d, Rr) r2D2t_2d = sp.kron(D2t_1d, R2r) return (Dr_2d.tocsr(), rDr_2d.tocsr(), D2r_2d.tocsr(), rDt_2d.tocsr(), r2D2t_2d.tocsr())<|docstring|>Args: Nr : number of points in radial coordinate Nt : number of points in theta coordinate r : radial points Returns: Finite element matrices for the 2D space, in sparse format Dr_2d : d/dr rDr_2d : 1/r * d/dr d2r_2d : d^2/dr^2 rDt_2d : 1/r * d/dt r2D2t_2d : 1/r^2 * d^2/dt^2<|endoftext|>
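A sketch of the Kronecker-product layout used above, with Diff_mat_2D_polar (and the 1-D helpers it calls) in scope: kron(It, D2r_1d) applies the radial stencil to each theta block independently, and kron(D2t_1d, R2r) differences across theta, so the flattened vector is ordered with the radial index varying fastest. A field with the same radial profile at every theta therefore has a constant radial second difference and a zero theta derivative. The grid sizes below are arbitrary.

import numpy as np
import scipy.sparse as sp   # alias assumed by the helpers above

Nr, Nt = 6, 4
r = np.arange(1.0, Nr + 1.0)
Dr_2d, rDr_2d, D2r_2d, rDt_2d, r2D2t_2d = Diff_mat_2D_polar(Nr, Nt, r)

F = np.tile(r ** 2, Nt)                 # same quadratic profile at every theta
print(np.allclose(D2r_2d @ F, 2.0))     # True: h**2 * d2f/dr2 = 2 everywhere
print(np.allclose(r2D2t_2d @ F, 0.0))   # True: no theta dependence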
c37b8f94dde8de063721c631320612d425252da5d78ed04595e94df307947acc
def test_tagging_erg_sent(self): ' Test import tokens ' txt = 'In this way I am no doubt indirectly responsible for Dr. Grimesby Roylott\'s death, and I cannot say that it is likely to weigh very heavily upon my conscience." ' words = ['in', 'this', 'way', 'i', 'am', 'no', 'doubt', 'indirectly', 'responsible', 'for', 'dr.', 'Grimesby', 'Roylott', "'s", 'death', ',', 'and', 'i', 'can', 'not', 'say', 'that', 'it', 'is', 'likely', 'to', 'weigh', 'very', 'heavily', 'upon', 'my', 'conscience', '.', '"'] s = ttl.Sentence(txt) s._import_tokens(words) self.assertEqual(words, [x.text for x in s.tokens])
Test import tokens
test/test_ttlib.py
test_tagging_erg_sent
letuananh/chirptext
5
python
def test_tagging_erg_sent(self): ' ' txt = 'In this way I am no doubt indirectly responsible for Dr. Grimesby Roylott\'s death, and I cannot say that it is likely to weigh very heavily upon my conscience." ' words = ['in', 'this', 'way', 'i', 'am', 'no', 'doubt', 'indirectly', 'responsible', 'for', 'dr.', 'Grimesby', 'Roylott', "'s", 'death', ',', 'and', 'i', 'can', 'not', 'say', 'that', 'it', 'is', 'likely', 'to', 'weigh', 'very', 'heavily', 'upon', 'my', 'conscience', '.', '"'] s = ttl.Sentence(txt) s._import_tokens(words) self.assertEqual(words, [x.text for x in s.tokens])
def test_tagging_erg_sent(self): ' ' txt = 'In this way I am no doubt indirectly responsible for Dr. Grimesby Roylott\'s death, and I cannot say that it is likely to weigh very heavily upon my conscience." ' words = ['in', 'this', 'way', 'i', 'am', 'no', 'doubt', 'indirectly', 'responsible', 'for', 'dr.', 'Grimesby', 'Roylott', "'s", 'death', ',', 'and', 'i', 'can', 'not', 'say', 'that', 'it', 'is', 'likely', 'to', 'weigh', 'very', 'heavily', 'upon', 'my', 'conscience', '.', '"'] s = ttl.Sentence(txt) s._import_tokens(words) self.assertEqual(words, [x.text for x in s.tokens])<|docstring|>Test import tokens<|endoftext|>
7a155b17e0fda3c9ec9e8680746ed6bfbf79141f25594f287083b3bf822279cd
def weight_path(model_path): ' Get path of weights based on path to IR\n\n Params:\n model_path: the string contains path to IR file\n\n Return:\n Path to weights file\n ' assert model_path.endswith('.xml'), 'Wrong topology path was provided' return (model_path[:(- 3)] + 'bin')
Get path of weights based on path to IR Params: model_path: the string contains path to IR file Return: Path to weights file
modules/gapi/misc/python/samples/gaze_estimation.py
weight_path
badfilms/opencv
56,632
python
def weight_path(model_path): ' Get path of weights based on path to IR\n\n Params:\n model_path: the string contains path to IR file\n\n Return:\n Path to weights file\n ' assert model_path.endswith('.xml'), 'Wrong topology path was provided' return (model_path[:(- 3)] + 'bin')
def weight_path(model_path): ' Get path of weights based on path to IR\n\n Params:\n model_path: the string contains path to IR file\n\n Return:\n Path to weights file\n ' assert model_path.endswith('.xml'), 'Wrong topology path was provided' return (model_path[:(- 3)] + 'bin')<|docstring|>Get path of weights based on path to IR Params: model_path: the string contains path to IR file Return: Path to weights file<|endoftext|>
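A worked example of the path convention above: the slice drops the final three characters ('xml') and 'bin' is appended in their place.

model_path = 'face-detection-retail-0005.xml'
print(model_path[:-3] + 'bin')   # face-detection-retail-0005.bin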
07a601fda718f4b1ea94d398d259bd7e80880d83ab930d40bce6b65bc989e657
def build_argparser(): ' Parse arguments from command line\n\n    Return:\n        Pack of arguments from command line\n    ' parser = argparse.ArgumentParser(description='This is an OpenCV-based version of Gaze Estimation example') parser.add_argument('--input', help='Path to the input video file') parser.add_argument('--out', help='Path to the output video file') parser.add_argument('--facem', default='face-detection-retail-0005.xml', help='Path to OpenVINO face detection model (.xml)') parser.add_argument('--faced', default='CPU', help=('Target device for the face detection' + '(e.g. CPU, GPU, VPU, ...)')) parser.add_argument('--headm', default='head-pose-estimation-adas-0001.xml', help='Path to OpenVINO head pose estimation model (.xml)') parser.add_argument('--headd', default='CPU', help=('Target device for the head pose estimation inference ' + '(e.g. CPU, GPU, VPU, ...)')) parser.add_argument('--landm', default='facial-landmarks-35-adas-0002.xml', help='Path to OpenVINO landmarks detector model (.xml)') parser.add_argument('--landd', default='CPU', help='Target device for the landmarks detector (e.g. CPU, GPU, VPU, ...)') parser.add_argument('--gazem', default='gaze-estimation-adas-0002.xml', help='Path to OpenVINO gaze vector estimation model (.xml)') parser.add_argument('--gazed', default='CPU', help=('Target device for the gaze vector estimation inference ' + '(e.g. CPU, GPU, VPU, ...)')) parser.add_argument('--eyem', default='open-closed-eye-0001.xml', help='Path to OpenVINO open closed eye model (.xml)') parser.add_argument('--eyed', default='CPU', help='Target device for the eyes state inference (e.g. CPU, GPU, VPU, ...)') return parser
Parse arguments from command line Return: Pack of arguments from command line
modules/gapi/misc/python/samples/gaze_estimation.py
build_argparser
badfilms/opencv
56,632
python
def build_argparser(): ' Parse arguments from command line\n\n    Return:\n        Pack of arguments from command line\n    ' parser = argparse.ArgumentParser(description='This is an OpenCV-based version of Gaze Estimation example') parser.add_argument('--input', help='Path to the input video file') parser.add_argument('--out', help='Path to the output video file') parser.add_argument('--facem', default='face-detection-retail-0005.xml', help='Path to OpenVINO face detection model (.xml)') parser.add_argument('--faced', default='CPU', help=('Target device for the face detection' + '(e.g. CPU, GPU, VPU, ...)')) parser.add_argument('--headm', default='head-pose-estimation-adas-0001.xml', help='Path to OpenVINO head pose estimation model (.xml)') parser.add_argument('--headd', default='CPU', help=('Target device for the head pose estimation inference ' + '(e.g. CPU, GPU, VPU, ...)')) parser.add_argument('--landm', default='facial-landmarks-35-adas-0002.xml', help='Path to OpenVINO landmarks detector model (.xml)') parser.add_argument('--landd', default='CPU', help='Target device for the landmarks detector (e.g. CPU, GPU, VPU, ...)') parser.add_argument('--gazem', default='gaze-estimation-adas-0002.xml', help='Path to OpenVINO gaze vector estimation model (.xml)') parser.add_argument('--gazed', default='CPU', help=('Target device for the gaze vector estimation inference ' + '(e.g. CPU, GPU, VPU, ...)')) parser.add_argument('--eyem', default='open-closed-eye-0001.xml', help='Path to OpenVINO open closed eye model (.xml)') parser.add_argument('--eyed', default='CPU', help='Target device for the eyes state inference (e.g. CPU, GPU, VPU, ...)') return parser
def build_argparser(): ' Parse arguments from command line\n\n    Return:\n        Pack of arguments from command line\n    ' parser = argparse.ArgumentParser(description='This is an OpenCV-based version of Gaze Estimation example') parser.add_argument('--input', help='Path to the input video file') parser.add_argument('--out', help='Path to the output video file') parser.add_argument('--facem', default='face-detection-retail-0005.xml', help='Path to OpenVINO face detection model (.xml)') parser.add_argument('--faced', default='CPU', help=('Target device for the face detection' + '(e.g. CPU, GPU, VPU, ...)')) parser.add_argument('--headm', default='head-pose-estimation-adas-0001.xml', help='Path to OpenVINO head pose estimation model (.xml)') parser.add_argument('--headd', default='CPU', help=('Target device for the head pose estimation inference ' + '(e.g. CPU, GPU, VPU, ...)')) parser.add_argument('--landm', default='facial-landmarks-35-adas-0002.xml', help='Path to OpenVINO landmarks detector model (.xml)') parser.add_argument('--landd', default='CPU', help='Target device for the landmarks detector (e.g. CPU, GPU, VPU, ...)') parser.add_argument('--gazem', default='gaze-estimation-adas-0002.xml', help='Path to OpenVINO gaze vector estimation model (.xml)') parser.add_argument('--gazed', default='CPU', help=('Target device for the gaze vector estimation inference ' + '(e.g. CPU, GPU, VPU, ...)')) parser.add_argument('--eyem', default='open-closed-eye-0001.xml', help='Path to OpenVINO open closed eye model (.xml)') parser.add_argument('--eyed', default='CPU', help='Target device for the eyes state inference (e.g. CPU, GPU, VPU, ...)') return parser<|docstring|>Parse arguments from command line Return: Pack of arguments from command line<|endoftext|>
24e71050e980d58a1dccf053c55487a88f563e80eb0da790858282b47ef248a8
def intersection(surface, rect): ' Remove zone of out of bound from ROI\n\n Params:\n surface: image bounds is rect representation (top left coordinates and width and height)\n rect: region of interest is also has rect representation\n\n Return:\n Modified ROI with correct bounds\n ' l_x = max(surface[0], rect[0]) l_y = max(surface[1], rect[1]) width = (min((surface[0] + surface[2]), (rect[0] + rect[2])) - l_x) height = (min((surface[1] + surface[3]), (rect[1] + rect[3])) - l_y) if ((width < 0) or (height < 0)): return (0, 0, 0, 0) return (l_x, l_y, width, height)
Remove zone of out of bound from ROI Params: surface: image bounds is rect representation (top left coordinates and width and height) rect: region of interest is also has rect representation Return: Modified ROI with correct bounds
modules/gapi/misc/python/samples/gaze_estimation.py
intersection
badfilms/opencv
56,632
python
def intersection(surface, rect): ' Remove zone of out of bound from ROI\n\n Params:\n surface: image bounds is rect representation (top left coordinates and width and height)\n rect: region of interest is also has rect representation\n\n Return:\n Modified ROI with correct bounds\n ' l_x = max(surface[0], rect[0]) l_y = max(surface[1], rect[1]) width = (min((surface[0] + surface[2]), (rect[0] + rect[2])) - l_x) height = (min((surface[1] + surface[3]), (rect[1] + rect[3])) - l_y) if ((width < 0) or (height < 0)): return (0, 0, 0, 0) return (l_x, l_y, width, height)
def intersection(surface, rect): ' Remove zone of out of bound from ROI\n\n Params:\n surface: image bounds is rect representation (top left coordinates and width and height)\n rect: region of interest is also has rect representation\n\n Return:\n Modified ROI with correct bounds\n ' l_x = max(surface[0], rect[0]) l_y = max(surface[1], rect[1]) width = (min((surface[0] + surface[2]), (rect[0] + rect[2])) - l_x) height = (min((surface[1] + surface[3]), (rect[1] + rect[3])) - l_y) if ((width < 0) or (height < 0)): return (0, 0, 0, 0) return (l_x, l_y, width, height)<|docstring|>Remove zone of out of bound from ROI Params: surface: image bounds is rect representation (top left coordinates and width and height) rect: region of interest is also has rect representation Return: Modified ROI with correct bounds<|endoftext|>
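A worked example of the clipping above, with intersection from the snippet in scope: an ROI that runs past the right edge of a 640x480 frame is trimmed to the overlapping part, and a fully disjoint ROI collapses to (0, 0, 0, 0).

frame = (0, 0, 640, 480)                           # x, y, width, height
print(intersection(frame, (600, 100, 100, 100)))   # (600, 100, 40, 100)
print(intersection(frame, (700, 500, 50, 50)))     # (0, 0, 0, 0)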
c21cb23a2e5bdd56e04221a8a70e837a82b6ce34c493d992a13a5500c9e5e8d6
def process_landmarks(r_x, r_y, r_w, r_h, landmarks): ' Create points from result of inference of facial-landmarks network and size of input image\n\n Params:\n r_x: x coordinate of top left corner of input image\n r_y: y coordinate of top left corner of input image\n r_w: width of input image\n r_h: height of input image\n landmarks: result of inference of facial-landmarks network\n\n Return:\n Array of landmarks points for one face\n ' lmrks = landmarks[0] raw_x = ((lmrks[::2] * r_w) + r_x) raw_y = ((lmrks[1::2] * r_h) + r_y) return np.array([[int(x), int(y)] for (x, y) in zip(raw_x, raw_y)])
Create points from result of inference of facial-landmarks network and size of input image Params: r_x: x coordinate of top left corner of input image r_y: y coordinate of top left corner of input image r_w: width of input image r_h: height of input image landmarks: result of inference of facial-landmarks network Return: Array of landmarks points for one face
modules/gapi/misc/python/samples/gaze_estimation.py
process_landmarks
badfilms/opencv
56,632
python
def process_landmarks(r_x, r_y, r_w, r_h, landmarks): ' Create points from result of inference of facial-landmarks network and size of input image\n\n Params:\n r_x: x coordinate of top left corner of input image\n r_y: y coordinate of top left corner of input image\n r_w: width of input image\n r_h: height of input image\n landmarks: result of inference of facial-landmarks network\n\n Return:\n Array of landmarks points for one face\n ' lmrks = landmarks[0] raw_x = ((lmrks[::2] * r_w) + r_x) raw_y = ((lmrks[1::2] * r_h) + r_y) return np.array([[int(x), int(y)] for (x, y) in zip(raw_x, raw_y)])
def process_landmarks(r_x, r_y, r_w, r_h, landmarks): ' Create points from result of inference of facial-landmarks network and size of input image\n\n Params:\n r_x: x coordinate of top left corner of input image\n r_y: y coordinate of top left corner of input image\n r_w: width of input image\n r_h: height of input image\n landmarks: result of inference of facial-landmarks network\n\n Return:\n Array of landmarks points for one face\n ' lmrks = landmarks[0] raw_x = ((lmrks[::2] * r_w) + r_x) raw_y = ((lmrks[1::2] * r_h) + r_y) return np.array([[int(x), int(y)] for (x, y) in zip(raw_x, raw_y)])<|docstring|>Create points from result of inference of facial-landmarks network and size of input image Params: r_x: x coordinate of top left corner of input image r_y: y coordinate of top left corner of input image r_w: width of input image r_h: height of input image landmarks: result of inference of facial-landmarks network Return: Array of landmarks points for one face<|endoftext|>
d15a9977860a98db3ed710442de472f133726d1f0f72d3107865361ac8061535
def eye_box(p_1, p_2, scale=1.8): ' Get bounding box of eye\n\n Params:\n p_1: point of left edge of eye\n p_2: point of right edge of eye\n scale: change size of box with this value\n\n Return:\n Bounding box of eye and its midpoint\n ' size = np.linalg.norm((p_1 - p_2)) midpoint = ((p_1 + p_2) / 2) width = (scale * size) height = width p_x = (midpoint[0] - (width / 2)) p_y = (midpoint[1] - (height / 2)) return ((int(p_x), int(p_y), int(width), int(height)), list(map(int, midpoint)))
Get bounding box of eye Params: p_1: point of left edge of eye p_2: point of right edge of eye scale: change size of box with this value Return: Bounding box of eye and its midpoint
modules/gapi/misc/python/samples/gaze_estimation.py
eye_box
badfilms/opencv
56,632
python
def eye_box(p_1, p_2, scale=1.8): ' Get bounding box of eye\n\n Params:\n p_1: point of left edge of eye\n p_2: point of right edge of eye\n scale: change size of box with this value\n\n Return:\n Bounding box of eye and its midpoint\n ' size = np.linalg.norm((p_1 - p_2)) midpoint = ((p_1 + p_2) / 2) width = (scale * size) height = width p_x = (midpoint[0] - (width / 2)) p_y = (midpoint[1] - (height / 2)) return ((int(p_x), int(p_y), int(width), int(height)), list(map(int, midpoint)))
def eye_box(p_1, p_2, scale=1.8): ' Get bounding box of eye\n\n Params:\n p_1: point of left edge of eye\n p_2: point of right edge of eye\n scale: change size of box with this value\n\n Return:\n Bounding box of eye and its midpoint\n ' size = np.linalg.norm((p_1 - p_2)) midpoint = ((p_1 + p_2) / 2) width = (scale * size) height = width p_x = (midpoint[0] - (width / 2)) p_y = (midpoint[1] - (height / 2)) return ((int(p_x), int(p_y), int(width), int(height)), list(map(int, midpoint)))<|docstring|>Get bounding box of eye Params: p_1: point of left edge of eye p_2: point of right edge of eye scale: change size of box with this value Return: Bounding box of eye and its midpoint<|endoftext|>
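A worked example of the eye-box geometry above, with eye_box from the snippet in scope: for eye corners 30 px apart and scale=2.0, the box is a 60x60 square centred on the corners' midpoint.

import numpy as np

p_left = np.array([100.0, 200.0])
p_right = np.array([130.0, 200.0])
print(eye_box(p_left, p_right, scale=2.0))   # ((85, 170, 60, 60), [115, 200])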
518d054721de28bc2de7213023d484a4e52098ef9396c37963eb8234edc955a5
@staticmethod def run(in_ys, in_ps, in_rs): ' Custom kernel executable code\n\n        Params:\n            in_ys: yaw angle of head\n            in_ps: pitch angle of head\n            in_rs: roll angle of head\n\n        Return:\n            Arrays with heads poses\n        ' return [np.array([ys[0], ps[0], rs[0]]).T for (ys, ps, rs) in zip(in_ys, in_ps, in_rs)]
Custom kernel executable code Params: in_ys: yaw angle of head in_ps: pitch angle of head in_rs: roll angle of head Return: Arrays with heads poses
modules/gapi/misc/python/samples/gaze_estimation.py
run
badfilms/opencv
56,632
python
@staticmethod def run(in_ys, in_ps, in_rs): ' Сustom kernel executable code\n\n Params:\n in_ys: yaw angle of head\n in_ps: pitch angle of head\n in_rs: roll angle of head\n\n Return:\n Arrays with heads poses\n ' return [np.array([ys[0], ps[0], rs[0]]).T for (ys, ps, rs) in zip(in_ys, in_ps, in_rs)]
@staticmethod def run(in_ys, in_ps, in_rs): ' Сustom kernel executable code\n\n Params:\n in_ys: yaw angle of head\n in_ps: pitch angle of head\n in_rs: roll angle of head\n\n Return:\n Arrays with heads poses\n ' return [np.array([ys[0], ps[0], rs[0]]).T for (ys, ps, rs) in zip(in_ys, in_ps, in_rs)]<|docstring|>Сustom kernel executable code Params: in_ys: yaw angle of head in_ps: pitch angle of head in_rs: roll angle of head Return: Arrays with heads poses<|endoftext|>
da2c85b431d473464923ba78f0e32ea34253f608127ebffe1dd36d5af6875441
@staticmethod def run(in_landm_per_face, in_face_rcs, frame_size): ' Сustom kernel executable code\n\n Params:\n in_landm_per_face: landmarks from inference of facial-landmarks network for each face\n in_face_rcs: bounding boxes for each face\n frame_size: size of input image\n\n Return:\n Arrays of ROI for left and right eyes, array of midpoints and\n array of landmarks points\n ' left_eyes = [] right_eyes = [] midpoints = [] lmarks = [] surface = (0, 0, *frame_size) for (landm_face, rect) in zip(in_landm_per_face, in_face_rcs): points = process_landmarks(*rect, landm_face) lmarks.extend(points) (rect, midpoint_l) = eye_box(points[0], points[1]) left_eyes.append(intersection(surface, rect)) (rect, midpoint_r) = eye_box(points[2], points[3]) right_eyes.append(intersection(surface, rect)) midpoints.append(midpoint_l) midpoints.append(midpoint_r) return (left_eyes, right_eyes, midpoints, lmarks)
Сustom kernel executable code Params: in_landm_per_face: landmarks from inference of facial-landmarks network for each face in_face_rcs: bounding boxes for each face frame_size: size of input image Return: Arrays of ROI for left and right eyes, array of midpoints and array of landmarks points
modules/gapi/misc/python/samples/gaze_estimation.py
run
badfilms/opencv
56,632
python
@staticmethod def run(in_landm_per_face, in_face_rcs, frame_size): ' Сustom kernel executable code\n\n Params:\n in_landm_per_face: landmarks from inference of facial-landmarks network for each face\n in_face_rcs: bounding boxes for each face\n frame_size: size of input image\n\n Return:\n Arrays of ROI for left and right eyes, array of midpoints and\n array of landmarks points\n ' left_eyes = [] right_eyes = [] midpoints = [] lmarks = [] surface = (0, 0, *frame_size) for (landm_face, rect) in zip(in_landm_per_face, in_face_rcs): points = process_landmarks(*rect, landm_face) lmarks.extend(points) (rect, midpoint_l) = eye_box(points[0], points[1]) left_eyes.append(intersection(surface, rect)) (rect, midpoint_r) = eye_box(points[2], points[3]) right_eyes.append(intersection(surface, rect)) midpoints.append(midpoint_l) midpoints.append(midpoint_r) return (left_eyes, right_eyes, midpoints, lmarks)
@staticmethod def run(in_landm_per_face, in_face_rcs, frame_size): ' Сustom kernel executable code\n\n Params:\n in_landm_per_face: landmarks from inference of facial-landmarks network for each face\n in_face_rcs: bounding boxes for each face\n frame_size: size of input image\n\n Return:\n Arrays of ROI for left and right eyes, array of midpoints and\n array of landmarks points\n ' left_eyes = [] right_eyes = [] midpoints = [] lmarks = [] surface = (0, 0, *frame_size) for (landm_face, rect) in zip(in_landm_per_face, in_face_rcs): points = process_landmarks(*rect, landm_face) lmarks.extend(points) (rect, midpoint_l) = eye_box(points[0], points[1]) left_eyes.append(intersection(surface, rect)) (rect, midpoint_r) = eye_box(points[2], points[3]) right_eyes.append(intersection(surface, rect)) midpoints.append(midpoint_l) midpoints.append(midpoint_r) return (left_eyes, right_eyes, midpoints, lmarks)<|docstring|>Сustom kernel executable code Params: in_landm_per_face: landmarks from inference of facial-landmarks network for each face in_face_rcs: bounding boxes for each face frame_size: size of input image Return: Arrays of ROI for left and right eyes, array of midpoints and array of landmarks points<|endoftext|>
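The intersection(surface, rect) helper used above is not part of this excerpt; a plausible sketch (an assumption, not necessarily the sample's exact implementation) simply clamps an (x, y, w, h) box to the frame rectangle:

def intersection(surface, rect):
    # Overlap of two (x, y, width, height) boxes; no overlap -> zero-sized box.
    l_x = max(surface[0], rect[0])
    l_y = max(surface[1], rect[1])
    width = min(surface[0] + surface[2], rect[0] + rect[2]) - l_x
    height = min(surface[1] + surface[3], rect[1] + rect[3]) - l_y
    if width < 0 or height < 0:
        return (0, 0, 0, 0)
    return (l_x, l_y, width, height)

# e.g. an eye box partly outside a 640x480 frame gets clipped:
print(intersection((0, 0, 640, 480), (600, -10, 72, 72)))  # (600, 0, 40, 62)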
36472f12893f824518e1eee9bf4e236b86a40deba9ab7330482e7ed3c4bfe771
@staticmethod def run(eyesl, eyesr): ' Сustom kernel executable code\n\n    Params:\n    eyesl: result of inference of open-closed-eye network for left eye\n    eyesr: result of inference of open-closed-eye network for right eye\n\n    Return:\n    States of left eyes and states of right eyes\n    ' out_l_st = [int(st) for eye_l in eyesl for st in (eye_l[:, 0] < eye_l[:, 1]).ravel()] out_r_st = [int(st) for eye_r in eyesr for st in (eye_r[:, 0] < eye_r[:, 1]).ravel()] return (out_l_st, out_r_st)
Сustom kernel executable code Params: eyesl: result of inference of open-closed-eye network for left eye eyesr: result of inference of open-closed-eye network for right eye Return: States of left eyes and states of right eyes
modules/gapi/misc/python/samples/gaze_estimation.py
run
badfilms/opencv
56,632
python
@staticmethod def run(eyesl, eyesr): ' Сustom kernel executable code\n\n    Params:\n    eyesl: result of inference of open-closed-eye network for left eye\n    eyesr: result of inference of open-closed-eye network for right eye\n\n    Return:\n    States of left eyes and states of right eyes\n    ' out_l_st = [int(st) for eye_l in eyesl for st in (eye_l[:, 0] < eye_l[:, 1]).ravel()] out_r_st = [int(st) for eye_r in eyesr for st in (eye_r[:, 0] < eye_r[:, 1]).ravel()] return (out_l_st, out_r_st)
@staticmethod def run(eyesl, eyesr): ' Сustom kernel executable code\n\n    Params:\n    eyesl: result of inference of open-closed-eye network for left eye\n    eyesr: result of inference of open-closed-eye network for right eye\n\n    Return:\n    States of left eyes and states of right eyes\n    ' out_l_st = [int(st) for eye_l in eyesl for st in (eye_l[:, 0] < eye_l[:, 1]).ravel()] out_r_st = [int(st) for eye_r in eyesr for st in (eye_r[:, 0] < eye_r[:, 1]).ravel()] return (out_l_st, out_r_st)<|docstring|>Сustom kernel executable code Params: eyesl: result of inference of open-closed-eye network for left eye eyesr: result of inference of open-closed-eye network for right eye Return: States of left eyes and states of right eyes<|endoftext|>
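The list comprehensions above reduce the two-score output of the open-closed-eye network to a binary state per eye; a tiny numeric illustration (the channel order "closed, open" is an assumption):

import numpy as np

eyes_left = [np.array([[0.2, 0.8]]),   # score[1] > score[0] -> treated as open
             np.array([[0.9, 0.1]])]   # score[0] > score[1] -> treated as closed
states = [int(st) for eye_l in eyes_left for st in (eye_l[:, 0] < eye_l[:, 1]).ravel()]
print(states)  # [1, 0]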
8a3bb0fda7abca0a31b1e3d237b84e5c1dd3eda107622116855d97a480c2f859
@pytest.fixture(scope='session') def engine(): 'Create the engine.' return create_engine('postgresql://localhost/pollbot_test')
Create the engine.
tests/conftest.py
engine
shubham-king/poll
112
python
@pytest.fixture(scope='session') def engine(): return create_engine('postgresql://localhost/pollbot_test')
@pytest.fixture(scope='session') def engine(): return create_engine('postgresql://localhost/pollbot_test')<|docstring|>Create the engine.<|endoftext|>
fc03157f0927a3c75e178bbc812a8f85c0f35ef69a952194a8cb7fe5625de707
@pytest.fixture(scope='session') def tables(engine): 'Create the base schema.' with engine.connect() as con: con.execute('CREATE EXTENSION IF NOT EXISTS pg_trgm;') con.execute('CREATE EXTENSION IF NOT EXISTS pgcrypto;') base.metadata.create_all(engine) (yield) base.metadata.drop_all(engine)
Create the base schema.
tests/conftest.py
tables
shubham-king/poll
112
python
@pytest.fixture(scope='session') def tables(engine): with engine.connect() as con: con.execute('CREATE EXTENSION IF NOT EXISTS pg_trgm;') con.execute('CREATE EXTENSION IF NOT EXISTS pgcrypto;') base.metadata.create_all(engine) (yield) base.metadata.drop_all(engine)
@pytest.fixture(scope='session') def tables(engine): with engine.connect() as con: con.execute('CREATE EXTENSION IF NOT EXISTS pg_trgm;') con.execute('CREATE EXTENSION IF NOT EXISTS pgcrypto;') base.metadata.create_all(engine) (yield) base.metadata.drop_all(engine)<|docstring|>Create the base schema.<|endoftext|>
6226b485d584566850a4c240f3c54215815aee78b55132dc7d812e044a559bb2
@pytest.fixture def connection(engine, tables): 'Create the connection for the test case.' connection = engine.connect() (yield connection)
Create the connection for the test case.
tests/conftest.py
connection
shubham-king/poll
112
python
@pytest.fixture def connection(engine, tables): connection = engine.connect() (yield connection)
@pytest.fixture def connection(engine, tables): connection = engine.connect() (yield connection)<|docstring|>Create the connection for the test case.<|endoftext|>
7ed6c1ef489c1efd699a9d3a04b5eb72e6d33c04367fdb530cd119a40af377b7
@pytest.fixture def session(connection, monkeypatch): 'Return an sqlalchemy session, and after the test tear down everything properly.' transaction = connection.begin() session = Session(bind=connection) def get_session(): return session from pollbot import db monkeypatch.setattr(db, 'get_session', get_session) assert (session == db.get_session()) (yield session) try: connection.execute('SET CONSTRAINTS ALL IMMEDIATE') except InternalError: pass session.close() transaction.rollback() connection.close()
Return an sqlalchemy session, and after the test tear down everything properly.
tests/conftest.py
session
shubham-king/poll
112
python
@pytest.fixture def session(connection, monkeypatch): transaction = connection.begin() session = Session(bind=connection) def get_session(): return session from pollbot import db monkeypatch.setattr(db, 'get_session', get_session) assert (session == db.get_session()) (yield session) try: connection.execute('SET CONSTRAINTS ALL IMMEDIATE') except InternalError: pass session.close() transaction.rollback() connection.close()
@pytest.fixture def session(connection, monkeypatch): transaction = connection.begin() session = Session(bind=connection) def get_session(): return session from pollbot import db monkeypatch.setattr(db, 'get_session', get_session) assert (session == db.get_session()) (yield session) try: connection.execute('SET CONSTRAINTS ALL IMMEDIATE') except InternalError: pass session.close() transaction.rollback() connection.close()<|docstring|>Return an sqlalchemy session, and after the test tear down everything properly.<|endoftext|>
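A hypothetical test built on the fixtures above; User stands in for any mapped model. Whatever the test writes happens inside the wrapping transaction, which the fixture rolls back on teardown.

def test_create_user(session):
    user = User(username="alice")          # User is a made-up model for illustration
    session.add(user)
    session.flush()                        # write within the outer transaction

    assert session.query(User).filter_by(username="alice").one() is user
    # Nothing persists: the session fixture rolls the connection back afterwards.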
fce75b9ab6cea6d91af17dbe821dd08629cc4c5050eb78e0878ddb8754654b4d
def register(self, model=None, include_fields=[], exclude_fields=[], mapping_fields={}): "\n Register a model with auditlog. Auditlog will then track mutations on this model's instances.\n\n :param model: The model to register.\n :type model: Model\n :param include_fields: The fields to include. Implicitly excludes all other fields.\n :type include_fields: list\n :param exclude_fields: The fields to exclude. Overrides the fields to include.\n :type exclude_fields: list\n " def registrar(cls): 'Register models for a given class.' if (not issubclass(cls, Model)): raise TypeError('Supplied model is not a valid model.') self._registry[cls] = {'include_fields': include_fields, 'exclude_fields': exclude_fields, 'mapping_fields': mapping_fields} self._connect_signals(cls) return cls if (model is None): return (lambda cls: registrar(cls)) else: registrar(model)
Register a model with auditlog. Auditlog will then track mutations on this model's instances. :param model: The model to register. :type model: Model :param include_fields: The fields to include. Implicitly excludes all other fields. :type include_fields: list :param exclude_fields: The fields to exclude. Overrides the fields to include. :type exclude_fields: list
src/auditlog/registry.py
register
mathspace/django-auditlog
0
python
def register(self, model=None, include_fields=[], exclude_fields=[], mapping_fields={}): "\n Register a model with auditlog. Auditlog will then track mutations on this model's instances.\n\n :param model: The model to register.\n :type model: Model\n :param include_fields: The fields to include. Implicitly excludes all other fields.\n :type include_fields: list\n :param exclude_fields: The fields to exclude. Overrides the fields to include.\n :type exclude_fields: list\n " def registrar(cls): 'Register models for a given class.' if (not issubclass(cls, Model)): raise TypeError('Supplied model is not a valid model.') self._registry[cls] = {'include_fields': include_fields, 'exclude_fields': exclude_fields, 'mapping_fields': mapping_fields} self._connect_signals(cls) return cls if (model is None): return (lambda cls: registrar(cls)) else: registrar(model)
def register(self, model=None, include_fields=[], exclude_fields=[], mapping_fields={}): "\n Register a model with auditlog. Auditlog will then track mutations on this model's instances.\n\n :param model: The model to register.\n :type model: Model\n :param include_fields: The fields to include. Implicitly excludes all other fields.\n :type include_fields: list\n :param exclude_fields: The fields to exclude. Overrides the fields to include.\n :type exclude_fields: list\n " def registrar(cls): 'Register models for a given class.' if (not issubclass(cls, Model)): raise TypeError('Supplied model is not a valid model.') self._registry[cls] = {'include_fields': include_fields, 'exclude_fields': exclude_fields, 'mapping_fields': mapping_fields} self._connect_signals(cls) return cls if (model is None): return (lambda cls: registrar(cls)) else: registrar(model)<|docstring|>Register a model with auditlog. Auditlog will then track mutations on this model's instances. :param model: The model to register. :type model: Model :param include_fields: The fields to include. Implicitly excludes all other fields. :type include_fields: list :param exclude_fields: The fields to exclude. Overrides the fields to include. :type exclude_fields: list<|endoftext|>
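Given the signature above, registration works either as a direct call or as a decorator (register() with model=None returns the registrar). A sketch with made-up models; the module-level auditlog registry instance is assumed:

from auditlog.registry import auditlog   # assumed module-level registry instance
from django.db import models

class Poll(models.Model):
    question = models.CharField(max_length=200)
    updated_at = models.DateTimeField(auto_now=True)

# Direct call: log everything except the noisy timestamp field.
auditlog.register(Poll, exclude_fields=['updated_at'])

# Decorator form:
@auditlog.register(include_fields=['text'])
class Choice(models.Model):
    text = models.CharField(max_length=200)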
596c74b6eff4d47c9ca8105efd2f7ec9102cee1cc9cebb25ca85b10e060256b3
def contains(self, model): '\n Check if a model is registered with auditlog.\n\n :param model: The model to check.\n :type model: Model\n :return: Whether the model has been registered.\n :rtype: bool\n ' return (model in self._registry)
Check if a model is registered with auditlog. :param model: The model to check. :type model: Model :return: Whether the model has been registered. :rtype: bool
src/auditlog/registry.py
contains
mathspace/django-auditlog
0
python
def contains(self, model): '\n Check if a model is registered with auditlog.\n\n :param model: The model to check.\n :type model: Model\n :return: Whether the model has been registered.\n :rtype: bool\n ' return (model in self._registry)
def contains(self, model): '\n Check if a model is registered with auditlog.\n\n :param model: The model to check.\n :type model: Model\n :return: Whether the model has been registered.\n :rtype: bool\n ' return (model in self._registry)<|docstring|>Check if a model is registered with auditlog. :param model: The model to check. :type model: Model :return: Whether the model has been registered. :rtype: bool<|endoftext|>
129fcb3099697693e13e9455667e39d89509e859a0de55c2f45dfba1aac68055
def unregister(self, model): '\n Unregister a model with auditlog. This will not affect the database.\n\n :param model: The model to unregister.\n :type model: Model\n ' try: del self._registry[model] except KeyError: pass else: self._disconnect_signals(model)
Unregister a model with auditlog. This will not affect the database. :param model: The model to unregister. :type model: Model
src/auditlog/registry.py
unregister
mathspace/django-auditlog
0
python
def unregister(self, model): '\n Unregister a model with auditlog. This will not affect the database.\n\n :param model: The model to unregister.\n :type model: Model\n ' try: del self._registry[model] except KeyError: pass else: self._disconnect_signals(model)
def unregister(self, model): '\n Unregister a model with auditlog. This will not affect the database.\n\n :param model: The model to unregister.\n :type model: Model\n ' try: del self._registry[model] except KeyError: pass else: self._disconnect_signals(model)<|docstring|>Unregister a model with auditlog. This will not affect the database. :param model: The model to unregister. :type model: Model<|endoftext|>
71b86384cbabd736d82f72ecb3e7d29f22732b6e641f442b7b515e081ce6b470
def _connect_signals(self, model): '\n Connect signals for the model.\n ' if user_settings.get('disable_auditlog', False): return for signal in self._signals: receiver = self._signals[signal] signal.connect(receiver, sender=model, dispatch_uid=self._dispatch_uid(signal, model))
Connect signals for the model.
src/auditlog/registry.py
_connect_signals
mathspace/django-auditlog
0
python
def _connect_signals(self, model): '\n \n ' if user_settings.get('disable_auditlog', False): return for signal in self._signals: receiver = self._signals[signal] signal.connect(receiver, sender=model, dispatch_uid=self._dispatch_uid(signal, model))
def _connect_signals(self, model): '\n \n ' if user_settings.get('disable_auditlog', False): return for signal in self._signals: receiver = self._signals[signal] signal.connect(receiver, sender=model, dispatch_uid=self._dispatch_uid(signal, model))<|docstring|>Connect signals for the model.<|endoftext|>
7be4cabea09d860b3debad088e88030b2265d53d61db3b66d3b2c6be5b9a08bd
def _disconnect_signals(self, model): '\n Disconnect signals for the model.\n ' if user_settings.get('disable_auditlog', False): return for (signal, receiver) in self._signals.items(): signal.disconnect(sender=model, dispatch_uid=self._dispatch_uid(signal, model))
Disconnect signals for the model.
src/auditlog/registry.py
_disconnect_signals
mathspace/django-auditlog
0
python
def _disconnect_signals(self, model): '\n \n ' if user_settings.get('disable_auditlog', False): return for (signal, receiver) in self._signals.items(): signal.disconnect(sender=model, dispatch_uid=self._dispatch_uid(signal, model))
def _disconnect_signals(self, model): '\n \n ' if user_settings.get('disable_auditlog', False): return for (signal, receiver) in self._signals.items(): signal.disconnect(sender=model, dispatch_uid=self._dispatch_uid(signal, model))<|docstring|>Disconnect signals for the model.<|endoftext|>
4907bf6b94401b18d531385ff3ca9eacef6e0801d56b9e39820e8cbee32b7029
def _dispatch_uid(self, signal, model): '\n Generate a dispatch_uid.\n ' return (self.__class__, model, signal)
Generate a dispatch_uid.
src/auditlog/registry.py
_dispatch_uid
mathspace/django-auditlog
0
python
def _dispatch_uid(self, signal, model): '\n \n ' return (self.__class__, model, signal)
def _dispatch_uid(self, signal, model): '\n \n ' return (self.__class__, model, signal)<|docstring|>Generate a dispatch_uid.<|endoftext|>
07d4545a2738745db51a029de56a2c6cdccaf5d790420daac076b45a2b1ddc32
def registrar(cls): 'Register models for a given class.' if (not issubclass(cls, Model)): raise TypeError('Supplied model is not a valid model.') self._registry[cls] = {'include_fields': include_fields, 'exclude_fields': exclude_fields, 'mapping_fields': mapping_fields} self._connect_signals(cls) return cls
Register models for a given class.
src/auditlog/registry.py
registrar
mathspace/django-auditlog
0
python
def registrar(cls): if (not issubclass(cls, Model)): raise TypeError('Supplied model is not a valid model.') self._registry[cls] = {'include_fields': include_fields, 'exclude_fields': exclude_fields, 'mapping_fields': mapping_fields} self._connect_signals(cls) return cls
def registrar(cls): if (not issubclass(cls, Model)): raise TypeError('Supplied model is not a valid model.') self._registry[cls] = {'include_fields': include_fields, 'exclude_fields': exclude_fields, 'mapping_fields': mapping_fields} self._connect_signals(cls) return cls<|docstring|>Register models for a given class.<|endoftext|>
f2f5d2db413c8c17d9f114e8db7a209a8b2c88d9578e68e20c9bedc5f2085d38
def registrar(cls): 'Register models for a given class.' if (not issubclass(cls, Model)): raise TypeError('Supplied model is not a valid model.') if user_settings.get('disable_auditlog', False): return receiver = self._signals[m2m_changed] m2m_changed.connect(receiver, sender=model, dispatch_uid=self._dispatch_uid(m2m_changed, model)) return cls
Register models for a given class.
src/auditlog/registry.py
registrar
mathspace/django-auditlog
0
python
def registrar(cls): if (not issubclass(cls, Model)): raise TypeError('Supplied model is not a valid model.') if user_settings.get('disable_auditlog', False): return receiver = self._signals[m2m_changed] m2m_changed.connect(receiver, sender=model, dispatch_uid=self._dispatch_uid(m2m_changed, model)) return cls
def registrar(cls): if (not issubclass(cls, Model)): raise TypeError('Supplied model is not a valid model.') if user_settings.get('disable_auditlog', False): return receiver = self._signals[m2m_changed] m2m_changed.connect(receiver, sender=model, dispatch_uid=self._dispatch_uid(m2m_changed, model)) return cls<|docstring|>Register models for a given class.<|endoftext|>
1a3566d8b67d22818936023a2ee7c6d08e13a4516f53f7964451b116222d48c8
def _verify_patchelf() -> None: "This function looks for the ``patchelf`` external binary in the PATH,\n checks for the required version, and throws an exception if a proper\n version can't be found. Otherwise, silcence is golden\n " if (not find_executable('patchelf')): raise ValueError('Cannot find required utility `patchelf` in PATH') try: version = check_output(['patchelf', '--version']).decode('utf-8') except CalledProcessError: raise ValueError('Could not call `patchelf` binary') m = re.match('patchelf\\s+(\\d+(.\\d+)?)', version) if (m and (tuple((int(x) for x in m.group(1).split('.'))) >= (0, 9))): return raise ValueError(('patchelf %s found. auditwheel repair requires patchelf >= 0.9.' % version))
This function looks for the ``patchelf`` external binary in the PATH, checks for the required version, and throws an exception if a proper version can't be found. Otherwise, silcence is golden
src/auditwheel/patcher.py
_verify_patchelf
f3flight/auditwheel
280
python
def _verify_patchelf() -> None: "This function looks for the ``patchelf`` external binary in the PATH,\n checks for the required version, and throws an exception if a proper\n version can't be found. Otherwise, silcence is golden\n " if (not find_executable('patchelf')): raise ValueError('Cannot find required utility `patchelf` in PATH') try: version = check_output(['patchelf', '--version']).decode('utf-8') except CalledProcessError: raise ValueError('Could not call `patchelf` binary') m = re.match('patchelf\\s+(\\d+(.\\d+)?)', version) if (m and (tuple((int(x) for x in m.group(1).split('.'))) >= (0, 9))): return raise ValueError(('patchelf %s found. auditwheel repair requires patchelf >= 0.9.' % version))
def _verify_patchelf() -> None: "This function looks for the ``patchelf`` external binary in the PATH,\n checks for the required version, and throws an exception if a proper\n version can't be found. Otherwise, silcence is golden\n " if (not find_executable('patchelf')): raise ValueError('Cannot find required utility `patchelf` in PATH') try: version = check_output(['patchelf', '--version']).decode('utf-8') except CalledProcessError: raise ValueError('Could not call `patchelf` binary') m = re.match('patchelf\\s+(\\d+(.\\d+)?)', version) if (m and (tuple((int(x) for x in m.group(1).split('.'))) >= (0, 9))): return raise ValueError(('patchelf %s found. auditwheel repair requires patchelf >= 0.9.' % version))<|docstring|>This function looks for the ``patchelf`` external binary in the PATH, checks for the required version, and throws an exception if a proper version can't be found. Otherwise, silcence is golden<|endoftext|>
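The version gate above boils down to a regex parse plus a tuple comparison; in isolation, with a sample version string:

import re

version = "patchelf 0.12"                              # sample `patchelf --version` output
m = re.match(r'patchelf\s+(\d+(.\d+)?)', version)
parsed = tuple(int(x) for x in m.group(1).split('.'))
print(parsed, parsed >= (0, 9))                        # (0, 12) True -> new enough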
7229fcfdf609fa22f30e349dc6cd99a9a50310f798bc4c69f0e3484d9fb4fabb
@overrides def optimize_policy(self, itr, samples_data): '\n Perform algorithm optimizing.\n\n Returns:\n action_loss: Loss of action predicted by the policy network.\n qval_loss: Loss of q value predicted by the q network.\n ys: y_s.\n qval: Q value predicted by the q network.\n\n ' transitions = self.replay_buffer.sample(self.buffer_batch_size) observations = transitions['observation'] rewards = transitions['reward'] actions = transitions['action'] next_observations = transitions['next_observation'] terminals = transitions['terminal'] rewards = rewards.reshape((- 1), 1) terminals = terminals.reshape((- 1), 1) if self.input_include_goal: goals = transitions['goal'] next_inputs = np.concatenate((next_observations, goals), axis=(- 1)) inputs = np.concatenate((observations, goals), axis=(- 1)) else: next_inputs = next_observations inputs = observations target_actions = self.target_policy_f_prob_online(next_inputs) target_qvals = self.target_qf_f_prob_online(next_inputs, target_actions) clip_range = ((- self.clip_return), (0.0 if self.clip_pos_returns else self.clip_return)) ys = np.clip((rewards + (((1.0 - terminals) * self.discount) * target_qvals)), clip_range[0], clip_range[1]) (_, qval_loss, qval) = self.f_train_qf(ys, inputs, actions) (_, action_loss) = self.f_train_policy(inputs) self.f_update_target() return (qval_loss, ys, qval, action_loss)
Perform algorithm optimizing. Returns: action_loss: Loss of action predicted by the policy network. qval_loss: Loss of q value predicted by the q network. ys: y_s. qval: Q value predicted by the q network.
src/garage/tf/algos/ddpg.py
optimize_policy
lywong92/garage
1
python
@overrides def optimize_policy(self, itr, samples_data): '\n Perform algorithm optimizing.\n\n Returns:\n action_loss: Loss of action predicted by the policy network.\n qval_loss: Loss of q value predicted by the q network.\n ys: y_s.\n qval: Q value predicted by the q network.\n\n ' transitions = self.replay_buffer.sample(self.buffer_batch_size) observations = transitions['observation'] rewards = transitions['reward'] actions = transitions['action'] next_observations = transitions['next_observation'] terminals = transitions['terminal'] rewards = rewards.reshape((- 1), 1) terminals = terminals.reshape((- 1), 1) if self.input_include_goal: goals = transitions['goal'] next_inputs = np.concatenate((next_observations, goals), axis=(- 1)) inputs = np.concatenate((observations, goals), axis=(- 1)) else: next_inputs = next_observations inputs = observations target_actions = self.target_policy_f_prob_online(next_inputs) target_qvals = self.target_qf_f_prob_online(next_inputs, target_actions) clip_range = ((- self.clip_return), (0.0 if self.clip_pos_returns else self.clip_return)) ys = np.clip((rewards + (((1.0 - terminals) * self.discount) * target_qvals)), clip_range[0], clip_range[1]) (_, qval_loss, qval) = self.f_train_qf(ys, inputs, actions) (_, action_loss) = self.f_train_policy(inputs) self.f_update_target() return (qval_loss, ys, qval, action_loss)
@overrides def optimize_policy(self, itr, samples_data): '\n Perform algorithm optimizing.\n\n Returns:\n action_loss: Loss of action predicted by the policy network.\n qval_loss: Loss of q value predicted by the q network.\n ys: y_s.\n qval: Q value predicted by the q network.\n\n ' transitions = self.replay_buffer.sample(self.buffer_batch_size) observations = transitions['observation'] rewards = transitions['reward'] actions = transitions['action'] next_observations = transitions['next_observation'] terminals = transitions['terminal'] rewards = rewards.reshape((- 1), 1) terminals = terminals.reshape((- 1), 1) if self.input_include_goal: goals = transitions['goal'] next_inputs = np.concatenate((next_observations, goals), axis=(- 1)) inputs = np.concatenate((observations, goals), axis=(- 1)) else: next_inputs = next_observations inputs = observations target_actions = self.target_policy_f_prob_online(next_inputs) target_qvals = self.target_qf_f_prob_online(next_inputs, target_actions) clip_range = ((- self.clip_return), (0.0 if self.clip_pos_returns else self.clip_return)) ys = np.clip((rewards + (((1.0 - terminals) * self.discount) * target_qvals)), clip_range[0], clip_range[1]) (_, qval_loss, qval) = self.f_train_qf(ys, inputs, actions) (_, action_loss) = self.f_train_policy(inputs) self.f_update_target() return (qval_loss, ys, qval, action_loss)<|docstring|>Perform algorithm optimizing. Returns: action_loss: Loss of action predicted by the policy network. qval_loss: Loss of q value predicted by the q network. ys: y_s. qval: Q value predicted by the q network.<|endoftext|>
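The heart of the update above is the clipped one-step target y = clip(r + (1 - terminal) * discount * Q'(s', mu'(s')), lo, hi); a small numeric sketch with made-up batch values (the clip_pos_returns=False case):

import numpy as np

rewards      = np.array([[1.0], [0.5]])
terminals    = np.array([[0.0], [1.0]])       # second transition ends the episode
target_qvals = np.array([[10.0], [10.0]])     # Q'(s', mu'(s')) from the target networks
discount, clip_return = 0.99, 100.0

clip_range = (-clip_return, clip_return)
ys = np.clip(rewards + (1.0 - terminals) * discount * target_qvals,
             clip_range[0], clip_range[1])
print(ys.ravel())   # [10.9  0.5] -- the terminal row keeps only its reward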
f31e687e982bb8a9f342517872d2ab3a2fb4950c4e8b6a719033806f35dddcad
@callback def setup_bans(hass, app, login_threshold): 'Create IP Ban middleware for the app.' app.middlewares.append(ban_middleware) app[KEY_FAILED_LOGIN_ATTEMPTS] = defaultdict(int) app[KEY_LOGIN_THRESHOLD] = login_threshold async def ban_startup(app): 'Initialize bans when app starts up.' app[KEY_BANNED_IPS] = (await async_load_ip_bans_config(hass, hass.config.path(IP_BANS_FILE))) app.on_startup.append(ban_startup)
Create IP Ban middleware for the app.
homeassistant/components/http/ban.py
setup_bans
uSpike/home-assistant
23
python
@callback def setup_bans(hass, app, login_threshold): app.middlewares.append(ban_middleware) app[KEY_FAILED_LOGIN_ATTEMPTS] = defaultdict(int) app[KEY_LOGIN_THRESHOLD] = login_threshold async def ban_startup(app): 'Initialize bans when app starts up.' app[KEY_BANNED_IPS] = (await async_load_ip_bans_config(hass, hass.config.path(IP_BANS_FILE))) app.on_startup.append(ban_startup)
@callback def setup_bans(hass, app, login_threshold): app.middlewares.append(ban_middleware) app[KEY_FAILED_LOGIN_ATTEMPTS] = defaultdict(int) app[KEY_LOGIN_THRESHOLD] = login_threshold async def ban_startup(app): 'Initialize bans when app starts up.' app[KEY_BANNED_IPS] = (await async_load_ip_bans_config(hass, hass.config.path(IP_BANS_FILE))) app.on_startup.append(ban_startup)<|docstring|>Create IP Ban middleware for the app.<|endoftext|>
275c137e4663b857a777c12b2b6ad8884119b26647a174e030114ff502323185
@middleware async def ban_middleware(request, handler): 'IP Ban middleware.' if (KEY_BANNED_IPS not in request.app): _LOGGER.error('IP Ban middleware loaded but banned IPs not loaded') return (await handler(request)) ip_address_ = request[KEY_REAL_IP] is_banned = any(((ip_ban.ip_address == ip_address_) for ip_ban in request.app[KEY_BANNED_IPS])) if is_banned: raise HTTPForbidden() try: return (await handler(request)) except HTTPUnauthorized: (await process_wrong_login(request)) raise
IP Ban middleware.
homeassistant/components/http/ban.py
ban_middleware
uSpike/home-assistant
23
python
@middleware async def ban_middleware(request, handler): if (KEY_BANNED_IPS not in request.app): _LOGGER.error('IP Ban middleware loaded but banned IPs not loaded') return (await handler(request)) ip_address_ = request[KEY_REAL_IP] is_banned = any(((ip_ban.ip_address == ip_address_) for ip_ban in request.app[KEY_BANNED_IPS])) if is_banned: raise HTTPForbidden() try: return (await handler(request)) except HTTPUnauthorized: (await process_wrong_login(request)) raise
@middleware async def ban_middleware(request, handler): if (KEY_BANNED_IPS not in request.app): _LOGGER.error('IP Ban middleware loaded but banned IPs not loaded') return (await handler(request)) ip_address_ = request[KEY_REAL_IP] is_banned = any(((ip_ban.ip_address == ip_address_) for ip_ban in request.app[KEY_BANNED_IPS])) if is_banned: raise HTTPForbidden() try: return (await handler(request)) except HTTPUnauthorized: (await process_wrong_login(request)) raise<|docstring|>IP Ban middleware.<|endoftext|>
172ae85c069e2f305b676747b31865c3eef93a10a27736b5fb70c63f6f981dfb
def log_invalid_auth(func): 'Decorate function to handle invalid auth or failed login attempts.' async def handle_req(view, request, *args, **kwargs): 'Try to log failed login attempts if response status >= 400.' resp = (await func(view, request, *args, **kwargs)) if (resp.status >= 400): (await process_wrong_login(request)) return resp return handle_req
Decorate function to handle invalid auth or failed login attempts.
homeassistant/components/http/ban.py
log_invalid_auth
uSpike/home-assistant
23
python
def log_invalid_auth(func): async def handle_req(view, request, *args, **kwargs): 'Try to log failed login attempts if response status >= 400.' resp = (await func(view, request, *args, **kwargs)) if (resp.status >= 400): (await process_wrong_login(request)) return resp return handle_req
def log_invalid_auth(func): async def handle_req(view, request, *args, **kwargs): 'Try to log failed login attempts if response status >= 400.' resp = (await func(view, request, *args, **kwargs)) if (resp.status >= 400): (await process_wrong_login(request)) return resp return handle_req<|docstring|>Decorate function to handle invalid auth or failed login attempts.<|endoftext|>
24aaa1a7bb6680275287ed913011f6fcc1ad689ff6ce65c91cff81211d96263b
async def process_wrong_login(request): 'Process a wrong login attempt.\n\n Increase failed login attempts counter for remote IP address.\n Add ip ban entry if failed login attempts exceeds threshold.\n ' remote_addr = request[KEY_REAL_IP] msg = 'Login attempt or request with invalid authentication from {}'.format(remote_addr) _LOGGER.warning(msg) hass = request.app['hass'] hass.components.persistent_notification.async_create(msg, 'Login attempt failed', NOTIFICATION_ID_LOGIN) if ((KEY_BANNED_IPS not in request.app) or (request.app[KEY_LOGIN_THRESHOLD] < 1)): return request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr] += 1 if (request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr] >= request.app[KEY_LOGIN_THRESHOLD]): new_ban = IpBan(remote_addr) request.app[KEY_BANNED_IPS].append(new_ban) (await hass.async_add_job(update_ip_bans_config, hass.config.path(IP_BANS_FILE), new_ban)) _LOGGER.warning('Banned IP %s for too many login attempts', remote_addr) hass.components.persistent_notification.async_create(f'Too many login attempts from {remote_addr}', 'Banning IP address', NOTIFICATION_ID_BAN)
Process a wrong login attempt. Increase failed login attempts counter for remote IP address. Add ip ban entry if failed login attempts exceeds threshold.
homeassistant/components/http/ban.py
process_wrong_login
uSpike/home-assistant
23
python
async def process_wrong_login(request): 'Process a wrong login attempt.\n\n Increase failed login attempts counter for remote IP address.\n Add ip ban entry if failed login attempts exceeds threshold.\n ' remote_addr = request[KEY_REAL_IP] msg = 'Login attempt or request with invalid authentication from {}'.format(remote_addr) _LOGGER.warning(msg) hass = request.app['hass'] hass.components.persistent_notification.async_create(msg, 'Login attempt failed', NOTIFICATION_ID_LOGIN) if ((KEY_BANNED_IPS not in request.app) or (request.app[KEY_LOGIN_THRESHOLD] < 1)): return request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr] += 1 if (request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr] >= request.app[KEY_LOGIN_THRESHOLD]): new_ban = IpBan(remote_addr) request.app[KEY_BANNED_IPS].append(new_ban) (await hass.async_add_job(update_ip_bans_config, hass.config.path(IP_BANS_FILE), new_ban)) _LOGGER.warning('Banned IP %s for too many login attempts', remote_addr) hass.components.persistent_notification.async_create(f'Too many login attempts from {remote_addr}', 'Banning IP address', NOTIFICATION_ID_BAN)
async def process_wrong_login(request): 'Process a wrong login attempt.\n\n Increase failed login attempts counter for remote IP address.\n Add ip ban entry if failed login attempts exceeds threshold.\n ' remote_addr = request[KEY_REAL_IP] msg = 'Login attempt or request with invalid authentication from {}'.format(remote_addr) _LOGGER.warning(msg) hass = request.app['hass'] hass.components.persistent_notification.async_create(msg, 'Login attempt failed', NOTIFICATION_ID_LOGIN) if ((KEY_BANNED_IPS not in request.app) or (request.app[KEY_LOGIN_THRESHOLD] < 1)): return request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr] += 1 if (request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr] >= request.app[KEY_LOGIN_THRESHOLD]): new_ban = IpBan(remote_addr) request.app[KEY_BANNED_IPS].append(new_ban) (await hass.async_add_job(update_ip_bans_config, hass.config.path(IP_BANS_FILE), new_ban)) _LOGGER.warning('Banned IP %s for too many login attempts', remote_addr) hass.components.persistent_notification.async_create(f'Too many login attempts from {remote_addr}', 'Banning IP address', NOTIFICATION_ID_BAN)<|docstring|>Process a wrong login attempt. Increase failed login attempts counter for remote IP address. Add ip ban entry if failed login attempts exceeds threshold.<|endoftext|>
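Stripped of the aiohttp plumbing, the ban decision above is a per-IP counter compared against a threshold; a simplified illustration (not the component's actual data structures):

from collections import defaultdict

failed_login_attempts = defaultdict(int)
login_threshold = 3
banned_ips = []

def record_failed_login(remote_addr):
    failed_login_attempts[remote_addr] += 1
    if failed_login_attempts[remote_addr] >= login_threshold:
        banned_ips.append(remote_addr)

for _ in range(3):
    record_failed_login('203.0.113.7')
print(banned_ips)   # ['203.0.113.7'] after the third failure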
6092c2c803d876ce78e21172eaec2c8e0b0209520da44ff83f2addf329976fb2
async def process_success_login(request): 'Process a success login attempt.\n\n Reset failed login attempts counter for remote IP address.\n No release IP address from banned list function, it can only be done by\n manual modify ip bans config file.\n ' remote_addr = request[KEY_REAL_IP] if ((KEY_BANNED_IPS not in request.app) or (request.app[KEY_LOGIN_THRESHOLD] < 1)): return if ((remote_addr in request.app[KEY_FAILED_LOGIN_ATTEMPTS]) and (request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr] > 0)): _LOGGER.debug('Login success, reset failed login attempts counter from %s', remote_addr) request.app[KEY_FAILED_LOGIN_ATTEMPTS].pop(remote_addr)
Process a success login attempt. Reset failed login attempts counter for remote IP address. No release IP address from banned list function, it can only be done by manual modify ip bans config file.
homeassistant/components/http/ban.py
process_success_login
uSpike/home-assistant
23
python
async def process_success_login(request): 'Process a success login attempt.\n\n Reset failed login attempts counter for remote IP address.\n No release IP address from banned list function, it can only be done by\n manual modify ip bans config file.\n ' remote_addr = request[KEY_REAL_IP] if ((KEY_BANNED_IPS not in request.app) or (request.app[KEY_LOGIN_THRESHOLD] < 1)): return if ((remote_addr in request.app[KEY_FAILED_LOGIN_ATTEMPTS]) and (request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr] > 0)): _LOGGER.debug('Login success, reset failed login attempts counter from %s', remote_addr) request.app[KEY_FAILED_LOGIN_ATTEMPTS].pop(remote_addr)
async def process_success_login(request): 'Process a success login attempt.\n\n Reset failed login attempts counter for remote IP address.\n No release IP address from banned list function, it can only be done by\n manual modify ip bans config file.\n ' remote_addr = request[KEY_REAL_IP] if ((KEY_BANNED_IPS not in request.app) or (request.app[KEY_LOGIN_THRESHOLD] < 1)): return if ((remote_addr in request.app[KEY_FAILED_LOGIN_ATTEMPTS]) and (request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr] > 0)): _LOGGER.debug('Login success, reset failed login attempts counter from %s', remote_addr) request.app[KEY_FAILED_LOGIN_ATTEMPTS].pop(remote_addr)<|docstring|>Process a success login attempt. Reset failed login attempts counter for remote IP address. No release IP address from banned list function, it can only be done by manual modify ip bans config file.<|endoftext|>
616ea3b2ec3b983d52b2318031954a3e15cadafaf5c6db1eba4c464ab0f863c7
async def async_load_ip_bans_config(hass: HomeAssistant, path: str) -> List[IpBan]: 'Load list of banned IPs from config file.' ip_list: List[IpBan] = [] try: list_ = (await hass.async_add_executor_job(load_yaml_config_file, path)) except FileNotFoundError: return ip_list except HomeAssistantError as err: _LOGGER.error('Unable to load %s: %s', path, str(err)) return ip_list for (ip_ban, ip_info) in list_.items(): try: ip_info = SCHEMA_IP_BAN_ENTRY(ip_info) ip_list.append(IpBan(ip_ban, ip_info['banned_at'])) except vol.Invalid as err: _LOGGER.error('Failed to load IP ban %s: %s', ip_info, err) continue return ip_list
Load list of banned IPs from config file.
homeassistant/components/http/ban.py
async_load_ip_bans_config
uSpike/home-assistant
23
python
async def async_load_ip_bans_config(hass: HomeAssistant, path: str) -> List[IpBan]: ip_list: List[IpBan] = [] try: list_ = (await hass.async_add_executor_job(load_yaml_config_file, path)) except FileNotFoundError: return ip_list except HomeAssistantError as err: _LOGGER.error('Unable to load %s: %s', path, str(err)) return ip_list for (ip_ban, ip_info) in list_.items(): try: ip_info = SCHEMA_IP_BAN_ENTRY(ip_info) ip_list.append(IpBan(ip_ban, ip_info['banned_at'])) except vol.Invalid as err: _LOGGER.error('Failed to load IP ban %s: %s', ip_info, err) continue return ip_list
async def async_load_ip_bans_config(hass: HomeAssistant, path: str) -> List[IpBan]: ip_list: List[IpBan] = [] try: list_ = (await hass.async_add_executor_job(load_yaml_config_file, path)) except FileNotFoundError: return ip_list except HomeAssistantError as err: _LOGGER.error('Unable to load %s: %s', path, str(err)) return ip_list for (ip_ban, ip_info) in list_.items(): try: ip_info = SCHEMA_IP_BAN_ENTRY(ip_info) ip_list.append(IpBan(ip_ban, ip_info['banned_at'])) except vol.Invalid as err: _LOGGER.error('Failed to load IP ban %s: %s', ip_info, err) continue return ip_list<|docstring|>Load list of banned IPs from config file.<|endoftext|>
28e0265c7649fff1b8d7f2ce5a52ccedd8b6ff783bcae90462a87ecfe6559421
def update_ip_bans_config(path: str, ip_ban: IpBan) -> None: 'Update config file with new banned IP address.' with open(path, 'a') as out: ip_ = {str(ip_ban.ip_address): {ATTR_BANNED_AT: ip_ban.banned_at.strftime('%Y-%m-%dT%H:%M:%S')}} out.write('\n') out.write(dump(ip_))
Update config file with new banned IP address.
homeassistant/components/http/ban.py
update_ip_bans_config
uSpike/home-assistant
23
python
def update_ip_bans_config(path: str, ip_ban: IpBan) -> None: with open(path, 'a') as out: ip_ = {str(ip_ban.ip_address): {ATTR_BANNED_AT: ip_ban.banned_at.strftime('%Y-%m-%dT%H:%M:%S')}} out.write('\n') out.write(dump(ip_))
def update_ip_bans_config(path: str, ip_ban: IpBan) -> None: with open(path, 'a') as out: ip_ = {str(ip_ban.ip_address): {ATTR_BANNED_AT: ip_ban.banned_at.strftime('%Y-%m-%dT%H:%M:%S')}} out.write('\n') out.write(dump(ip_))<|docstring|>Update config file with new banned IP address.<|endoftext|>
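update_ip_bans_config appends one small YAML mapping per ban. Assuming ATTR_BANNED_AT is the key 'banned_at' and dump behaves like PyYAML's yaml.dump (both assumptions, since neither is shown in this excerpt), the appended block looks roughly like:

from datetime import datetime
from ipaddress import ip_address
import yaml

entry = {str(ip_address('203.0.113.7')):
         {'banned_at': datetime(2024, 1, 1, 12, 0, 0).strftime('%Y-%m-%dT%H:%M:%S')}}
print(yaml.dump(entry))
# 203.0.113.7:
#   banned_at: '2024-01-01T12:00:00'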
8f3d20e64e8bc020d119ce208339b00dae40e9baa9e4fd5c582b43eb335f817b
async def ban_startup(app): 'Initialize bans when app starts up.' app[KEY_BANNED_IPS] = (await async_load_ip_bans_config(hass, hass.config.path(IP_BANS_FILE)))
Initialize bans when app starts up.
homeassistant/components/http/ban.py
ban_startup
uSpike/home-assistant
23
python
async def ban_startup(app): app[KEY_BANNED_IPS] = (await async_load_ip_bans_config(hass, hass.config.path(IP_BANS_FILE)))
async def ban_startup(app): app[KEY_BANNED_IPS] = (await async_load_ip_bans_config(hass, hass.config.path(IP_BANS_FILE)))<|docstring|>Initialize bans when app starts up.<|endoftext|>
0459a41b3360b59ab7f84131f5fc0c077793657b0cb4def6ea9e08417d61bb60
async def handle_req(view, request, *args, **kwargs): 'Try to log failed login attempts if response status >= 400.' resp = (await func(view, request, *args, **kwargs)) if (resp.status >= 400): (await process_wrong_login(request)) return resp
Try to log failed login attempts if response status >= 400.
homeassistant/components/http/ban.py
handle_req
uSpike/home-assistant
23
python
async def handle_req(view, request, *args, **kwargs): resp = (await func(view, request, *args, **kwargs)) if (resp.status >= 400): (await process_wrong_login(request)) return resp
async def handle_req(view, request, *args, **kwargs): resp = (await func(view, request, *args, **kwargs)) if (resp.status >= 400): (await process_wrong_login(request)) return resp<|docstring|>Try to log failed login attempts if response status >= 400.<|endoftext|>
66464665317894d7070d2506381dbe5f7e9318e0b38d689e4045a322c3af5332
def __init__(self, ip_ban: str, banned_at: Optional[datetime]=None) -> None: 'Initialize IP Ban object.' self.ip_address = ip_address(ip_ban) self.banned_at = (banned_at or datetime.utcnow())
Initialize IP Ban object.
homeassistant/components/http/ban.py
__init__
uSpike/home-assistant
23
python
def __init__(self, ip_ban: str, banned_at: Optional[datetime]=None) -> None: self.ip_address = ip_address(ip_ban) self.banned_at = (banned_at or datetime.utcnow())
def __init__(self, ip_ban: str, banned_at: Optional[datetime]=None) -> None: self.ip_address = ip_address(ip_ban) self.banned_at = (banned_at or datetime.utcnow())<|docstring|>Initialize IP Ban object.<|endoftext|>
f70c6a772c258a45281e7bba5050fbb27fcc730b0b06dc762c1dcc0395ff62af
def get_content(suffix, print_data=False): '\n From the page ( \'ecolex.org\'+ suffix ) we grab the relevant metadata (eg. type, document Type, name, reference, number,\n date, source name and source link, status, subject, keywords, treaty name and link, meeting name and link, website, abstract,\n ...).\n The data is then saved into a dictionary with parameter names as keys and the grabbed results as the values.\n\n Example:\n\n data["category"] = "Treaty decision"\n data["name"] = "Decision XXIX_21 _ Membership of the Implementation Committee"\n\n In the end the dictionary is saved into a json file named (data["name"] without forbidden characters and \n length limited to 100).json\n\n Parameters:\n suffix : string\n the suffix of the url from which we are extracting the data. The suffix string is everything that comes \n after the \'ecolex.org\'\n\n print_data : boolean \n Optional parameter that is by default set to False. In case it is set to True, the function will at the end \n also print what it managed to extract from the page.\n\n Returns \n None\n ' data = dict() data['URL'] = (BASE_URL + suffix) get_page = requests.get((BASE_URL + suffix)) if (get_page.status_code != 200): print('Request Denied!', suffix) page_text = get_page.text soup = BeautifulSoup(page_text, 'html.parser') important_text = str(soup.find('article')) string_parameters = {'date': '<dt>Date.*\\s*<dd>(.*?)<', 'sourceLink': 'Source.*\\s*.*\\s*.*?href="(.*?)"', 'sourceName': 'Source.*\\s*.*\\s*.*?>(.*?)<', 'sourceID': '\\(ID:.*?>(.*?)<', 'publisher': 'Publisher.*\\s*.*\\s*(.*)', 'placePublication': 'Place of publication.*\\s*.*\\s*.*\\s*\\|(.*)', 'ISBN': 'ISBN.*\\s*<dd>(.*?)<', 'ISSN': 'ISSN.*\\s*<dd>(.*?)<', 'pages': 'Pages.*\\s*<dd>(\\d*)', 'documentType': 'Document type.*\\s*<dd>(.*?)<', 'fullTextLink': 'Full text.*\\s*.*\\s*.*?href="(.*?)"', 'website': 'Website.*\\s*.*\\s*<a href="(.*?)"', 'basin': 'Basin.*\\s*<dd>(.*?)<', 'fieldOfApplication': 'Field of application.*\\s*<dd>(.*?)<', 'DOI': 'DOI.*\\s*.*\\s*<a href="(.*?)"', 'journal/series': 'Journal\\/Series.*\\s*<dd>\\s*(.*\\s*\\|.*)'} list_parameters = {'author': 'uthor.*\\s*<dd>(.*?)<', 'language': 'Language.*\\s*<dd>(.*?)<', 'country/Territory': 'Country\\/Territory.*\\s*<dd>(.*?)<', 'subject': 'Subject.*\\s*<dd>(.*?)<', 'geographicalArea': 'Geographical area.*\\s*<dd>(.*?)<'} for (parameter_name, regex_pattern) in string_parameters.items(): re_pat = re.compile(regex_pattern) data[parameter_name] = get_value_or_none(re_pat, important_text) for (parameter_name, regex_pattern) in list_parameters.items(): re_pat = re.compile(regex_pattern) data[parameter_name] = get_list_or_none(re_pat, important_text) data['category'] = 'literature' re_name = re.compile('<h1>(.*?)<') data['name'] = get_value_or_none(re_name, important_text) if (data['name'] is not None): data['name'] = remove_forbidden_characters(data['name']) else: print('Name of the file not found!', suffix) re_keyword = re.compile('span class="tag">(.*?)<') data['keyword'] = re.findall(re_keyword, important_text) re_abstract = re.compile('class="abstract">(.*)') data['abstract'] = get_value_or_none(re_abstract, important_text) ref_section = soup.find('article').find('section', {'id': 'other-references'}) if (ref_section is not None): data['other_references'] = list() other_refs = ref_section.find_all('dl') for each_reference in other_refs: reftext = str(each_reference) single_reference = dict() ref_string_parameters = {'refType': '<dt>(.*?)<', 'refLink': 'result-title.*\\s*.*?href="(.*)"', 'refName': 
'result-title.*\\s*.*\\s*title="(.*)"', 'refDocumentType': 'Document type">(.*?)<', 'refPlaceOfAdoption': 'Place of adoption">(.*?)<', 'refDate': 'Date:(.*?)"', 'refSourceID': 'source.*\\s*.*?ID:(.*?)<', 'refSourceLink': 'source.*\\s*.*?href="(.*?)"', 'refSourceName': 'source.*\\s*.*?href.*?>(.*?)<'} ref_list_parameters = {'refKeywords': 'keywords">(.*?)<'} for (parameter_name, regex_pattern) in ref_string_parameters.items(): re_pat = re.compile(regex_pattern) single_reference[parameter_name] = get_value_or_none(re_pat, reftext) for (parameter_name, regex_pattern) in ref_list_parameters.items(): re_pat = re.compile(regex_pattern) single_reference[parameter_name] = get_list_or_none(re_pat, reftext) data['other_references'].append(single_reference) ref_section_literature = soup.find('article').find('section', {'id': 'literature-references'}) if (ref_section_literature is not None): data['literature_references'] = [] literature_references = ref_section_literature.find('dl') for each_reference in literature_references: reftext = str(each_reference) single_reference = dict() ref_string_parameters = {'refName': 'result-title.*\\s*.*\\s*.*?>(.*?)<', 'refLink': 'result-title.*\\s*.*?href="(.*?)"', 'refAuthor': 'uthor:.*\\s*.*?>(.*?)<', 'refPublishedIn': 'details.*\\s*.*?In:.*?span>(.*?)<', 'refPublishedInWhere': 'details.*\\s*.*In.*\\s*\\|(.*)', 'refPublisher': 'Publisher.*?span>(.*)<', 'refPublicationPlace': 'Publication place">(.*)<', 'refPublicationDate': 'ublication date">(.*)<', 'refSourceLink': 'Source.*\\s*.*?href="(.*?)"', 'refSourceName': 'Source.*\\s*.*?>(.*?)<', 'refSourceID': 'result-source.*\\s*.*?ID:(.*)\\)'} ref_list_parameters = {'refCountryTerritory': 'Territory">(.*)<', 'refKeywords': 'keywords">(.*)<'} for (parameter_name, regex_pattern) in ref_string_parameters.items(): re_pat = re.compile(regex_pattern) single_reference[parameter_name] = get_value_or_none(re_pat, reftext) for (parameter_name, regex_pattern) in ref_list_parameters.items(): re_pat = re.compile(regex_pattern) single_reference[parameter_name] = get_list_or_none(re_pat, reftext) data['literature_references'].append(single_reference) if print_data: for (key, value) in data.items(): print(((key + ' : ') + str(value))) with open((('literature\\' + data['name'][:150]) + '.json'), 'w') as outfile: json.dump(data, outfile, indent=2)
From the page ( 'ecolex.org'+ suffix ) we grab the relevant metadata (eg. type, document Type, name, reference, number, date, source name and source link, status, subject, keywords, treaty name and link, meeting name and link, website, abstract, ...). The data is then saved into a dictionary with parameter names as keys and the grabbed results as the values. Example: data["category"] = "Treaty decision" data["name"] = "Decision XXIX_21 _ Membership of the Implementation Committee" In the end the dictionary is saved into a json file named (data["name"] without forbidden characters and length limited to 100).json Parameters: suffix : string the suffix of the url from which we are extracting the data. The suffix string is everything that comes after the 'ecolex.org' print_data : boolean Optional parameter that is by default set to False. In case it is set to True, the function will at the end also print what it managed to extract from the page. Returns None
crawlers/ecolex/get_content_literature.py
get_content
KraljSamo/text_embedding_service_entrypoint
1
python
def get_content(suffix, print_data=False): '\n    From the page ( \'ecolex.org\'+ suffix ) we grab the relevant metadata (eg. type, document Type, name, reference, number,\n    date, source name and source link, status, subject, keywords, treaty name and link, meeting name and link, website, abstract,\n    ...).\n    The data is then saved into a dictionary with parameter names as keys and the grabbed results as the values.\n\n    Example:\n\n    data["category"] = "Treaty decision"\n    data["name"] = "Decision XXIX_21 _ Membership of the Implementation Committee"\n\n    In the end the dictionary is saved into a json file named (data["name"] without forbidden characters and \n    length limited to 100).json\n\n    Parameters:\n        suffix : string\n            the suffix of the url from which we are extracting the data. The suffix string is everything that comes \n            after the \'ecolex.org\'\n\n        print_data : boolean \n            Optional parameter that is by default set to False. In case it is set to True, the function will at the end \n            also print what it managed to extract from the page.\n\n    Returns \n        None\n    ' data = dict() data['URL'] = (BASE_URL + suffix) get_page = requests.get((BASE_URL + suffix)) if (get_page.status_code != 200): print('Request Denied!', suffix) page_text = get_page.text soup = BeautifulSoup(page_text, 'html.parser') important_text = str(soup.find('article')) string_parameters = {'date': '<dt>Date.*\\s*<dd>(.*?)<', 'sourceLink': 'Source.*\\s*.*\\s*.*?href="(.*?)"', 'sourceName': 'Source.*\\s*.*\\s*.*?>(.*?)<', 'sourceID': '\\(ID:.*?>(.*?)<', 'publisher': 'Publisher.*\\s*.*\\s*(.*)', 'placePublication': 'Place of publication.*\\s*.*\\s*.*\\s*\\|(.*)', 'ISBN': 'ISBN.*\\s*<dd>(.*?)<', 'ISSN': 'ISSN.*\\s*<dd>(.*?)<', 'pages': 'Pages.*\\s*<dd>(\\d*)', 'documentType': 'Document type.*\\s*<dd>(.*?)<', 'fullTextLink': 'Full text.*\\s*.*\\s*.*?href="(.*?)"', 'website': 'Website.*\\s*.*\\s*<a href="(.*?)"', 'basin': 'Basin.*\\s*<dd>(.*?)<', 'fieldOfApplication': 'Field of application.*\\s*<dd>(.*?)<', 'DOI': 'DOI.*\\s*.*\\s*<a href="(.*?)"', 'journal/series': 'Journal\\/Series.*\\s*<dd>\\s*(.*\\s*\\|.*)'} list_parameters = {'author': 'uthor.*\\s*<dd>(.*?)<', 'language': 'Language.*\\s*<dd>(.*?)<', 'country/Territory': 'Country\\/Territory.*\\s*<dd>(.*?)<', 'subject': 'Subject.*\\s*<dd>(.*?)<', 'geographicalArea': 'Geographical area.*\\s*<dd>(.*?)<'} for (parameter_name, regex_pattern) in string_parameters.items(): re_pat = re.compile(regex_pattern) data[parameter_name] = get_value_or_none(re_pat, important_text) for (parameter_name, regex_pattern) in list_parameters.items(): re_pat = re.compile(regex_pattern) data[parameter_name] = get_list_or_none(re_pat, important_text) data['category'] = 'literature' re_name = re.compile('<h1>(.*?)<') data['name'] = get_value_or_none(re_name, important_text) if (data['name'] is not None): data['name'] = remove_forbidden_characters(data['name']) else: print('Name of the file not found!', suffix) re_keyword = re.compile('span class="tag">(.*?)<') data['keyword'] = re.findall(re_keyword, important_text) re_abstract = re.compile('class="abstract">(.*)') data['abstract'] = get_value_or_none(re_abstract, important_text) ref_section = soup.find('article').find('section', {'id': 'other-references'}) if (ref_section is not None): data['other_references'] = list() other_refs = ref_section.find_all('dl') for each_reference in other_refs: reftext = str(each_reference) single_reference = dict() ref_string_parameters = {'refType': '<dt>(.*?)<', 'refLink': 'result-title.*\\s*.*?href="(.*)"', 'refName': 'result-title.*\\s*.*\\s*title="(.*)"', 'refDocumentType': 'Document type">(.*?)<', 'refPlaceOfAdoption': 'Place of adoption">(.*?)<', 'refDate': 'Date:(.*?)"', 'refSourceID': 'source.*\\s*.*?ID:(.*?)<', 'refSourceLink': 'source.*\\s*.*?href="(.*?)"', 'refSourceName': 'source.*\\s*.*?href.*?>(.*?)<'} ref_list_parameters = {'refKeywords': 'keywords">(.*?)<'} for (parameter_name, regex_pattern) in ref_string_parameters.items(): re_pat = re.compile(regex_pattern) single_reference[parameter_name] = get_value_or_none(re_pat, reftext) for (parameter_name, regex_pattern) in ref_list_parameters.items(): re_pat = re.compile(regex_pattern) single_reference[parameter_name] = get_list_or_none(re_pat, reftext) data['other_references'].append(single_reference) ref_section_literature = soup.find('article').find('section', {'id': 'literature-references'}) if (ref_section_literature is not None): data['literature_references'] = [] literature_references = ref_section_literature.find('dl') for each_reference in literature_references: reftext = str(each_reference) single_reference = dict() ref_string_parameters = {'refName': 'result-title.*\\s*.*\\s*.*?>(.*?)<', 'refLink': 'result-title.*\\s*.*?href="(.*?)"', 'refAuthor': 'uthor:.*\\s*.*?>(.*?)<', 'refPublishedIn': 'details.*\\s*.*?In:.*?span>(.*?)<', 'refPublishedInWhere': 'details.*\\s*.*In.*\\s*\\|(.*)', 'refPublisher': 'Publisher.*?span>(.*)<', 'refPublicationPlace': 'Publication place">(.*)<', 'refPublicationDate': 'ublication date">(.*)<', 'refSourceLink': 'Source.*\\s*.*?href="(.*?)"', 'refSourceName': 'Source.*\\s*.*?>(.*?)<', 'refSourceID': 'result-source.*\\s*.*?ID:(.*)\\)'} ref_list_parameters = {'refCountryTerritory': 'Territory">(.*)<', 'refKeywords': 'keywords">(.*)<'} for (parameter_name, regex_pattern) in ref_string_parameters.items(): re_pat = re.compile(regex_pattern) single_reference[parameter_name] = get_value_or_none(re_pat, reftext) for (parameter_name, regex_pattern) in ref_list_parameters.items(): re_pat = re.compile(regex_pattern) single_reference[parameter_name] = get_list_or_none(re_pat, reftext) data['literature_references'].append(single_reference) if print_data: for (key, value) in data.items(): print(((key + ' : ') + str(value))) with open((('literature\\' + data['name'][:150]) + '.json'), 'w') as outfile: json.dump(data, outfile, indent=2)
def get_content(suffix, print_data=False): '\n    From the page ( \'ecolex.org\'+ suffix ) we grab the relevant metadata (eg. type, document Type, name, reference, number,\n    date, source name and source link, status, subject, keywords, treaty name and link, meeting name and link, website, abstract,\n    ...).\n    The data is then saved into a dictionary with parameter names as keys and the grabbed results as the values.\n\n    Example:\n\n    data["category"] = "Treaty decision"\n    data["name"] = "Decision XXIX_21 _ Membership of the Implementation Committee"\n\n    In the end the dictionary is saved into a json file named (data["name"] without forbidden characters and \n    length limited to 100).json\n\n    Parameters:\n        suffix : string\n            the suffix of the url from which we are extracting the data. The suffix string is everything that comes \n            after the \'ecolex.org\'\n\n        print_data : boolean \n            Optional parameter that is by default set to False. In case it is set to True, the function will at the end \n            also print what it managed to extract from the page.\n\n    Returns \n        None\n    ' data = dict() data['URL'] = (BASE_URL + suffix) get_page = requests.get((BASE_URL + suffix)) if (get_page.status_code != 200): print('Request Denied!', suffix) page_text = get_page.text soup = BeautifulSoup(page_text, 'html.parser') important_text = str(soup.find('article')) string_parameters = {'date': '<dt>Date.*\\s*<dd>(.*?)<', 'sourceLink': 'Source.*\\s*.*\\s*.*?href="(.*?)"', 'sourceName': 'Source.*\\s*.*\\s*.*?>(.*?)<', 'sourceID': '\\(ID:.*?>(.*?)<', 'publisher': 'Publisher.*\\s*.*\\s*(.*)', 'placePublication': 'Place of publication.*\\s*.*\\s*.*\\s*\\|(.*)', 'ISBN': 'ISBN.*\\s*<dd>(.*?)<', 'ISSN': 'ISSN.*\\s*<dd>(.*?)<', 'pages': 'Pages.*\\s*<dd>(\\d*)', 'documentType': 'Document type.*\\s*<dd>(.*?)<', 'fullTextLink': 'Full text.*\\s*.*\\s*.*?href="(.*?)"', 'website': 'Website.*\\s*.*\\s*<a href="(.*?)"', 'basin': 'Basin.*\\s*<dd>(.*?)<', 'fieldOfApplication': 'Field of application.*\\s*<dd>(.*?)<', 'DOI': 'DOI.*\\s*.*\\s*<a href="(.*?)"', 'journal/series': 'Journal\\/Series.*\\s*<dd>\\s*(.*\\s*\\|.*)'} list_parameters = {'author': 'uthor.*\\s*<dd>(.*?)<', 'language': 'Language.*\\s*<dd>(.*?)<', 'country/Territory': 'Country\\/Territory.*\\s*<dd>(.*?)<', 'subject': 'Subject.*\\s*<dd>(.*?)<', 'geographicalArea': 'Geographical area.*\\s*<dd>(.*?)<'} for (parameter_name, regex_pattern) in string_parameters.items(): re_pat = re.compile(regex_pattern) data[parameter_name] = get_value_or_none(re_pat, important_text) for (parameter_name, regex_pattern) in list_parameters.items(): re_pat = re.compile(regex_pattern) data[parameter_name] = get_list_or_none(re_pat, important_text) data['category'] = 'literature' re_name = re.compile('<h1>(.*?)<') data['name'] = get_value_or_none(re_name, important_text) if (data['name'] is not None): data['name'] = remove_forbidden_characters(data['name']) else: print('Name of the file not found!', suffix) re_keyword = re.compile('span class="tag">(.*?)<') data['keyword'] = re.findall(re_keyword, important_text) re_abstract = re.compile('class="abstract">(.*)') data['abstract'] = get_value_or_none(re_abstract, important_text) ref_section = soup.find('article').find('section', {'id': 'other-references'}) if (ref_section is not None): data['other_references'] = list() other_refs = ref_section.find_all('dl') for each_reference in other_refs: reftext = str(each_reference) single_reference = dict() ref_string_parameters = {'refType': '<dt>(.*?)<', 'refLink': 'result-title.*\\s*.*?href="(.*)"', 'refName': 'result-title.*\\s*.*\\s*title="(.*)"', 'refDocumentType': 'Document type">(.*?)<', 'refPlaceOfAdoption': 'Place of adoption">(.*?)<', 'refDate': 'Date:(.*?)"', 'refSourceID': 'source.*\\s*.*?ID:(.*?)<', 'refSourceLink': 'source.*\\s*.*?href="(.*?)"', 'refSourceName': 'source.*\\s*.*?href.*?>(.*?)<'} ref_list_parameters = {'refKeywords': 'keywords">(.*?)<'} for (parameter_name, regex_pattern) in ref_string_parameters.items(): re_pat = re.compile(regex_pattern) single_reference[parameter_name] = get_value_or_none(re_pat, reftext) for (parameter_name, regex_pattern) in ref_list_parameters.items(): re_pat = re.compile(regex_pattern) single_reference[parameter_name] = get_list_or_none(re_pat, reftext) data['other_references'].append(single_reference) ref_section_literature = soup.find('article').find('section', {'id': 'literature-references'}) if (ref_section_literature is not None): data['literature_references'] = [] literature_references = ref_section_literature.find('dl') for each_reference in literature_references: reftext = str(each_reference) single_reference = dict() ref_string_parameters = {'refName': 'result-title.*\\s*.*\\s*.*?>(.*?)<', 'refLink': 'result-title.*\\s*.*?href="(.*?)"', 'refAuthor': 'uthor:.*\\s*.*?>(.*?)<', 'refPublishedIn': 'details.*\\s*.*?In:.*?span>(.*?)<', 'refPublishedInWhere': 'details.*\\s*.*In.*\\s*\\|(.*)', 'refPublisher': 'Publisher.*?span>(.*)<', 'refPublicationPlace': 'Publication place">(.*)<', 'refPublicationDate': 'ublication date">(.*)<', 'refSourceLink': 'Source.*\\s*.*?href="(.*?)"', 'refSourceName': 'Source.*\\s*.*?>(.*?)<', 'refSourceID': 'result-source.*\\s*.*?ID:(.*)\\)'} ref_list_parameters = {'refCountryTerritory': 'Territory">(.*)<', 'refKeywords': 'keywords">(.*)<'} for (parameter_name, regex_pattern) in ref_string_parameters.items(): re_pat = re.compile(regex_pattern) single_reference[parameter_name] = get_value_or_none(re_pat, reftext) for (parameter_name, regex_pattern) in ref_list_parameters.items(): re_pat = re.compile(regex_pattern) single_reference[parameter_name] = get_list_or_none(re_pat, reftext) data['literature_references'].append(single_reference) if print_data: for (key, value) in data.items(): print(((key + ' : ') + str(value))) with open((('literature\\' + data['name'][:150]) + '.json'), 'w') as outfile: json.dump(data, outfile, indent=2)<|docstring|>From the page ( 'ecolex.org'+ suffix ) we grab the relevant metadata (eg. type, document Type, name, reference, number, date, source name and source link, status, subject, keywords, treaty name and link, meeting name and link, website, abstract, ...). The data is then saved into a dictionary with parameter names as keys and the grabbed results as the values. Example: data["category"] = "Treaty decision" data["name"] = "Decision XXIX_21 _ Membership of the Implementation Committee" In the end the dictionary is saved into a json file named (data["name"] without forbidden characters and length limited to 100).json Parameters: suffix : string the suffix of the url from which we are extracting the data. The suffix string is everything that comes after the 'ecolex.org' print_data : boolean Optional parameter that is by default set to False. In case it is set to True, the function will at the end also print what it managed to extract from the page. Returns None<|endoftext|>
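A minimal driver sketch for the scraper record above. It assumes BASE_URL, requests, BeautifulSoup, json, re and the helpers get_value_or_none, get_list_or_none and remove_forbidden_characters are already defined in the same module; the suffix is a placeholder, not a real ecolex.org path, and the function writes its JSON under a Windows-style 'literature\' prefix.

import os

# Hypothetical call; make sure a 'literature' output folder exists first.
os.makedirs('literature', exist_ok=True)
example_suffix = '/details/literature/some-record-suffix'   # placeholder suffix
get_content(example_suffix, print_data=True)                # writes literature\<name>.json and prints the fields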
6c4049a53c313665ddf5770402f19081b6f6c983e90006548527544bf12a4fe9
def create_mutual_information_matrix(sources_lagged, sinks, start_date, end_date): 'Takes in the lagged sources and calculates the mutual information between them and the\n sinks. mutual information is used' start_date_str = start_date.strftime('%Y-%m-%d') end_date_str = end_date.strftime('%Y-%m-%d') sources_lagged.index = pd.to_datetime(sources_lagged.index) sinks.index = pd.to_datetime(sinks.index) sources_clipped = sources_lagged[start_date_str:end_date_str] sinks_clipped = sinks[start_date_str:end_date_str] bins = [11, 11, 11] dfs = pd.DataFrame() MI_array = np.zeros((sources_clipped.shape[1], sinks_clipped.shape[1])) for (i, src_name) in enumerate(sources_clipped): for (j, snk_name) in enumerate(sinks_clipped): temp_src = sources_clipped[src_name] temp_snk = sinks_clipped[snk_name] paired = temp_src.to_frame().join(temp_snk).to_numpy() (MI, n) = TEpython_ParallelNAN2.mutinfo_new(paired, nbins=bins) MI_array[(i, j)] = MI mat = pd.DataFrame(MI_array.T, columns=sources_clipped.columns) mat = mat.set_index(sinks_clipped.columns) dfs = dfs.append(mat) return dfs
Takes in the lagged sources and calculates the mutual information between them and the sinks. mutual information is used
methods_exploration/functions/data_exploration_functions.py
create_mutual_information_matrix
galengorski/drb-estuary-salinity-ml
0
python
def create_mutual_information_matrix(sources_lagged, sinks, start_date, end_date): 'Takes in the lagged sources and calculates the mutual information between them and the\n sinks. mutual information is used' start_date_str = start_date.strftime('%Y-%m-%d') end_date_str = end_date.strftime('%Y-%m-%d') sources_lagged.index = pd.to_datetime(sources_lagged.index) sinks.index = pd.to_datetime(sinks.index) sources_clipped = sources_lagged[start_date_str:end_date_str] sinks_clipped = sinks[start_date_str:end_date_str] bins = [11, 11, 11] dfs = pd.DataFrame() MI_array = np.zeros((sources_clipped.shape[1], sinks_clipped.shape[1])) for (i, src_name) in enumerate(sources_clipped): for (j, snk_name) in enumerate(sinks_clipped): temp_src = sources_clipped[src_name] temp_snk = sinks_clipped[snk_name] paired = temp_src.to_frame().join(temp_snk).to_numpy() (MI, n) = TEpython_ParallelNAN2.mutinfo_new(paired, nbins=bins) MI_array[(i, j)] = MI mat = pd.DataFrame(MI_array.T, columns=sources_clipped.columns) mat = mat.set_index(sinks_clipped.columns) dfs = dfs.append(mat) return dfs
def create_mutual_information_matrix(sources_lagged, sinks, start_date, end_date): 'Takes in the lagged sources and calculates the mutual information between them and the\n sinks. mutual information is used' start_date_str = start_date.strftime('%Y-%m-%d') end_date_str = end_date.strftime('%Y-%m-%d') sources_lagged.index = pd.to_datetime(sources_lagged.index) sinks.index = pd.to_datetime(sinks.index) sources_clipped = sources_lagged[start_date_str:end_date_str] sinks_clipped = sinks[start_date_str:end_date_str] bins = [11, 11, 11] dfs = pd.DataFrame() MI_array = np.zeros((sources_clipped.shape[1], sinks_clipped.shape[1])) for (i, src_name) in enumerate(sources_clipped): for (j, snk_name) in enumerate(sinks_clipped): temp_src = sources_clipped[src_name] temp_snk = sinks_clipped[snk_name] paired = temp_src.to_frame().join(temp_snk).to_numpy() (MI, n) = TEpython_ParallelNAN2.mutinfo_new(paired, nbins=bins) MI_array[(i, j)] = MI mat = pd.DataFrame(MI_array.T, columns=sources_clipped.columns) mat = mat.set_index(sinks_clipped.columns) dfs = dfs.append(mat) return dfs<|docstring|>Takes in the lagged sources and calculates the mutual information between them and the sinks. mutual information is used<|endoftext|>
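A usage sketch for the record above, with invented column names and dates; it assumes TEpython_ParallelNAN2 is importable and an older pandas release in which DataFrame.append (used inside the function) still exists.

import datetime
import numpy as np
import pandas as pd

# Toy daily series for two lagged sources and one sink on a shared index.
idx = pd.date_range('2019-01-01', periods=120, freq='D')
rng = np.random.default_rng(0)
sources_lagged = pd.DataFrame({'discharge_lag1': rng.random(120),
                               'precip_lag3': rng.random(120)}, index=idx)
sinks = pd.DataFrame({'salinity': rng.random(120)}, index=idx)

mi = create_mutual_information_matrix(sources_lagged, sinks,
                                      start_date=datetime.date(2019, 1, 15),
                                      end_date=datetime.date(2019, 4, 1))
print(mi)  # rows are sink names, columns are source names, values are MI estimates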
cb56d1606d56889d300d52ec89f83dba1616af2c4a2e212525e29b58d3b8bd77
def _get_variant(self, variant_file: Path) -> GameVariant: '\n Return the GameVariant for the variant specified by variant_file. \n Searches through the vgdl code to find the correct type:\n {chaser, fleeing, immovable}\n ' code = variant_file.read_text() return GameVariant(path=str(variant_file), enemy_type=re.search('enemy > (\\S+)', code)[1].lower(), message_type=re.search('message > (\\S+)', code)[1].lower(), goal_type=re.search('goal > (\\S+)', code)[1].lower(), decoy_message_type=re.search('decoy_message > (\\S+)', code)[1].lower(), decoy_goal_type=re.search('decoy_goal > (\\S+)', code)[1].lower())
Return the GameVariant for the variant specified by variant_file. Searches through the vgdl code to find the correct type: {chaser, fleeing, immovable}
messenger/envs/stage_three.py
_get_variant
ahjwang/messenger-emma
13
python
def _get_variant(self, variant_file: Path) -> GameVariant: '\n Return the GameVariant for the variant specified by variant_file. \n Searches through the vgdl code to find the correct type:\n {chaser, fleeing, immovable}\n ' code = variant_file.read_text() return GameVariant(path=str(variant_file), enemy_type=re.search('enemy > (\\S+)', code)[1].lower(), message_type=re.search('message > (\\S+)', code)[1].lower(), goal_type=re.search('goal > (\\S+)', code)[1].lower(), decoy_message_type=re.search('decoy_message > (\\S+)', code)[1].lower(), decoy_goal_type=re.search('decoy_goal > (\\S+)', code)[1].lower())
def _get_variant(self, variant_file: Path) -> GameVariant: '\n Return the GameVariant for the variant specified by variant_file. \n Searches through the vgdl code to find the correct type:\n {chaser, fleeing, immovable}\n ' code = variant_file.read_text() return GameVariant(path=str(variant_file), enemy_type=re.search('enemy > (\\S+)', code)[1].lower(), message_type=re.search('message > (\\S+)', code)[1].lower(), goal_type=re.search('goal > (\\S+)', code)[1].lower(), decoy_message_type=re.search('decoy_message > (\\S+)', code)[1].lower(), decoy_goal_type=re.search('decoy_goal > (\\S+)', code)[1].lower())<|docstring|>Return the GameVariant for the variant specified by variant_file. Searches through the vgdl code to find the correct type: {chaser, fleeing, immovable}<|endoftext|>
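To make the regex extraction above concrete, here is an illustrative VGDL-like snippet; the real variant files ship with the Messenger package, so both the text and the expected matches are only for illustration.

import re

code = """
    enemy > CHASER
    message > FLEEING
    goal > IMMOVABLE
    decoy_message > CHASER
    decoy_goal > FLEEING
"""  # invented snippet in the spirit of the variant files

print(re.search(r'enemy > (\S+)', code)[1].lower())        # -> chaser
print(re.search(r'decoy_goal > (\S+)', code)[1].lower())   # -> fleeing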
bcd8aa56c28593309fd3e79af98f4b349fbd2bb2f01f8bf429edb3c2c3b71e82
def _convert_obs(self, vgdl_obs): '\n Return a grid built from the vgdl observation which is a\n KeyValueObservation object (see vgdl code for details).\n ' entity_locs = Grid(layers=5, shuffle=self.shuffle_obs) avatar_locs = Grid(layers=1) if ('enemy.1' in vgdl_obs): entity_locs.add(self.game.enemy, Position(*vgdl_obs['enemy.1']['position'])) if ('message.1' in vgdl_obs): entity_locs.add(self.game.message, Position(*vgdl_obs['message.1']['position'])) else: entity_locs.entity_count += 1 if ('goal.1' in vgdl_obs): entity_locs.add(self.game.goal, Position(*vgdl_obs['goal.1']['position'])) if ('decoy_message.1' in vgdl_obs): entity_locs.add(self.game.message, Position(*vgdl_obs['decoy_message.1']['position'])) if ('decoy_goal.1' in vgdl_obs): entity_locs.add(self.game.goal, Position(*vgdl_obs['decoy_goal.1']['position'])) if ('no_message.1' in vgdl_obs): '\n Due to a quirk in VGDL, the avatar is no_message if it starts as no_message\n even if the avatar may have acquired the message at a later point.\n To check, if it has a message, check that the class vector corresponding to\n with_message is == 1.\n ' avatar_pos = Position(*vgdl_obs['no_message.1']['position']) if (vgdl_obs['no_message.1']['class'][(- 1)] == 1): avatar = config.WITH_MESSAGE else: avatar = config.NO_MESSAGE elif ('with_message.1' in vgdl_obs): avatar_pos = Position(*vgdl_obs['with_message.1']['position']) avatar = config.WITH_MESSAGE else: return {'entities': entity_locs.grid, 'avatar': avatar_locs.grid} avatar_locs.add(avatar, avatar_pos) return {'entities': entity_locs.grid, 'avatar': avatar_locs.grid}
Return a grid built from the vgdl observation which is a KeyValueObservation object (see vgdl code for details).
messenger/envs/stage_three.py
_convert_obs
ahjwang/messenger-emma
13
python
def _convert_obs(self, vgdl_obs): '\n Return a grid built from the vgdl observation which is a\n KeyValueObservation object (see vgdl code for details).\n ' entity_locs = Grid(layers=5, shuffle=self.shuffle_obs) avatar_locs = Grid(layers=1) if ('enemy.1' in vgdl_obs): entity_locs.add(self.game.enemy, Position(*vgdl_obs['enemy.1']['position'])) if ('message.1' in vgdl_obs): entity_locs.add(self.game.message, Position(*vgdl_obs['message.1']['position'])) else: entity_locs.entity_count += 1 if ('goal.1' in vgdl_obs): entity_locs.add(self.game.goal, Position(*vgdl_obs['goal.1']['position'])) if ('decoy_message.1' in vgdl_obs): entity_locs.add(self.game.message, Position(*vgdl_obs['decoy_message.1']['position'])) if ('decoy_goal.1' in vgdl_obs): entity_locs.add(self.game.goal, Position(*vgdl_obs['decoy_goal.1']['position'])) if ('no_message.1' in vgdl_obs): '\n Due to a quirk in VGDL, the avatar is no_message if it starts as no_message\n even if the avatar may have acquired the message at a later point.\n To check, if it has a message, check that the class vector corresponding to\n with_message is == 1.\n ' avatar_pos = Position(*vgdl_obs['no_message.1']['position']) if (vgdl_obs['no_message.1']['class'][(- 1)] == 1): avatar = config.WITH_MESSAGE else: avatar = config.NO_MESSAGE elif ('with_message.1' in vgdl_obs): avatar_pos = Position(*vgdl_obs['with_message.1']['position']) avatar = config.WITH_MESSAGE else: return {'entities': entity_locs.grid, 'avatar': avatar_locs.grid} avatar_locs.add(avatar, avatar_pos) return {'entities': entity_locs.grid, 'avatar': avatar_locs.grid}
def _convert_obs(self, vgdl_obs): '\n Return a grid built from the vgdl observation which is a\n KeyValueObservation object (see vgdl code for details).\n ' entity_locs = Grid(layers=5, shuffle=self.shuffle_obs) avatar_locs = Grid(layers=1) if ('enemy.1' in vgdl_obs): entity_locs.add(self.game.enemy, Position(*vgdl_obs['enemy.1']['position'])) if ('message.1' in vgdl_obs): entity_locs.add(self.game.message, Position(*vgdl_obs['message.1']['position'])) else: entity_locs.entity_count += 1 if ('goal.1' in vgdl_obs): entity_locs.add(self.game.goal, Position(*vgdl_obs['goal.1']['position'])) if ('decoy_message.1' in vgdl_obs): entity_locs.add(self.game.message, Position(*vgdl_obs['decoy_message.1']['position'])) if ('decoy_goal.1' in vgdl_obs): entity_locs.add(self.game.goal, Position(*vgdl_obs['decoy_goal.1']['position'])) if ('no_message.1' in vgdl_obs): '\n Due to a quirk in VGDL, the avatar is no_message if it starts as no_message\n even if the avatar may have acquired the message at a later point.\n To check, if it has a message, check that the class vector corresponding to\n with_message is == 1.\n ' avatar_pos = Position(*vgdl_obs['no_message.1']['position']) if (vgdl_obs['no_message.1']['class'][(- 1)] == 1): avatar = config.WITH_MESSAGE else: avatar = config.NO_MESSAGE elif ('with_message.1' in vgdl_obs): avatar_pos = Position(*vgdl_obs['with_message.1']['position']) avatar = config.WITH_MESSAGE else: return {'entities': entity_locs.grid, 'avatar': avatar_locs.grid} avatar_locs.add(avatar, avatar_pos) return {'entities': entity_locs.grid, 'avatar': avatar_locs.grid}<|docstring|>Return a grid built from the vgdl observation which is a KeyValueObservation object (see vgdl code for details).<|endoftext|>
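A sketch of the observation dictionary shape this method consumes; the key names follow the branches above, while the positions and class vectors are invented, and Grid, Position and config come from the surrounding messenger package.

# Hypothetical KeyValueObservation-style dict; all values are made up.
vgdl_obs = {
    'enemy.1':      {'position': (3, 4), 'class': [1, 0, 0, 0]},
    'message.1':    {'position': (7, 1), 'class': [0, 1, 0, 0]},
    'goal.1':       {'position': (0, 9), 'class': [0, 0, 1, 0]},
    # a 1 in the last slot of 'class' means the avatar already holds the message
    'no_message.1': {'position': (5, 5), 'class': [0, 0, 0, 1]},
}
# obs = env._convert_obs(vgdl_obs)   # -> {'entities': <grid>, 'avatar': <grid>}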
2611c9ee2cac77f406386dc594007f31f6335ed6694c8628b402fa13f288c76d
def reset(self, variant_id: int=None, **kwargs): '\n Resets the current environment. NOTE: We remake the environment each time.\n This is a workaround to a bug in py-vgdl, where env.reset() does not\n properly reset the environment. kwargs go to get_document().\n ' self.game = random.choice(self.all_games) if (variant_id is not None): variant = self.game_variants[variant_id] else: variant = random.choice(self.game_variants) init_state = random.choice(self.init_states) self._envargs = {'game_file': variant.path, 'level_file': init_state, 'notable_sprites': self.notable_sprites.copy(), 'obs_type': 'objects', 'block_size': 34} self.env = VGDLEnv(**self._envargs) vgdl_obs = self.env.reset() all_npcs = (Descr(entity=self.game.enemy.name, role='enemy', type=variant.enemy_type), Descr(entity=self.game.message.name, role='message', type=variant.message_type), Descr(entity=self.game.goal.name, role='goal', type=variant.goal_type), Descr(entity=self.game.message.name, role='enemy', type=variant.decoy_message_type), Descr(entity=self.game.goal.name, role='enemy', type=variant.decoy_goal_type)) manual = self.text_manual.get_document_plus(*all_npcs, **kwargs) manual.append(self.text_manual.get_decoy_descriptor(entity=self.game.enemy.name, not_of_role='enemy', not_of_type=variant.enemy_type)) if self.shuffle_obs: random.shuffle(manual) return (self._convert_obs(vgdl_obs), manual)
Resets the current environment. NOTE: We remake the environment each time. This is a workaround to a bug in py-vgdl, where env.reset() does not properly reset the environment. kwargs go to get_document().
messenger/envs/stage_three.py
reset
ahjwang/messenger-emma
13
python
def reset(self, variant_id: int=None, **kwargs): '\n Resets the current environment. NOTE: We remake the environment each time.\n This is a workaround to a bug in py-vgdl, where env.reset() does not\n properly reset the environment. kwargs go to get_document().\n ' self.game = random.choice(self.all_games) if (variant_id is not None): variant = self.game_variants[variant_id] else: variant = random.choice(self.game_variants) init_state = random.choice(self.init_states) self._envargs = {'game_file': variant.path, 'level_file': init_state, 'notable_sprites': self.notable_sprites.copy(), 'obs_type': 'objects', 'block_size': 34} self.env = VGDLEnv(**self._envargs) vgdl_obs = self.env.reset() all_npcs = (Descr(entity=self.game.enemy.name, role='enemy', type=variant.enemy_type), Descr(entity=self.game.message.name, role='message', type=variant.message_type), Descr(entity=self.game.goal.name, role='goal', type=variant.goal_type), Descr(entity=self.game.message.name, role='enemy', type=variant.decoy_message_type), Descr(entity=self.game.goal.name, role='enemy', type=variant.decoy_goal_type)) manual = self.text_manual.get_document_plus(*all_npcs, **kwargs) manual.append(self.text_manual.get_decoy_descriptor(entity=self.game.enemy.name, not_of_role='enemy', not_of_type=variant.enemy_type)) if self.shuffle_obs: random.shuffle(manual) return (self._convert_obs(vgdl_obs), manual)
def reset(self, variant_id: int=None, **kwargs): '\n Resets the current environment. NOTE: We remake the environment each time.\n This is a workaround to a bug in py-vgdl, where env.reset() does not\n properly reset the environment. kwargs go to get_document().\n ' self.game = random.choice(self.all_games) if (variant_id is not None): variant = self.game_variants[variant_id] else: variant = random.choice(self.game_variants) init_state = random.choice(self.init_states) self._envargs = {'game_file': variant.path, 'level_file': init_state, 'notable_sprites': self.notable_sprites.copy(), 'obs_type': 'objects', 'block_size': 34} self.env = VGDLEnv(**self._envargs) vgdl_obs = self.env.reset() all_npcs = (Descr(entity=self.game.enemy.name, role='enemy', type=variant.enemy_type), Descr(entity=self.game.message.name, role='message', type=variant.message_type), Descr(entity=self.game.goal.name, role='goal', type=variant.goal_type), Descr(entity=self.game.message.name, role='enemy', type=variant.decoy_message_type), Descr(entity=self.game.goal.name, role='enemy', type=variant.decoy_goal_type)) manual = self.text_manual.get_document_plus(*all_npcs, **kwargs) manual.append(self.text_manual.get_decoy_descriptor(entity=self.game.enemy.name, not_of_role='enemy', not_of_type=variant.enemy_type)) if self.shuffle_obs: random.shuffle(manual) return (self._convert_obs(vgdl_obs), manual)<|docstring|>Resets the current environment. NOTE: We remake the environment each time. This is a workaround to a bug in py-vgdl, where env.reset() does not properly reset the environment. kwargs go to get_document().<|endoftext|>
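A hedged episode-start sketch for the environment above; the wrapper class name (StageThree here) and its constructor arguments are assumptions based on the module path, and extra keyword arguments are forwarded to get_document().

# Hypothetical episode start for the stage-three environment.
env = StageThree(shuffle_obs=True)      # class and constructor names are assumptions
obs, manual = env.reset(variant_id=0)   # obs: {'entities': ..., 'avatar': ...}
for sentence in manual:                 # free-text descriptions of the entities
    print(sentence)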
dba59fe509f157b7e00afc511640d6b1eda635d4b60d0f52e7d99c68793bfa5e
def convertBST(self, root): '\n # First implement it with the recursive method\n Solution._convert(root, [0]) # pass a list by reference to simplify the code\n return root\n ' if root: sum_ = 0 stack = [] p = root while (p or (len(stack) > 0)): while p: stack.append(p) p = p.right p = stack[(- 1)] stack.pop() p.val += sum_ sum_ = p.val p = p.left return root
# First implement it with the recursive method Solution._convert(root, [0]) # pass a list by reference to simplify the code return root
convert_bst_to_greater_tree.py
convertBST
Jwy-jump/python_codesets
0
python
def convertBST(self, root): '\n # First implement it with the recursive method\n Solution._convert(root, [0]) # pass a list by reference to simplify the code\n return root\n ' if root: sum_ = 0 stack = [] p = root while (p or (len(stack) > 0)): while p: stack.append(p) p = p.right p = stack[(- 1)] stack.pop() p.val += sum_ sum_ = p.val p = p.left return root
def convertBST(self, root): '\n # First implement it with the recursive method\n Solution._convert(root, [0]) # pass a list by reference to simplify the code\n return root\n ' if root: sum_ = 0 stack = [] p = root while (p or (len(stack) > 0)): while p: stack.append(p) p = p.right p = stack[(- 1)] stack.pop() p.val += sum_ sum_ = p.val p = p.left return root<|docstring|># First implement it with the recursive method Solution._convert(root, [0]) # pass a list by reference to simplify the code return root<|endoftext|>
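A worked example of the iterative reverse in-order pass above on a three-node BST, assuming the method lives on a Solution class (as the Solution._convert reference suggests) and the usual TreeNode definition.

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

#   2          greater tree         7  (= 2 + 5)
#  / \        ------------->       / \
# 1   5                   8 (=1+2+5)  5
root = TreeNode(2, TreeNode(1), TreeNode(5))
Solution().convertBST(root)
print(root.val, root.left.val, root.right.val)   # 7 8 5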
75af1159c07020b95b94a37e75991e7796f6862b32d57a2565fccc0a8e90215c
def __init__(self, msg, app_label=None, detailed_error=None, last_sql_statement=None): "Initialize the error.\n\n Args:\n msg (unicode):\n The error message.\n\n app_label (unicode, optional):\n The label of the app that failed evolution.\n\n detailed_error (unicode, optional):\n Detailed error information from the failure that triggered this\n exception. This might be another exception's error message.\n\n last_sql_statement (unicode, optional):\n The last SQL statement that was executed.\n " super(EvolutionExecutionError, self).__init__(msg) self.app_label = app_label self.detailed_error = detailed_error self.last_sql_statement = last_sql_statement
Initialize the error. Args: msg (unicode): The error message. app_label (unicode, optional): The label of the app that failed evolution. detailed_error (unicode, optional): Detailed error information from the failure that triggered this exception. This might be another exception's error message. last_sql_statement (unicode, optional): The last SQL statement that was executed.
django_evolution/errors.py
__init__
beanbaginc/django-evolution
18
python
def __init__(self, msg, app_label=None, detailed_error=None, last_sql_statement=None): "Initialize the error.\n\n Args:\n msg (unicode):\n The error message.\n\n app_label (unicode, optional):\n The label of the app that failed evolution.\n\n detailed_error (unicode, optional):\n Detailed error information from the failure that triggered this\n exception. This might be another exception's error message.\n\n last_sql_statement (unicode, optional):\n The last SQL statement that was executed.\n " super(EvolutionExecutionError, self).__init__(msg) self.app_label = app_label self.detailed_error = detailed_error self.last_sql_statement = last_sql_statement
def __init__(self, msg, app_label=None, detailed_error=None, last_sql_statement=None): "Initialize the error.\n\n Args:\n msg (unicode):\n The error message.\n\n app_label (unicode, optional):\n The label of the app that failed evolution.\n\n detailed_error (unicode, optional):\n Detailed error information from the failure that triggered this\n exception. This might be another exception's error message.\n\n last_sql_statement (unicode, optional):\n The last SQL statement that was executed.\n " super(EvolutionExecutionError, self).__init__(msg) self.app_label = app_label self.detailed_error = detailed_error self.last_sql_statement = last_sql_statement<|docstring|>Initialize the error. Args: msg (unicode): The error message. app_label (unicode, optional): The label of the app that failed evolution. detailed_error (unicode, optional): Detailed error information from the failure that triggered this exception. This might be another exception's error message. last_sql_statement (unicode, optional): The last SQL statement that was executed.<|endoftext|>
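A sketch of raising and reporting the error above; the app label, SQL statement and messages are invented for illustration.

try:
    raise EvolutionExecutionError(
        'Error applying evolution for reviews: table already exists',
        app_label='reviews',                                   # invented values
        detailed_error='relation "reviews_review" already exists',
        last_sql_statement='CREATE TABLE "reviews_review" (...)')
except EvolutionExecutionError as e:
    print('%s: %s' % (e.app_label, e.last_sql_statement))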
ecbf04d19ac41a0b7aa6ceef26881c5dbca9c1033653981b7b744c614dc788ef
def __init__(self, version): 'Initialize the exception.\n\n Args:\n version (int):\n The invalid signature version.\n ' super(InvalidSignatureVersion, self).__init__(('%s is not a known signature version' % version))
Initialize the exception. Args: version (int): The invalid signature version.
django_evolution/errors.py
__init__
beanbaginc/django-evolution
18
python
def __init__(self, version): 'Initialize the exception.\n\n Args:\n version (int):\n The invalid signature version.\n ' super(InvalidSignatureVersion, self).__init__(('%s is not a known signature version' % version))
def __init__(self, version): 'Initialize the exception.\n\n Args:\n version (int):\n The invalid signature version.\n ' super(InvalidSignatureVersion, self).__init__(('%s is not a known signature version' % version))<|docstring|>Initialize the exception. Args: version (int): The invalid signature version.<|endoftext|>
6f670a8c06612b5de7a6a08446765dfa8a3243d45815e640ea40318179a7d7a4
def __init__(self, conflicts): 'Initialize the error.\n\n Args:\n conflicts (dict):\n A dictionary of conflicts, provided by the migrations system.\n ' super(MigrationConflictsError, self).__init__(("Conflicting migrations detected; multiple leaf nodes in the migration graph: (%s).\nTo fix them run 'python manage.py makemigrations --merge'" % '; '.join((('%s in %s' % (', '.join(sorted(conflict_names)), app_label)) for (app_label, conflict_names) in six.iteritems(conflicts)))))
Initialize the error. Args: conflicts (dict): A dictionary of conflicts, provided by the migrations system.
django_evolution/errors.py
__init__
beanbaginc/django-evolution
18
python
def __init__(self, conflicts): 'Initialize the error.\n\n Args:\n conflicts (dict):\n A dictionary of conflicts, provided by the migrations system.\n ' super(MigrationConflictsError, self).__init__(("Conflicting migrations detected; multiple leaf nodes in the migration graph: (%s).\nTo fix them run 'python manage.py makemigrations --merge'" % '; '.join((('%s in %s' % (', '.join(sorted(conflict_names)), app_label)) for (app_label, conflict_names) in six.iteritems(conflicts)))))
def __init__(self, conflicts): 'Initialize the error.\n\n Args:\n conflicts (dict):\n A dictionary of conflicts, provided by the migrations system.\n ' super(MigrationConflictsError, self).__init__(("Conflicting migrations detected; multiple leaf nodes in the migration graph: (%s).\nTo fix them run 'python manage.py makemigrations --merge'" % '; '.join((('%s in %s' % (', '.join(sorted(conflict_names)), app_label)) for (app_label, conflict_names) in six.iteritems(conflicts)))))<|docstring|>Initialize the error. Args: conflicts (dict): A dictionary of conflicts, provided by the migrations system.<|endoftext|>
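A worked example of the message this constructor assembles, using an invented conflicts mapping of the shape Django's migration loader reports.

conflicts = {'accounts': {'0002_add_email', '0002_add_phone'}}   # invented app and migration names
err = MigrationConflictsError(conflicts)
print(err)
# Conflicting migrations detected; multiple leaf nodes in the migration graph:
# (0002_add_email, 0002_add_phone in accounts).
# To fix them run 'python manage.py makemigrations --merge'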
93aea98df7ca27b4a4b7deea727ce9ec96f517ea7c999f48c4b13808d5c7ad7e
def __init__(self, channel): 'Constructor.\n\n Args:\n channel: A grpc.Channel.\n ' self.OpenPassWindow = channel.unary_unary('/base.FunctionalService/OpenPassWindow', request_serializer=common__pb2.ClientId.SerializeToString, response_deserializer=common__pb2.Empty.FromString) self.ClosePassWindow = channel.unary_unary('/base.FunctionalService/ClosePassWindow', request_serializer=common__pb2.ClientId.SerializeToString, response_deserializer=common__pb2.Empty.FromString) self.SetFanSpeed = channel.unary_unary('/base.FunctionalService/SetFanSpeed', request_serializer=functional__api__pb2.SenderInfo.SerializeToString, response_deserializer=common__pb2.Empty.FromString) self.SubscribeToFanSpeed = channel.unary_stream('/base.FunctionalService/SubscribeToFanSpeed', request_serializer=functional__api__pb2.SubscriberRequest.SerializeToString, response_deserializer=functional__api__pb2.Value.FromString)
Constructor. Args: channel: A grpc.Channel.
examples/grpc/python/generated/functional_api_pb2_grpc.py
__init__
niclaslind/signalbroker-server
17
python
def __init__(self, channel): 'Constructor.\n\n Args:\n channel: A grpc.Channel.\n ' self.OpenPassWindow = channel.unary_unary('/base.FunctionalService/OpenPassWindow', request_serializer=common__pb2.ClientId.SerializeToString, response_deserializer=common__pb2.Empty.FromString) self.ClosePassWindow = channel.unary_unary('/base.FunctionalService/ClosePassWindow', request_serializer=common__pb2.ClientId.SerializeToString, response_deserializer=common__pb2.Empty.FromString) self.SetFanSpeed = channel.unary_unary('/base.FunctionalService/SetFanSpeed', request_serializer=functional__api__pb2.SenderInfo.SerializeToString, response_deserializer=common__pb2.Empty.FromString) self.SubscribeToFanSpeed = channel.unary_stream('/base.FunctionalService/SubscribeToFanSpeed', request_serializer=functional__api__pb2.SubscriberRequest.SerializeToString, response_deserializer=functional__api__pb2.Value.FromString)
def __init__(self, channel): 'Constructor.\n\n Args:\n channel: A grpc.Channel.\n ' self.OpenPassWindow = channel.unary_unary('/base.FunctionalService/OpenPassWindow', request_serializer=common__pb2.ClientId.SerializeToString, response_deserializer=common__pb2.Empty.FromString) self.ClosePassWindow = channel.unary_unary('/base.FunctionalService/ClosePassWindow', request_serializer=common__pb2.ClientId.SerializeToString, response_deserializer=common__pb2.Empty.FromString) self.SetFanSpeed = channel.unary_unary('/base.FunctionalService/SetFanSpeed', request_serializer=functional__api__pb2.SenderInfo.SerializeToString, response_deserializer=common__pb2.Empty.FromString) self.SubscribeToFanSpeed = channel.unary_stream('/base.FunctionalService/SubscribeToFanSpeed', request_serializer=functional__api__pb2.SubscriberRequest.SerializeToString, response_deserializer=functional__api__pb2.Value.FromString)<|docstring|>Constructor. Args: channel: A grpc.Channel.<|endoftext|>
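A client-side sketch for the generated stub above; it assumes the conventional grpc codegen class name FunctionalServiceStub, that the generated common_pb2, functional_api_pb2 and functional_api_pb2_grpc modules are importable, and a broker address that is only a placeholder. Message fields are left unset because the .proto schemas are not shown here.

import grpc
import common_pb2
import functional_api_pb2
import functional_api_pb2_grpc

channel = grpc.insecure_channel('127.0.0.1:50051')              # placeholder broker address
stub = functional_api_pb2_grpc.FunctionalServiceStub(channel)   # assumed stub class name

client_id = common_pb2.ClientId()              # populate fields per common.proto
stub.OpenPassWindow(client_id)                 # unary-unary call returning Empty

request = functional_api_pb2.SubscriberRequest()   # populate per functional_api.proto
for value in stub.SubscribeToFanSpeed(request):    # server-streaming call
    print(value)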
5ef4075ca4c6c8e614144d827787504bf691b52e21b717a75f41d8aa1e354976
@staticmethod def create_output(df, colname): '\n this function will calculate the aggregated e and m\n given colname we would like to aggregate over\n ' return df[[colname, 'e']].groupby([colname]).sum().merge(df[[colname, 'm']].groupby([colname]).agg(AggregatedGeography.agg_moe), on=colname).reset_index().rename(columns={colname: 'census_geoid'})
this function will calculate the aggregated e and m given colname we would like to aggregate over
factfinder/geography/2010.py
create_output
EricaMaurer/db-factfinder
0
python
@staticmethod def create_output(df, colname): '\n this function will calculate the aggregated e and m\n given colname we would like to aggregate over\n ' return df[[colname, 'e']].groupby([colname]).sum().merge(df[[colname, 'm']].groupby([colname]).agg(AggregatedGeography.agg_moe), on=colname).reset_index().rename(columns={colname: 'census_geoid'})
@staticmethod def create_output(df, colname): '\n this function will calculate the aggregated e and m\n given colname we would like to aggregate over\n ' return df[[colname, 'e']].groupby([colname]).sum().merge(df[[colname, 'm']].groupby([colname]).agg(AggregatedGeography.agg_moe), on=colname).reset_index().rename(columns={colname: 'census_geoid'})<|docstring|>this function will calculate the aggregated e and m given colname we would like to aggregate over<|endoftext|>
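A toy illustration of the helper above, with an invented aggregate-geography column; it assumes AggregatedGeography.agg_moe is defined elsewhere in the class as the margin-of-error combiner.

import pandas as pd

# Invented block-group rows tagged with a hypothetical aggregate column 'nta'.
df = pd.DataFrame({
    'nta': ['BK01', 'BK01', 'BK02'],
    'e':   [100.0,  50.0,   80.0],
    'm':   [10.0,   12.0,    9.0],
})
out = AggregatedGeography.create_output(df, 'nta')
print(out)   # one row per census_geoid, with summed 'e' and agg_moe-combined 'm'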
5b32110cf17d07c7895a0bf24290411bd4f870f36d82bdf596cefff3dc0565d2
def block_group_to_cd_fp500(self, df): '\n 500 yr flood plain aggregation for block group data (ACS)\n ' df = df.merge(self.lookup_geo.loc[((~ self.lookup_geo.cd_fp_500.isna()), ['geoid_block_group', 'cd_fp_500'])].drop_duplicates(), how='right', right_on='geoid_block_group', left_on='census_geoid') output = AggregatedGeography.create_output(df, 'cd_fp_500') output['pff_variable'] = df['pff_variable'].to_list()[0] output['geotype'] = 'cd_fp_500' return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]
500 yr flood plain aggregation for block group data (ACS)
factfinder/geography/2010.py
block_group_to_cd_fp500
EricaMaurer/db-factfinder
0
python
def block_group_to_cd_fp500(self, df): '\n \n ' df = df.merge(self.lookup_geo.loc[((~ self.lookup_geo.cd_fp_500.isna()), ['geoid_block_group', 'cd_fp_500'])].drop_duplicates(), how='right', right_on='geoid_block_group', left_on='census_geoid') output = AggregatedGeography.create_output(df, 'cd_fp_500') output['pff_variable'] = df['pff_variable'].to_list()[0] output['geotype'] = 'cd_fp_500' return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]
def block_group_to_cd_fp500(self, df): '\n \n ' df = df.merge(self.lookup_geo.loc[((~ self.lookup_geo.cd_fp_500.isna()), ['geoid_block_group', 'cd_fp_500'])].drop_duplicates(), how='right', right_on='geoid_block_group', left_on='census_geoid') output = AggregatedGeography.create_output(df, 'cd_fp_500') output['pff_variable'] = df['pff_variable'].to_list()[0] output['geotype'] = 'cd_fp_500' return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]<|docstring|>500 yr flood plain aggregation for block group data (ACS)<|endoftext|>
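The fp100, park-access and block-level variants below follow the same pattern, so a single hedged sketch covers the family. The GEOIDs here are placeholders and would have to be real 2010 block-group IDs present in self.lookup_geo, and the bare AggregatedGeography() construction (loading the crosswalk internally) is an assumption.

import pandas as pd

# Placeholder block-group estimates for one pff_variable.
df = pd.DataFrame({
    'census_geoid': ['360470001001', '360470001002'],   # must exist in lookup_geo in practice
    'pff_variable': ['pop_total', 'pop_total'],
    'e': [1200.0, 800.0],
    'm': [150.0, 90.0],
})
geo = AggregatedGeography()                 # constructor arguments, if any, are not shown in this record
agg = geo.block_group_to_cd_fp500(df)
print(agg.columns.tolist())                 # ['census_geoid', 'pff_variable', 'geotype', 'e', 'm']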
96ef224971d892232a17b5410e7c2d0d11673faddd1a357644149cbc50053fbf
def block_group_to_cd_fp100(self, df): '\n 100 yr flood plain aggregation for block group data (ACS)\n ' df = df.merge(self.lookup_geo.loc[((~ self.lookup_geo.cd_fp_100.isna()), ['geoid_block_group', 'cd_fp_100'])].drop_duplicates(), how='right', right_on='geoid_block_group', left_on='census_geoid') output = AggregatedGeography.create_output(df, 'cd_fp_100') output['pff_variable'] = df['pff_variable'].to_list()[0] output['geotype'] = 'cd_fp_100' return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]
100 yr flood plain aggregation for block group data (ACS)
factfinder/geography/2010.py
block_group_to_cd_fp100
EricaMaurer/db-factfinder
0
python
def block_group_to_cd_fp100(self, df): '\n \n ' df = df.merge(self.lookup_geo.loc[((~ self.lookup_geo.cd_fp_100.isna()), ['geoid_block_group', 'cd_fp_100'])].drop_duplicates(), how='right', right_on='geoid_block_group', left_on='census_geoid') output = AggregatedGeography.create_output(df, 'cd_fp_100') output['pff_variable'] = df['pff_variable'].to_list()[0] output['geotype'] = 'cd_fp_100' return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]
def block_group_to_cd_fp100(self, df): '\n \n ' df = df.merge(self.lookup_geo.loc[((~ self.lookup_geo.cd_fp_100.isna()), ['geoid_block_group', 'cd_fp_100'])].drop_duplicates(), how='right', right_on='geoid_block_group', left_on='census_geoid') output = AggregatedGeography.create_output(df, 'cd_fp_100') output['pff_variable'] = df['pff_variable'].to_list()[0] output['geotype'] = 'cd_fp_100' return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]<|docstring|>100 yr flood plain aggregation for block group data (ACS)<|endoftext|>
0ad69b23d9b4c230a6de3f531ce5236d36e58e46466e9b1197b74895a172b09d
def block_group_to_cd_park_access(self, df): '\n walk-to-park access zone aggregation for block group data (acs)\n ' df = df.merge(self.lookup_geo.loc[((~ self.lookup_geo.cd_park_access.isna()), ['geoid_block_group', 'cd_park_access'])].drop_duplicates(), how='right', right_on='geoid_block_group', left_on='census_geoid') output = AggregatedGeography.create_output(df, 'cd_park_access') output['pff_variable'] = df['pff_variable'].to_list()[0] output['geotype'] = 'cd_park_access' return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]
walk-to-park access zone aggregation for block group data (acs)
factfinder/geography/2010.py
block_group_to_cd_park_access
EricaMaurer/db-factfinder
0
python
def block_group_to_cd_park_access(self, df): '\n \n ' df = df.merge(self.lookup_geo.loc[((~ self.lookup_geo.cd_park_access.isna()), ['geoid_block_group', 'cd_park_access'])].drop_duplicates(), how='right', right_on='geoid_block_group', left_on='census_geoid') output = AggregatedGeography.create_output(df, 'cd_park_access') output['pff_variable'] = df['pff_variable'].to_list()[0] output['geotype'] = 'cd_park_access' return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]
def block_group_to_cd_park_access(self, df): '\n \n ' df = df.merge(self.lookup_geo.loc[((~ self.lookup_geo.cd_park_access.isna()), ['geoid_block_group', 'cd_park_access'])].drop_duplicates(), how='right', right_on='geoid_block_group', left_on='census_geoid') output = AggregatedGeography.create_output(df, 'cd_park_access') output['pff_variable'] = df['pff_variable'].to_list()[0] output['geotype'] = 'cd_park_access' return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]<|docstring|>walk-to-park access zone aggregation for block group data (acs)<|endoftext|>
2626541de7c67f14127c18e0858e5a27c27ffba1189310b96e1fcafc1ea2f950
def block_to_cd_fp500(self, df): '\n 500 yr flood plain aggregation for block data (decennial)\n ' df = df.merge(self.lookup_geo.loc[((~ self.lookup_geo.cd_fp_500.isna()), ['geoid_block', 'cd_fp_500'])].drop_duplicates(), how='right', right_on='geoid_block', left_on='census_geoid') output = AggregatedGeography.create_output(df, 'cd_fp_500') output['pff_variable'] = df['pff_variable'].to_list()[0] output['geotype'] = 'cd_fp_500' return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]
500 yr flood plain aggregation for block data (decennial)
factfinder/geography/2010.py
block_to_cd_fp500
EricaMaurer/db-factfinder
0
python
def block_to_cd_fp500(self, df): '\n \n ' df = df.merge(self.lookup_geo.loc[((~ self.lookup_geo.cd_fp_500.isna()), ['geoid_block', 'cd_fp_500'])].drop_duplicates(), how='right', right_on='geoid_block', left_on='census_geoid') output = AggregatedGeography.create_output(df, 'cd_fp_500') output['pff_variable'] = df['pff_variable'].to_list()[0] output['geotype'] = 'cd_fp_500' return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]
def block_to_cd_fp500(self, df): '\n \n ' df = df.merge(self.lookup_geo.loc[((~ self.lookup_geo.cd_fp_500.isna()), ['geoid_block', 'cd_fp_500'])].drop_duplicates(), how='right', right_on='geoid_block', left_on='census_geoid') output = AggregatedGeography.create_output(df, 'cd_fp_500') output['pff_variable'] = df['pff_variable'].to_list()[0] output['geotype'] = 'cd_fp_500' return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]<|docstring|>500 yr flood plain aggregation for block data (decennial)<|endoftext|>
5039c7b63075f9f1b7c527ac19dfc9a2e0999b49ac39f312da99d9006fa029e2
def block_to_cd_fp100(self, df): '\n 100 yr flood plain aggregation for block data (decennial)\n ' df = df.merge(self.lookup_geo.loc[((~ self.lookup_geo.cd_fp_100.isna()), ['geoid_block', 'cd_fp_100'])].drop_duplicates(), how='right', right_on='geoid_block', left_on='census_geoid') output = AggregatedGeography.create_output(df, 'cd_fp_100') output['pff_variable'] = df['pff_variable'].to_list()[0] output['geotype'] = 'cd_fp_100' return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]
100 yr flood plain aggregation for block data (decennial)
factfinder/geography/2010.py
block_to_cd_fp100
EricaMaurer/db-factfinder
0
python
def block_to_cd_fp100(self, df): '\n \n ' df = df.merge(self.lookup_geo.loc[((~ self.lookup_geo.cd_fp_100.isna()), ['geoid_block', 'cd_fp_100'])].drop_duplicates(), how='right', right_on='geoid_block', left_on='census_geoid') output = AggregatedGeography.create_output(df, 'cd_fp_100') output['pff_variable'] = df['pff_variable'].to_list()[0] output['geotype'] = 'cd_fp_100' return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]
def block_to_cd_fp100(self, df): '\n \n ' df = df.merge(self.lookup_geo.loc[((~ self.lookup_geo.cd_fp_100.isna()), ['geoid_block', 'cd_fp_100'])].drop_duplicates(), how='right', right_on='geoid_block', left_on='census_geoid') output = AggregatedGeography.create_output(df, 'cd_fp_100') output['pff_variable'] = df['pff_variable'].to_list()[0] output['geotype'] = 'cd_fp_100' return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]<|docstring|>100 yr flood plain aggregation for block data (decennial)<|endoftext|>
17c7234e66de2254dad7772211402560a1a9fccb10f6e52b2330065053b3242d
def block_to_cd_park_access(self, df): '\n walk-to-park access zone aggregation for block data (decennial)\n ' df = df.merge(self.lookup_geo.loc[((~ self.lookup_geo.cd_park_access.isna()), ['geoid_block', 'cd_park_access'])].drop_duplicates(), how='right', right_on='geoid_block', left_on='census_geoid') output = AggregatedGeography.create_output(df, 'cd_park_access') output['pff_variable'] = df['pff_variable'].to_list()[0] output['geotype'] = 'cd_park_access' return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]
walk-to-park access zone aggregation for block data (decennial)
factfinder/geography/2010.py
block_to_cd_park_access
EricaMaurer/db-factfinder
0
python
def block_to_cd_park_access(self, df): '\n \n ' df = df.merge(self.lookup_geo.loc[((~ self.lookup_geo.cd_park_access.isna()), ['geoid_block', 'cd_park_access'])].drop_duplicates(), how='right', right_on='geoid_block', left_on='census_geoid') output = AggregatedGeography.create_output(df, 'cd_park_access') output['pff_variable'] = df['pff_variable'].to_list()[0] output['geotype'] = 'cd_park_access' return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]
def block_to_cd_park_access(self, df): '\n \n ' df = df.merge(self.lookup_geo.loc[((~ self.lookup_geo.cd_park_access.isna()), ['geoid_block', 'cd_park_access'])].drop_duplicates(), how='right', right_on='geoid_block', left_on='census_geoid') output = AggregatedGeography.create_output(df, 'cd_park_access') output['pff_variable'] = df['pff_variable'].to_list()[0] output['geotype'] = 'cd_park_access' return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]<|docstring|>walk-to-park access zone aggregation for block data (decennial)<|endoftext|>