Dataset schema (39 columns; dtype and observed range per column):

hexsha: string, length 40
size: int64, 5 to 2.06M
ext: string, 10 distinct values
lang: string, 1 distinct value
max_stars_repo_path: string, length 3 to 248
max_stars_repo_name: string, length 5 to 125
max_stars_repo_head_hexsha: string, length 40 to 78
max_stars_repo_licenses: sequence, length 1 to 10
max_stars_count: int64, 1 to 191k
max_stars_repo_stars_event_min_datetime: string, length 24
max_stars_repo_stars_event_max_datetime: string, length 24
max_issues_repo_path: string, length 3 to 248
max_issues_repo_name: string, length 5 to 125
max_issues_repo_head_hexsha: string, length 40 to 78
max_issues_repo_licenses: sequence, length 1 to 10
max_issues_count: int64, 1 to 67k
max_issues_repo_issues_event_min_datetime: string, length 24
max_issues_repo_issues_event_max_datetime: string, length 24
max_forks_repo_path: string, length 3 to 248
max_forks_repo_name: string, length 5 to 125
max_forks_repo_head_hexsha: string, length 40 to 78
max_forks_repo_licenses: sequence, length 1 to 10
max_forks_count: int64, 1 to 105k
max_forks_repo_forks_event_min_datetime: string, length 24
max_forks_repo_forks_event_max_datetime: string, length 24
content: string, length 5 to 2.06M
avg_line_length: float64, 1 to 1.02M
max_line_length: int64, 3 to 1.03M
alphanum_fraction: float64, 0 to 1
count_classes: int64, 0 to 1.6M
score_classes: float64, 0 to 1
count_generators: int64, 0 to 651k
score_generators: float64, 0 to 1
count_decorators: int64, 0 to 990k
score_decorators: float64, 0 to 1
count_async_functions: int64, 0 to 235k
score_async_functions: float64, 0 to 1
count_documentation: int64, 0 to 1.04M
score_documentation: float64, 0 to 1
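
Each row of this table is one Python source file together with its repository metadata and precomputed statistics. As a purely illustrative sketch of how such a table could be queried, assuming the rows were exported to a local Parquet file (the file name, column subset, and thresholds below are assumptions, not part of this dump):

    import pandas as pd

    # Hypothetical file name; the dump does not say where the rows are stored.
    df = pd.read_parquet("python_files.parquet")

    # Keep small, well-documented files, using the schema's size and score columns.
    subset = df[(df["size"] < 100_000) & (df["score_documentation"] > 0.1)]

    # Inspect which repositories and paths the selected files come from.
    print(subset[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())

The sample rows below follow the column order listed above.
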
hexsha: 5d33f0625c53288d64064188bcbc357613405301 | size: 1,448 | ext: py | lang: Python
max_stars: tests/test_env.py @ Majanao/pytorch-blender (head eb5effb033094d037e7bdc2238c00806be7012ae), licenses ["MIT"], 381 stars, events 2019-07-03T14:15:16.000Z to 2022-03-30T08:58:26.000Z
max_issues: tests/test_env.py @ ANABUR920/pytorch-blender (head eb5effb033094d037e7bdc2238c00806be7012ae), licenses ["MIT"], 18 issues, events 2020-01-15T17:36:08.000Z to 2021-12-31T08:37:54.000Z
max_forks: tests/test_env.py @ ANABUR920/pytorch-blender (head eb5effb033094d037e7bdc2238c00806be7012ae), licenses ["MIT"], 34 forks, events 2019-07-09T03:15:02.000Z to 2022-01-13T17:36:20.000Z
content:
import pytest
from pathlib import Path
from blendtorch import btt

BLENDDIR = Path(__file__).parent / 'blender'


class MyEnv(btt.env.OpenAIRemoteEnv):
    def __init__(self, background=True, **kwargs):
        super().__init__(version='1.0.0')
        self.launch(scene=BLENDDIR / 'env.blend', script=BLENDDIR / 'env.blend.py',
                    background=background, **kwargs)


# For Blender 2.9, if we pass scene='', the tests below fail since
# _env_post_step() is not called. It is currently unclear why this happens.
def _run_remote_env(background):
    env = MyEnv(background=background)

    obs = env.reset()
    assert obs == 0.
    obs, reward, done, info = env.step(0.1)
    assert obs == pytest.approx(0.1)
    assert reward == 0.
    assert not done
    assert info['count'] == 2  # 1 is already set by reset()
    obs, reward, done, info = env.step(0.6)
    assert obs == pytest.approx(0.6)
    assert reward == 1.
    assert not done
    assert info['count'] == 3
    for _ in range(8):
        obs, reward, done, info = env.step(0.6)
    assert done

    obs = env.reset()
    assert obs == 0.
    obs, reward, done, info = env.step(0.1)
    assert obs == pytest.approx(0.1)
    assert reward == 0.
    assert not done
    assert info['count'] == 2
    env.close()


@pytest.mark.background
def test_remote_env():
    _run_remote_env(background=True)


def test_remote_env_ui():
    _run_remote_env(background=False)
avg_line_length: 26.814815 | max_line_length: 81 | alphanum_fraction: 0.641575
count_classes: 421 (score 0.290746) | count_generators: 0 (score 0) | count_decorators: 83 (score 0.05732) | count_async_functions: 0 (score 0) | count_documentation: 229 (score 0.158149)

hexsha: 5d356088cb332b6a1cde85497c82874f1681387b | size: 20 | ext: py | lang: Python
max_stars: sitetree/__init__.py @ sitkatech/django-sitetree (head 5d7e9d503f97ff021c5c04855e04e098b3d2488c), licenses ["BSD-3-Clause"], 3 stars, events 2019-02-12T01:58:42.000Z to 2019-06-08T10:50:33.000Z
max_issues: same path, repo, head, and licenses; count null, event datetimes null
max_forks: same path, repo, head, and licenses; count null, event datetimes null
content:
VERSION = (0, 9, 5)
avg_line_length: 10 | max_line_length: 19 | alphanum_fraction: 0.5
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 0 (score 0)

hexsha: 5d36081f930dd6c0a745b46f1b5a299e738d247f | size: 20,670 | ext: py | lang: Python
max_stars: deepvariant/runtime_by_region_vis.py @ tahashmi/deepvariant (head 441c1809d3290f4a20b29a0a0bbf8ecfb929a6e3), licenses ["BSD-3-Clause"], 4 stars, events 2019-03-30T13:25:25.000Z to 2020-10-14T18:47:21.000Z
max_issues: deepvariant/runtime_by_region_vis.py @ FrogEnthusiast7/deepvariant (head 84516dfacd1ed856a34507becb21848aa12e77a8), licenses ["BSD-3-Clause"], 1 issue, events 2021-06-18T15:04:47.000Z to 2021-06-18T15:04:47.000Z
max_forks: deepvariant/runtime_by_region_vis.py @ FrogEnthusiast7/deepvariant (head 84516dfacd1ed856a34507becb21848aa12e77a8), licenses ["BSD-3-Clause"], 1 fork, events 2019-09-04T16:59:18.000Z to 2019-09-04T16:59:18.000Z
content:
# Copyright 2020 Google LLC. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. r"""Create a visual report of make_examples runtime by region. Use this script to visualize the runtime-by-region data generated by running make_examples with --runtime_by_region. """ from typing import Dict, Sequence, List, Tuple, Text, Any, Union from absl import app from absl import flags import altair as alt import pandas as pd import tensorflow as tf from third_party.nucleus.io import sharded_file_utils # Altair uses a lot of method chaining, such as # chart.mark_bar().encode(...).properties(...), so using backslash # continuation to break this into separate lines makes the code more readable. # pylint: disable=g-backslash-continuation VEGA_URL = 'https://storage.googleapis.com/deepvariant/lib/vega' FLAGS = flags.FLAGS flags.DEFINE_string( 'input', None, 'TSV file that was produced when running make_examples ' 'with --runtime_by_region. Can be sharded, e.g. /path/[email protected].') flags.DEFINE_string( 'title', None, 'Title will be shown at the top of the report and will ' 'be used as a prefix for downloaded image files.') flags.DEFINE_string('output', 'runtime_by_region_report.html', 'Path for the output report, which will be an html file.') RUNTIME_COLUMNS = [ 'get reads', 'find candidates', 'make pileup images', 'write outputs' ] COUNT_COLUMNS = ['num reads', 'num candidates', 'num examples'] CSS_STYLES = """ <style> body { font-family: sans-serif; } .chart-container { padding: 30px; } </style> """ def read_sharded_runtime_tsvs(path_string: str) -> pd.DataFrame: """Imports data from a single or sharded path into a pandas dataframe. Args: path_string: The path to the input file, which may be sharded. Returns: A dataframe matching the TSV file(s) but with added Task column. """ if sharded_file_utils.is_sharded_file_spec(path_string): paths = sharded_file_utils.generate_sharded_filenames(path_string) else: paths = [path_string] list_of_dataframes = [] for i, path in enumerate(paths): if path.startswith('gs://'): # Once pandas is updated to 0.24+, pd.read_csv will work for gs:// # without this workaround. 
with tf.io.gfile.GFile(path) as f: d = pd.read_csv(f, sep='\t') else: d = pd.read_csv(path, sep='\t') d['Task'] = i list_of_dataframes.append(d) return pd.concat(list_of_dataframes, axis=0, ignore_index=True) def format_runtime_string(raw_seconds: float) -> str: """Creates a nice format string from a potentially large number of seconds. Args: raw_seconds: A number of seconds. Returns: The seconds divided into hours, minutes, and remaining seconds, formatted nicely. For example, 2h3m5.012s. """ minutes, seconds = divmod(raw_seconds, 60) hours, minutes = divmod(minutes, 60) seconds = round(seconds, 3) output = '' if hours > 0: output += f'{int(hours)}h' if minutes > 0: output += f'{int(minutes)}m' if seconds > 0 or not output: output += f'{seconds}s' return output def calculate_totals(df: pd.DataFrame) -> pd.DataFrame: """Calculates total runtime, formats it nicely, and sorts by it. Args: df: A dataframe of runtime profiling numbers. Returns: The same dataframe with some additional summary columns. """ # 'total runtime' is a simple sum of the runtime columns. df['total runtime'] = df[RUNTIME_COLUMNS].sum(axis=1) # Create a formatted runtime string for tooltips. df['Runtime'] = df['total runtime'].apply(format_runtime_string) # Sort by descending total region runtime. df.sort_values(by='total runtime', inplace=True, ascending=False) return df def summarize_by_task(df: pd.DataFrame) -> pd.DataFrame: """Groups regions to get the total runtime for each task. Args: df: A dataframe of runtime profiling numbers. Returns: The dataframe grouped by task. """ by_task = df.groupby(by=['Task']).sum() return by_task.reset_index() def stage_histogram(d: pd.DataFrame, title: str = '') -> alt.Chart: """Plots a histogram of runtimes stacked by stage. Args: d: A dataframe of runtimes, either by region or by task. title: A title for the plot. Returns: An altair chart. """ columns_used = RUNTIME_COLUMNS d = d[columns_used] return alt.Chart(d).transform_fold( RUNTIME_COLUMNS, as_=['Stage', 'runtime_by_stage']) \ .mark_bar(opacity=0.3) \ .encode( x=alt.X('runtime_by_stage:Q', bin=alt.Bin(maxbins=100), title='Runtime (seconds)'), y=alt.Y('count()', title='Count of regions', stack=None), color=alt.Color('Stage:N', sort=None) ).properties(title=title) def correlation_scatter_charts(d: pd.DataFrame, title: str = '') -> alt.Chart: """Produces a grid of scatter plots of runtimes of stages versus covariates. Args: d: A pandas dataframe of runtime by regions. title: A title for the plot. Returns: An altair chart """ columns_used = ['region', 'total runtime'] + RUNTIME_COLUMNS + COUNT_COLUMNS d = d[columns_used] return alt.Chart(d).mark_circle(opacity=0.1).encode( x=alt.X(alt.repeat('column'), type='quantitative', axis=alt.Axis(labelExpr="datum.value + 's'")), y=alt.Y(alt.repeat('row'), type='quantitative'), tooltip='region' ).properties(width=100, height=100) \ .repeat( column=['total runtime'] + RUNTIME_COLUMNS, row=COUNT_COLUMNS, ).properties(title=title) def totals_by_stage(d: pd.DataFrame) -> alt.Chart: """Plots total runtimes for each stage. Args: d: A dataframe of runtimes. Returns: An altair chart. 
""" stage_totals_series = d.sum()[RUNTIME_COLUMNS] stage_totals = pd.DataFrame( stage_totals_series, columns=['Runtime (seconds)']) stage_totals.reset_index(inplace=True) stage_totals = stage_totals.rename(columns={'index': 'Stage'}) stage_totals['Runtime'] = stage_totals['Runtime (seconds)'].apply( format_runtime_string) return alt.Chart(stage_totals).mark_bar().encode( x='Runtime (seconds)', y=alt.Y('Stage', sort=None), tooltip=['Runtime'], fill=alt.Fill('Stage', sort=None)).properties(title='Overall runtime by stage') def pareto_by_task_tooltip(row: pd.Series) -> str: """For one row of a dataframe, computes a tooltip description. Args: row: A Pandas Series, one row of a dataframe containing some specific cumulative sum columns. Returns: A string to show as the tooltip for a pareto curve. """ return (f"{row['task cumsum order'] * 100:.2f}% of regions " f"account for {row['task cumsum fraction'] * 100:.2f}% of " f"the runtime in task {row['Task']}") def calculate_pareto_metrics(df_subset: pd.DataFrame) -> pd.DataFrame: """Calculates cumulative sums for a subset of a dataframe. Args: df_subset: A dataframe subset of one task. Returns: The same dataframe subset with some additional columns. """ # These are the same for all regions in the same task, for the scatter plot: df_subset['task total runtime'] = df_subset['total runtime'].sum() df_subset['Runtime for task'] = df_subset['task total runtime'].apply( format_runtime_string) df_subset['task num examples'] = df_subset['num examples'].sum() # These are cumulative sums for the pareto curves: df_subset['task cumsum fraction'] = df_subset['total runtime'].cumsum( ) / df_subset['total runtime'].sum() n = len(df_subset) df_subset['task cumsum order'] = list(map(lambda x: x / n, range(0, n))) df_subset['tooltip'] = df_subset.apply(pareto_by_task_tooltip, axis=1) return df_subset def pareto_and_runtimes_by_task(df: pd.DataFrame) -> alt.Chart: """Creates an interactive Pareto curve and scatter plot of task runtimes. Tracing each curve shows to what extent a small proportion of long-running regions contribute disproportionately to the overall runtime. That is, "The longest-running X% of regions account for Y% of the total runtime." There is a curve for each task. Args: df: A dataframe of all regions. Returns: An altair chart. """ grouped = df.groupby(df['Task'], sort=False) df = grouped.apply(calculate_pareto_metrics) # Sample along the Pareto curve, ensuring the longest regions are shown. if len(df) > 5000: x = 1000 df = pd.concat([df.nlargest(x, 'total runtime'), df.sample(5000 - x)]) # Limit columns to greatly reduce the size of the html report. columns_used = [ 'task cumsum order', 'task cumsum fraction', 'tooltip', 'Task', 'task total runtime', 'task num examples', 'Runtime for task' ] df = df[columns_used] # Brushing on the task_scatter plot highlights the same tasks in the Pareto # curve. brush = alt.selection_interval() pareto_by_task = alt.Chart(df).mark_line(size=2).encode( x=alt.X( 'task cumsum order', title='The longest-runtime X% of regions', axis=alt.Axis(format='%')), y=alt.Y( 'task cumsum fraction', title='Account for Y% of the total runtime', axis=alt.Axis(format='%')), tooltip='tooltip', color=alt.condition(brush, 'Task:N', alt.value('lightgray'))).properties( title='Pareto curve for each task').interactive() # This chart needs to use the same dataframe as the first chart to enable the # brushing on one to affect the other. 
Using max(task) for 'text' is a # trick that causes bundling by task to avoid showing multiple overlapping # points which otherwise make the text look funky. task_scatter = alt.Chart(df).mark_point(size=10).encode( x=alt.X('max(task total runtime)', title='Runtime (seconds)'), y=alt.Y('task num examples:Q', title='Number of examples'), color=alt.condition(brush, 'Task:N', alt.value('lightgray')), tooltip=['Task', 'Runtime for task'] ) \ .properties(title='Total runtime for each task (drag to highlight)') \ .add_selection(brush) return pareto_by_task | task_scatter def individual_region_bars(small_df: pd.DataFrame, title: Union[str, Dict[str, str]] = '') -> alt.Chart: """Makes a stacked bar chart with runtime of each stage for individual regions. Args: small_df: A dataframe of regions, each of which will be shown as a bar. title: A title for the plot. If a dict, it should contain 'title' and/or 'subtitle'. Returns: An altair chart. """ columns_used = ['region', 'Runtime'] + RUNTIME_COLUMNS d = small_df[columns_used] return alt.Chart(d).transform_fold( RUNTIME_COLUMNS, as_=['Stage', 'runtime_by_stage']) \ .mark_bar().encode( x=alt.X('region:N', sort=None), y=alt.Y('runtime_by_stage:Q', scale=alt.Scale(type='linear'), title='Runtime (seconds)'), fill=alt.Fill('Stage:N', sort=None), tooltip='Runtime:N' ).properties(title=title) def selected_longest_and_median_regions(df: pd.DataFrame) -> alt.Chart: """Creates a stacked bar charts of the top 20 and median 20 regions. Args: df: A dataframe of all regions. Returns: An altair chart. """ num_rows = len(df) mid = round(num_rows / 2) return individual_region_bars(df.iloc[0:20], 'Top runtime regions') \ | individual_region_bars(df.iloc[mid-10:mid+11], 'Median runtime regions') def top_regions_producing_zero_examples(df: pd.DataFrame) -> alt.Chart: """Creates a chart of the top regions that produced zero examples. Args: df: A dataframe of all regions. Returns: An altair chart. """ regions_with_zero_examples = df[df['num examples'] == 0] runtime_of_zeros = regions_with_zero_examples['total runtime'].sum() / 3600 total_runtime = df['total runtime'].sum() / 3600 subtitle = ( f'Spent {runtime_of_zeros:.2f} hours processing the ' f'{len(regions_with_zero_examples)} regions that produced no examples, ' f'which is {runtime_of_zeros / total_runtime * 100:.2f}% of the total ' f'runtime of {total_runtime:.2f} hours.') return individual_region_bars( regions_with_zero_examples.nlargest(50, 'total runtime'), title={ 'text': 'The longest-running regions that produced no examples', 'subtitle': subtitle }) def write_to_html_report(charts: List[Dict[Text, alt.Chart]], title: str, subtitle: str, html_output: Any) -> None: """Makes the html report with all the charts inserted. Args: charts: A list of altair chart objects. title: The title to show at the top of the report. subtitle: The subtitle to show just below the title on the report. html_output: a writable file object. Returns: None. Writes into the html_output file object. """ # Start the HTML document. html_output.write('<!DOCTYPE html>\n<html>\n<head>') # Add dependencies vega and vega-lite, which render the altair charts. html_output.write('<script type="text/javascript" src="{}/vega@5"></script>' '\n'.format(VEGA_URL)) html_output.write( '<script type="text/javascript" src="{}/[email protected]"></script>' '\n'.format(VEGA_URL)) html_output.write( '<script type="text/javascript" src="{}/vega-embed@6"></script>' '\n'.format(VEGA_URL)) # Add styles (CSS). 
html_output.write(CSS_STYLES) html_output.write('</head>\n<body>') html_output.write('<h1>{}</h1>\n'.format(title)) html_output.write('<h2>{}</h2>\n'.format(subtitle)) # Make a div containing all the charts. html_output.write('<div>') for chart in charts: html_output.write( '<div class="chart-container" id="vis_{}"></div>\n'.format(chart['id'])) html_output.write('</div>') # Add JSON vega specs and hook them up to the divs with VegaEmbed. html_output.write('<script>\n') for chart in charts: html_output.write('var spec_{} = {};\n'.format(chart['id'], chart['chart'].to_json())) download_filename = '{}_{}'.format(title.replace(' ', '_'), chart['id']) embed_options = {'mode': 'vega-lite', 'downloadFileName': download_filename} html_output.write('vegaEmbed("#vis_{}", spec_{}, {})\n'.format( chart['id'], chart['id'], embed_options)) html_output.write('</script>\n') # Close HTML document. html_output.write('</body></html>') def read_data_and_make_dataframes( input_path: str) -> Tuple[pd.DataFrame, pd.DataFrame]: """Loads data from a file into one dataframe as-is and one by task. Args: input_path: str, path of the input TSV file (may be sharded). Returns: df: A dataframe with one row per region. by_task: A dataframe with one row per task. """ df = read_sharded_runtime_tsvs(input_path) df = calculate_totals(df) by_task = summarize_by_task(df) return df, by_task def make_all_charts( df: pd.DataFrame, by_task: pd.DataFrame) -> List[Dict[Text, Union[str, alt.Chart]]]: """Creates charts and puts them in a list with their ID names. Args: df: A dataframe with one row per region. by_task: A dataframe with one row per task. Returns: list of dicts, each containing a chart and a descriptive ID. """ charts = [{ 'id': 'total_by_stage', 'chart': totals_by_stage(by_task) }, { 'id': 'pareto_and_runtimes_by_task', 'chart': pareto_and_runtimes_by_task(df) }, { 'id': 'histogram_by_task', 'chart': stage_histogram(by_task, title='Stage runtimes for each task') }, { 'id': 'selected_longest_and_median_regions', 'chart': selected_longest_and_median_regions(df) }, { 'id': 'zero_examples', 'chart': top_regions_producing_zero_examples(df) }] # Altair shows a max of 5000 data points. if len(df) <= 5000: # With up to 5000 points, just show them all. charts.extend([{ 'id': 'histogram', 'chart': stage_histogram(df, title='Runtime by stage for all regions') }, { 'id': 'scatter_grid', 'chart': correlation_scatter_charts(df, title='Trends for all regions') }]) else: # With too many points, make different subsets to show trends better. top_100 = df.nlargest(100, 'total runtime') top_5000 = df.nlargest(5000, 'total runtime') # Sample the bottom 99% to avoid outliers that obscure general trends. 
bottom_99_percent = df.nsmallest(int(len(df) * .99), 'total runtime') if len(bottom_99_percent) > 5000: bottom_99_percent = bottom_99_percent.sample(5000) charts.extend([{ 'id': 'histogram_bottom_99_percent', 'chart': stage_histogram( bottom_99_percent, title='Runtime by stage for regions in the bottom 99%') }, { 'id': 'histogram_top_100', 'chart': stage_histogram( top_100, title='Runtime by stage for regions in the top 100') }, { 'id': 'scatter_grid_top_5000', 'chart': correlation_scatter_charts( top_5000, title='Trends for regions in the top 5000') }, { 'id': 'scatter_grid_bottom_99_percent', 'chart': correlation_scatter_charts( bottom_99_percent, title='Trends for regions in the bottom 99%') }]) return charts def make_report(input_path: str, title: str, html_output: tf.io.gfile.GFile) -> None: """Reads data, creates charts, and composes the charts into an HTML report. Args: input_path: Path of the input TSV file (or sharded files). title: Title to put at the top of the report. html_output: Writable file object where output will be written. """ # Load data into pandas dataframes and add summary columns. df, by_task = read_data_and_make_dataframes(input_path) # Build all the charts. charts = make_all_charts(df, by_task) # Write a subtitle with some top-level stats. subtitle = (f'Runtime profiling for make_examples on {len(df)} regions ' f'across {len(by_task)} task{"(s)" if len(by_task) > 1 else ""}') # Write the HTML report with all the charts. write_to_html_report( charts=charts, title=title, subtitle=subtitle, html_output=html_output) def main(argv: Sequence[str]): if len(argv) > 1: raise app.UsageError( 'Command line parsing failure: this script does not accept ' 'positional arguments, but found these extra arguments: "{}".' ''.format(str(argv[1:]))) # Add html to the output path if that is not already the suffix. if FLAGS.output.endswith('html'): output_filename = FLAGS.output else: output_filename = f'{FLAGS.output}.html' # Start HTML document. Using GFile enables writing to GCS too. html_output = tf.io.gfile.GFile(output_filename, 'w') make_report( input_path=FLAGS.input, title=FLAGS.title, html_output=html_output) html_output.close() # Abstracted out the file open/close to enable testing. print('Output written to:', output_filename) if __name__ == '__main__': flags.mark_flags_as_required(['input', 'title']) app.run(main)
avg_line_length: 34.335548 | max_line_length: 97 | alphanum_fraction: 0.677504
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 11,486 (score 0.555685)

hexsha: 5d36d6dbf217342990cb49eda55af38f42824619 | size: 4,238 | ext: py | lang: Python
max_stars: pkgs/dynd-python-0.7.2-py27_0/lib/python2.7/site-packages/dynd/tests/test_nd_fields.py @ wangyum/anaconda (head 6e5a0dbead3327661d73a61e85414cf92aa52be6), licenses ["Apache-2.0", "BSD-3-Clause"], count null, event datetimes null
max_issues: same path, repo, head, and licenses; count null, event datetimes null
max_forks: same path, repo, head, and licenses; count null, event datetimes null
content:
import sys
import unittest
from dynd import nd, ndt

"""
class TestFields(unittest.TestCase):
    def test_simple(self):
        a = nd.array([
                (1, 2, 'a', 'b'),
                (3, 4, 'ab', 'cd'),
                (5, 6, 'def', 'ghi')],
                type='3 * {x: int32, y: int32, z: string, w: string}')
        # Selecting a single field
        b = nd.fields(a, 'x')
        self.assertEqual(nd.dtype_of(b), ndt.make_struct(
                [ndt.int32],
                ['x']))
        self.assertEqual(nd.as_py(b.x), nd.as_py(a.x))
        # Selecting two fields
        b = nd.fields(a, 'z', 'y')
        self.assertEqual(nd.dtype_of(b), ndt.make_struct(
                [ndt.string, ndt.int32],
                ['z', 'y']))
        self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
        self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
        # Selecting three fields
        b = nd.fields(a, 'w', 'y', 'z')
        self.assertEqual(nd.dtype_of(b), ndt.make_struct(
                [ndt.string, ndt.int32, ndt.string],
                ['w', 'y', 'z']))
        self.assertEqual(nd.as_py(b.w), nd.as_py(a.w))
        self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
        self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
        # Reordering all four fields
        b = nd.fields(a, 'w', 'y', 'x', 'z')
        self.assertEqual(nd.dtype_of(b), ndt.make_struct(
                [ndt.string, ndt.int32, ndt.int32, ndt.string],
                ['w', 'y', 'x', 'z']))
        self.assertEqual(nd.as_py(b.w), nd.as_py(a.w))
        self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
        self.assertEqual(nd.as_py(b.x), nd.as_py(a.x))
        self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))

    def test_fixed_var(self):
        a = nd.array([
                [(1, 2, 'a', 'b'), (3, 4, 'ab', 'cd')],
                [(5, 6, 'def', 'ghi')],
                [(7, 8, 'alpha', 'beta'), (9, 10, 'X', 'Y'), (11, 12, 'the', 'end')]],
                type='3 * var * {x: int32, y: int32, z: string, w: string}')
        # Selecting a single field
        b = nd.fields(a, 'x')
        self.assertEqual(nd.type_of(b), ndt.make_fixed_dim(3,
                ndt.make_var_dim(ndt.make_struct(
                    [ndt.int32],
                    ['x']))))
        self.assertEqual(nd.as_py(b.x), nd.as_py(a.x))
        # Selecting two fields
        b = nd.fields(a, 'z', 'y')
        self.assertEqual(nd.type_of(b), ndt.make_fixed_dim(3,
                ndt.make_var_dim(ndt.make_struct(
                    [ndt.string, ndt.int32],
                    ['z', 'y']))))
        self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
        self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
        # Selecting three fields
        b = nd.fields(a, 'w', 'y', 'z')
        self.assertEqual(nd.type_of(b), ndt.make_fixed_dim(3,
                ndt.make_var_dim(ndt.make_struct(
                    [ndt.string, ndt.int32, ndt.string],
                    ['w', 'y', 'z']))))
        self.assertEqual(nd.as_py(b.w), nd.as_py(a.w))
        self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
        self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
        # Reordering all four fields
        b = nd.fields(a, 'w', 'y', 'x', 'z')
        self.assertEqual(nd.type_of(b), ndt.make_fixed_dim(3,
                ndt.make_var_dim(ndt.make_struct(
                    [ndt.string, ndt.int32, ndt.int32, ndt.string],
                    ['w', 'y', 'x', 'z']))))
        self.assertEqual(nd.as_py(b.w), nd.as_py(a.w))
        self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
        self.assertEqual(nd.as_py(b.x), nd.as_py(a.x))
        self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))

    def test_bad_field_name(self):
        a = nd.array([
                (1, 2, 'a', 'b'),
                (3, 4, 'ab', 'cd'),
                (5, 6, 'def', 'ghi')],
                type='3 * {x: int32, y: int32, z: string, w: string}')
        self.assertRaises(RuntimeError, nd.fields, a, 'y', 'v')
"""

if __name__ == '__main__':
    unittest.main()
avg_line_length: 42.808081 | max_line_length: 76 | alphanum_fraction: 0.464606
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 4,146 (score 0.978292)

hexsha: 5d373f0e4790917fc2d0b3ea420a4ad7a8c76024 | size: 4,096 | ext: py | lang: Python
max_stars: xeofs/pandas/_transformer.py @ nicrie/xeofs (head 4c0ed49b45794ce0abb641c98b82638b2faa4828), licenses ["MIT"], 3 stars, events 2022-02-22T07:56:09.000Z to 2022-03-30T10:47:20.000Z
max_issues: xeofs/pandas/_transformer.py @ nicrie/xeofs (head 4c0ed49b45794ce0abb641c98b82638b2faa4828), licenses ["MIT"], 13 issues, events 2022-02-15T13:44:34.000Z to 2022-03-15T22:51:01.000Z
max_forks: xeofs/pandas/_transformer.py @ nicrie/xeofs (head 4c0ed49b45794ce0abb641c98b82638b2faa4828), licenses ["MIT"], 2 forks, events 2022-02-17T19:02:59.000Z to 2022-02-22T07:56:15.000Z
content:
from typing import Union, Iterable, List

import numpy as np
import pandas as pd

from ..models._transformer import _ArrayTransformer, _MultiArrayTransformer


class _DataFrameTransformer(_ArrayTransformer):
    '''`_ArrayTransformer` wrapper for `pandas.DataFrame`.'''

    def __init__(self):
        super().__init__()

    def fit(self, X : pd.DataFrame, axis : Union[int, Iterable[int]] = 0):
        if not isinstance(X, pd.DataFrame):
            raise ValueError('This interface is for `pandas.DataFrame` only')
        if isinstance(axis, list):
            axis = axis[0]
        # Set sample and feature index
        if axis == 0:
            self.index_samples = X.index
            self.index_features = X.columns
        elif axis == 1:
            self.index_samples = X.columns
            self.index_features = X.index
        else:
            raise ValueError('axis must be either 0 or 1')
        # Fit the data
        try:
            super().fit(X=X.values, axis=axis)
        except AttributeError:
            err_msg = 'weights must be of type {:}.'.format(repr(pd.DataFrame))
            raise TypeError(err_msg)
        return self

    def transform(self, X : pd.DataFrame) -> np.ndarray:
        try:
            return super().transform(X.values)
        except AttributeError:
            err_msg = 'weights must be of type {:}.'.format(repr(pd.DataFrame))
            raise TypeError(err_msg)

    def fit_transform(self, X : pd.DataFrame, axis : int = 0) -> np.ndarray:
        return self.fit(X=X, axis=axis).transform(X)

    def transform_weights(self, weights : pd.DataFrame) -> np.ndarray:
        try:
            return super().transform_weights(weights.values)
        except AttributeError:
            return super().transform_weights(weights)

    def back_transform(self, X : np.ndarray) -> pd.DataFrame:
        df = super().back_transform(X)
        return pd.DataFrame(
            df,
            index=self.index_samples,
            columns=self.index_features
        )

    def back_transform_eofs(self, X : np.ndarray) -> pd.DataFrame:
        eofs = super().back_transform_eofs(X)
        return pd.DataFrame(
            eofs,
            index=self.index_features,
            columns=range(1, eofs.shape[-1] + 1)
        )

    def back_transform_pcs(self, X : np.ndarray) -> pd.DataFrame:
        pcs = super().back_transform_pcs(X)
        return pd.DataFrame(
            pcs,
            index=self.index_samples,
            columns=range(1, pcs.shape[-1] + 1)
        )


class _MultiDataFrameTransformer(_MultiArrayTransformer):
    'Transform multiple 2D ``pd.DataFrame`` to a single 2D ``np.ndarry``.'

    def __init__(self):
        super().__init__()

    def fit(self, X : Union[pd.DataFrame, List[pd.DataFrame]], axis : Union[int, Iterable[int]] = 0):
        X = self._convert2list(X)
        self.tfs = [_DataFrameTransformer().fit(x, axis=axis) for x in X]
        if len(set([tf.n_valid_samples for tf in self.tfs])) > 1:
            err_msg = 'All individual arrays must have same number of samples.'
            raise ValueError(err_msg)
        self.idx_array_sep = np.cumsum([tf.n_valid_features for tf in self.tfs])
        self.axis_samples = self.tfs[0].axis_samples
        return self

    def transform(self, X : Union[pd.DataFrame, List[pd.DataFrame]]) -> np.ndarray:
        return super().transform(X=X)

    def transform_weights(self, weights : Union[pd.DataFrame, List[pd.DataFrame]]) -> np.ndarray:
        return super().transform_weights(weights=weights)

    def fit_transform(
        self,
        X : Union[pd.DataFrame, List[pd.DataFrame]],
        axis : Union[int, Iterable[int]] = 0
    ) -> np.ndarray:
        return self.fit(X=X, axis=axis).transform(X)

    def back_transform(self, X : np.ndarray) -> pd.DataFrame:
        return super().back_transform(X=X)

    def back_transform_eofs(self, X : np.ndarray) -> pd.DataFrame:
        return super().back_transform_eofs(X=X)

    def back_transform_pcs(self, X : np.ndarray) -> pd.DataFrame:
        return super().back_transform_pcs(X=X)
avg_line_length: 35.310345 | max_line_length: 101 | alphanum_fraction: 0.616211
count_classes: 3,933 (score 0.960205) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 369 (score 0.090088)

hexsha: 5d38d81bebcb78fdcd3ec6d9d6e334cd582c79d5 | size: 1,004 | ext: py | lang: Python
max_stars: tests/bogus_python_model.py @ FossilizedContainers/fossilized-controller (head 5aa14112b3728a619a37233906366c1cda2a0a77), licenses ["MIT"], 1 star, events 2022-01-24T21:54:46.000Z to 2022-01-24T21:54:46.000Z
max_issues: same path, repo, head, and licenses; count null, event datetimes null
max_forks: same path, repo, head, and licenses; count null, event datetimes null
content:
import os
import sys

import lipd

# import pythonAdapter, assumes in ../python-adapter/
tests_dir = os.path.dirname(os.path.realpath(__file__))
fc_dir = os.path.dirname(tests_dir)
python_adapter_dir = os.path.join(fc_dir, "python-adapter")
sys.path.append(python_adapter_dir)

import adapter


def fake_model(adapter):
    # check to see inside function
    print("\n---\nStart of the fake_model function\n---\n")

    # the parameters are handed to you by the adapter
    files = adapter.get_files()

    # use the parameters given by the adapter to get the binary data of the LiPD file
    lipd.readLipd(files['weldeab'])

    # get the binary data of the NetCDF file
    net_cdf_path = files['net_cdf']

    # mark the NetCDF file as an output file
    adapter.set_output_files(net_cdf_path)
    adapter.set_output_files("lipd-files\\")
    return


# have to call adapter in the adapter.py file as adapter.adapter
adapter = adapter.global_adapter
adapter.register(fake_model)
adapter.start_server()
avg_line_length: 26.421053 | max_line_length: 85 | alphanum_fraction: 0.739044
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 453 (score 0.451195)

hexsha: 5d38e4a873930da8bc4504369cb7f1bca6894323 | size: 13,421 | ext: py | lang: Python
max_stars: tello_control_ui.py @ banne2266/UAV-autopilot-NCTU-2021 (head 1a25d4add2de9659516d045054935e3b6e04d06d), licenses ["MIT"], count null, event datetimes null
max_issues: same path, repo, head, and licenses; count null, event datetimes null
max_forks: same path, repo, head, and licenses; count null, event datetimes null
content:
from PIL import Image from PIL import ImageTk import tkinter as tki from tkinter import Toplevel, Scale import threading import datetime import cv2 import os import time import platform class TelloUI: """Wrapper class to enable the GUI.""" def __init__(self,tello,outputpath): """ Initial all the element of the GUI,support by Tkinter :param tello: class interacts with the Tello drone. Raises: RuntimeError: If the Tello rejects the attempt to enter command mode. """ self.tello = tello # videostream device self.outputPath = outputpath # the path that save pictures created by clicking the takeSnapshot button self.frame = None # frame read from h264decoder and used for pose recognition self.thread = None # thread of the Tkinter mainloop self.stopEvent = None # control variables self.distance = 0.1 # default distance for 'move' cmd self.degree = 30 # default degree for 'cw' or 'ccw' cmd # if the flag is TRUE,the auto-takeoff thread will stop waiting for the response from tello self.quit_waiting_flag = False # initialize the root window and image panel self.root = tki.Tk() self.panel = None # create buttons self.btn_snapshot = tki.Button(self.root, text="Snapshot!", command=self.takeSnapshot) self.btn_snapshot.pack(side="bottom", fill="both", expand="yes", padx=10, pady=5) self.btn_pause = tki.Button(self.root, text="Pause", relief="raised", command=self.pauseVideo) self.btn_pause.pack(side="bottom", fill="both", expand="yes", padx=10, pady=5) self.btn_landing = tki.Button( self.root, text="Open Command Panel", relief="raised", command=self.openCmdWindow) self.btn_landing.pack(side="bottom", fill="both", expand="yes", padx=10, pady=5) # start a thread that constantly pools the video sensor for # the most recently read frame self.stopEvent = threading.Event() self.thread = threading.Thread(target=self.videoLoop, args=()) self.thread.start() # set a callback to handle when the window is closed self.root.wm_title("TELLO Controller") self.root.wm_protocol("WM_DELETE_WINDOW", self.onClose) # the sending_command will send command to tello every 5 seconds self.sending_command_thread = threading.Thread(target = self._sendingCommand) def videoLoop(self): """ The mainloop thread of Tkinter Raises: RuntimeError: To get around a RunTime error that Tkinter throws due to threading. """ try: # start the thread that get GUI image and drwa skeleton time.sleep(0.5) self.sending_command_thread.start() while not self.stopEvent.is_set(): system = platform.system() # read the frame for GUI show self.frame = self.tello.read() if self.frame is None or self.frame.size == 0: continue # transfer the format from frame to image image = Image.fromarray(self.frame) # we found compatibility problem between Tkinter,PIL and Macos,and it will # sometimes result the very long preriod of the "ImageTk.PhotoImage" function, # so for Macos,we start a new thread to execute the _updateGUIImage function. 
if system =="Windows" or system =="Linux": self._updateGUIImage(image) else: thread_tmp = threading.Thread(target=self._updateGUIImage,args=(image,)) thread_tmp.start() time.sleep(0.03) except RuntimeError as e: print("[INFO] caught a RuntimeError") def _updateGUIImage(self,image): """ Main operation to initial the object of image,and update the GUI panel """ image = ImageTk.PhotoImage(image) # if the panel none ,we need to initial it if self.panel is None: self.panel = tki.Label(image=image) self.panel.image = image self.panel.pack(side="left", padx=10, pady=10) # otherwise, simply update the panel else: self.panel.configure(image=image) self.panel.image = image def _sendingCommand(self): """ start a while loop that sends 'command' to tello every 5 second """ while True: self.tello.send_command('command') time.sleep(5) def _setQuitWaitingFlag(self): """ set the variable as TRUE,it will stop computer waiting for response from tello """ self.quit_waiting_flag = True def openCmdWindow(self): """ open the cmd window and initial all the button and text """ panel = Toplevel(self.root) panel.wm_title("Command Panel") # create text input entry text0 = tki.Label(panel, text='This Controller map keyboard inputs to Tello control commands\n' 'Adjust the trackbar to reset distance and degree parameter', font='Helvetica 10 bold' ) text0.pack(side='top') text1 = tki.Label(panel, text= 'W - Move Tello Up\t\t\tArrow Up - Move Tello Forward\n' 'S - Move Tello Down\t\t\tArrow Down - Move Tello Backward\n' 'A - Rotate Tello Counter-Clockwise\tArrow Left - Move Tello Left\n' 'D - Rotate Tello Clockwise\t\tArrow Right - Move Tello Right', justify="left") text1.pack(side="top") self.btn_landing = tki.Button( panel, text="Land", relief="raised", command=self.telloLanding) self.btn_landing.pack(side="bottom", fill="both", expand="yes", padx=10, pady=5) self.btn_takeoff = tki.Button( panel, text="Takeoff", relief="raised", command=self.telloTakeOff) self.btn_takeoff.pack(side="bottom", fill="both", expand="yes", padx=10, pady=5) # binding arrow keys to drone control self.tmp_f = tki.Frame(panel, width=100, height=2) self.tmp_f.bind('<KeyPress-w>', self.on_keypress_w) self.tmp_f.bind('<KeyPress-s>', self.on_keypress_s) self.tmp_f.bind('<KeyPress-a>', self.on_keypress_a) self.tmp_f.bind('<KeyPress-d>', self.on_keypress_d) self.tmp_f.bind('<KeyPress-Up>', self.on_keypress_up) self.tmp_f.bind('<KeyPress-Down>', self.on_keypress_down) self.tmp_f.bind('<KeyPress-Left>', self.on_keypress_left) self.tmp_f.bind('<KeyPress-Right>', self.on_keypress_right) self.tmp_f.pack(side="bottom") self.tmp_f.focus_set() self.btn_landing = tki.Button( panel, text="Flip", relief="raised", command=self.openFlipWindow) self.btn_landing.pack(side="bottom", fill="both", expand="yes", padx=10, pady=5) self.distance_bar = Scale(panel, from_=0.02, to=5, tickinterval=0.01, digits=3, label='Distance(m)', resolution=0.01) self.distance_bar.set(0.2) self.distance_bar.pack(side="left") self.btn_distance = tki.Button(panel, text="Reset Distance", relief="raised", command=self.updateDistancebar, ) self.btn_distance.pack(side="left", fill="both", expand="yes", padx=10, pady=5) self.degree_bar = Scale(panel, from_=1, to=360, tickinterval=10, label='Degree') self.degree_bar.set(30) self.degree_bar.pack(side="right") self.btn_distance = tki.Button(panel, text="Reset Degree", relief="raised", command=self.updateDegreebar) self.btn_distance.pack(side="right", fill="both", expand="yes", padx=10, pady=5) def openFlipWindow(self): """ open the flip window and 
initial all the button and text """ panel = Toplevel(self.root) panel.wm_title("Gesture Recognition") self.btn_flipl = tki.Button( panel, text="Flip Left", relief="raised", command=self.telloFlip_l) self.btn_flipl.pack(side="bottom", fill="both", expand="yes", padx=10, pady=5) self.btn_flipr = tki.Button( panel, text="Flip Right", relief="raised", command=self.telloFlip_r) self.btn_flipr.pack(side="bottom", fill="both", expand="yes", padx=10, pady=5) self.btn_flipf = tki.Button( panel, text="Flip Forward", relief="raised", command=self.telloFlip_f) self.btn_flipf.pack(side="bottom", fill="both", expand="yes", padx=10, pady=5) self.btn_flipb = tki.Button( panel, text="Flip Backward", relief="raised", command=self.telloFlip_b) self.btn_flipb.pack(side="bottom", fill="both", expand="yes", padx=10, pady=5) def takeSnapshot(self): """ save the current frame of the video as a jpg file and put it into outputpath """ # grab the current timestamp and use it to construct the filename ts = datetime.datetime.now() filename = "{}.jpg".format(ts.strftime("%Y-%m-%d_%H-%M-%S")) p = os.path.sep.join((self.outputPath, filename)) # save the file cv2.imwrite(p, cv2.cvtColor(self.frame, cv2.COLOR_RGB2BGR)) print("[INFO] saved {}".format(filename)) def pauseVideo(self): """ Toggle the freeze/unfreze of video """ if self.btn_pause.config('relief')[-1] == 'sunken': self.btn_pause.config(relief="raised") self.tello.video_freeze(False) else: self.btn_pause.config(relief="sunken") self.tello.video_freeze(True) def telloTakeOff(self): return self.tello.takeoff() def telloLanding(self): return self.tello.land() def telloFlip_l(self): return self.tello.flip('l') def telloFlip_r(self): return self.tello.flip('r') def telloFlip_f(self): return self.tello.flip('f') def telloFlip_b(self): return self.tello.flip('b') def telloCW(self, degree): return self.tello.rotate_cw(degree) def telloCCW(self, degree): return self.tello.rotate_ccw(degree) def telloMoveForward(self, distance): return self.tello.move_forward(distance) def telloMoveBackward(self, distance): return self.tello.move_backward(distance) def telloMoveLeft(self, distance): return self.tello.move_left(distance) def telloMoveRight(self, distance): return self.tello.move_right(distance) def telloUp(self, dist): return self.tello.move_up(dist) def telloDown(self, dist): return self.tello.move_down(dist) def updateTrackBar(self): self.my_tello_hand.setThr(self.hand_thr_bar.get()) def updateDistancebar(self): self.distance = self.distance_bar.get() print ('reset distance to %.1f' % self.distance) def updateDegreebar(self): self.degree = self.degree_bar.get() print ('reset distance to %d' % self.degree) def on_keypress_w(self, event): print ("up %d m" % self.distance) self.telloUp(self.distance) def on_keypress_s(self, event): print ("down %d m" % self.distance) self.telloDown(self.distance) def on_keypress_a(self, event): print ("ccw %d degree" % self.degree) self.tello.rotate_ccw(self.degree) def on_keypress_d(self, event): print ("cw %d m" % self.degree) self.tello.rotate_cw(self.degree) def on_keypress_up(self, event): print ("forward %d m" % self.distance) self.telloMoveForward(self.distance) def on_keypress_down(self, event): print ("backward %d m" % self.distance) self.telloMoveBackward(self.distance) def on_keypress_left(self, event): print ("left %d m" % self.distance) self.telloMoveLeft(self.distance) def on_keypress_right(self, event): print ("right %d m" % self.distance) self.telloMoveRight(self.distance) def on_keypress_enter(self, event): if self.frame is 
not None: self.registerFace() self.tmp_f.focus_set() def onClose(self): """ set the stop event, cleanup the camera, and allow the rest of the quit process to continue """ print("[INFO] closing...") self.stopEvent.set() del self.tello self.root.quit()
avg_line_length: 37.177285 | max_line_length: 113 | alphanum_fraction: 0.580583
count_classes: 13,232 (score 0.985918) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 3,799 (score 0.283064)

hexsha: 5d3a6779a16e847e6ab8367c806b8cd0393b9b7c | size: 160 | ext: py | lang: Python
max_stars: __temp/examples/rhino/mesh-stanford-dragon.py @ robin-gdwl/examples_topop-desc (head 3a10dfc891c3e6998029c7baf8a5a7a501870fe2), licenses ["MIT"], count null, event datetimes null
max_issues: same path, repo, head, and licenses; count null, event datetimes null
max_forks: same path, repo, head, and licenses; 1 fork, events 2022-01-16T02:32:43.000Z to 2022-01-16T02:32:43.000Z
content:
import compas
import compas_rhino
from compas.datastructures import Mesh

mesh = Mesh.from_ply(compas.get('stanford_dragon.ply'))
compas_rhino.mesh_draw(mesh)
avg_line_length: 20 | max_line_length: 55 | alphanum_fraction: 0.825
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 21 (score 0.13125)

hexsha: 5d3b019f7105ea70804aca52b749a325dbd4f20c | size: 416 | ext: py | lang: Python
max_stars: Python/ML_DL/DL/Neural-Networks-Demystified-master/partOne.py @ vbsteja/code (head 0c8f4dc579f5de21b6c55fe6e65c3c8eb5473687), licenses ["Apache-2.0"], 3 stars, events 2018-08-06T15:34:58.000Z to 2022-02-11T14:19:05.000Z
max_issues: same path, repo, head, and licenses; count null, event datetimes null
max_forks: same path, repo, head, and licenses; 3 forks, events 2018-08-06T15:35:01.000Z to 2020-08-08T07:53:07.000Z
content:
# Neural Networks Demystified
# Part 1: Data + Architecture
#
# Supporting code for short YouTube series on artificial neural networks.
#
# Stephen Welch
# @stephencwelch

import numpy as np

# X = (hours sleeping, hours studying), y = Score on test
X = np.array(([3,5], [5,1], [10,2]), dtype=float)
y = np.array(([75], [82], [93]), dtype=float)

# Normalize
X = X/np.amax(X, axis=0)
y = y/100  # Max test score is 100
avg_line_length: 24.470588 | max_line_length: 73 | alphanum_fraction: 0.673077
count_classes: 0 (score 0) | count_generators: 0 (score 0) | count_decorators: 0 (score 0) | count_async_functions: 0 (score 0) | count_documentation: 254 (score 0.610577)

hexsha: 5d3c91e42dac2041a621585dba8f1dfdc1e88107 | size: 19,048 | ext: py | lang: Python
max_stars: manubot/process/util.py @ benstear/manubot (head df184a5c7e5eb98894a3edb43d9772d1ac3e01ab), licenses ["BSD-3-Clause"], count null, event datetimes null
max_issues: same path, repo, head, and licenses; count null, event datetimes null
max_forks: same path, repo, head, and licenses; count null, event datetimes null
content:
import json import logging import os import pathlib import re import textwrap import warnings from typing import List, Optional import jinja2 import pandas import requests import requests_cache import yaml from manubot.util import read_serialized_data, read_serialized_dict from manubot.process.bibliography import load_manual_references from manubot.process.ci import get_continuous_integration_parameters from manubot.process.metadata import ( get_header_includes, get_thumbnail_url, get_manuscript_urls, get_software_versions, ) from manubot.process.manuscript import ( datetime_now, get_manuscript_stats, get_text, ) from manubot.cite.citekey import ( citekey_to_csl_item, shorten_citekey, is_valid_citekey, standardize_citekey, ) def check_collisions(citekeys_df): """ Check for short_citekey hash collisions """ collision_df = citekeys_df[["standard_citekey", "short_citekey"]].drop_duplicates() collision_df = collision_df[collision_df.short_citekey.duplicated(keep=False)] if not collision_df.empty: logging.error(f"OMF! Hash collision. Congratulations.\n{collision_df}") return collision_df def check_multiple_citation_strings(citekeys_df): """ Identify different citation strings referring the the same reference. """ message = textwrap.dedent( f"""\ {len(citekeys_df)} unique citations strings extracted from text {citekeys_df.standard_citekey.nunique()} unique standard citations\ """ ) logging.info(message) multi_df = citekeys_df[citekeys_df.standard_citekey.duplicated(keep=False)] if not multi_df.empty: table = multi_df.to_string( index=False, columns=["standard_citekey", "manuscript_citekey"] ) logging.warning(f"Multiple citekeys detected for the same reference:\n{table}") return multi_df def read_variable_files(paths: List[str], variables: Optional[dict] = None) -> dict: """ Read multiple serialized data files into a user_variables dictionary. Provide `paths` (a list of URLs or local file paths). Paths can optionally have a namespace prepended. For example: ```python paths = [ 'https://git.io/vbkqm', # update the dictionary's top-level 'namespace_1=https://git.io/vbkqm', # store under 'namespace_1' key 'namespace_2=some_local_path.json', # store under 'namespace_2' key ] ``` If a namespace is not provided, the JSON must contain a dictionary as its top level. Namespaces should consist only of ASCII alphanumeric characters (includes underscores, first character cannot be numeric). Pass a dictionary to `variables` to update an existing dictionary rather than create a new dictionary. 
""" if variables is None: variables = {} for path in paths: logging.info(f"Reading user-provided templating variables at {path!r}") # Match only namespaces that are valid jinja2 variable names # http://jinja.pocoo.org/docs/2.10/api/#identifier-naming match = re.match(r"([a-zA-Z_][a-zA-Z0-9_]*)=(.+)", path) if match: namespace, path = match.groups() logging.info( f"Using the {namespace!r} namespace for template variables from {path!r}" ) try: if match: obj = {namespace: read_serialized_data(path)} else: obj = read_serialized_dict(path) except Exception: logging.exception(f"Error reading template variables from {path!r}") continue assert isinstance(obj, dict) conflicts = variables.keys() & obj.keys() if conflicts: logging.warning( f"Template variables in {path!r} overwrite existing " "values for the following keys:\n" + "\n".join(conflicts) ) variables.update(obj) logging.debug( f"Reading user-provided templating variables complete:\n" f"{json.dumps(variables, indent=2, ensure_ascii=False)}" ) return variables def add_author_affiliations(variables: dict) -> dict: """ Edit variables to contain numbered author affiliations. Specifically, add a list of affiliation_numbers for each author and add a list of affiliations to the top-level of variables. If no authors have any affiliations, variables is left unmodified. """ rows = list() for author in variables["authors"]: if "affiliations" not in author: continue if not isinstance(author["affiliations"], list): warnings.warn( f"Expected list for {author['name']}'s affiliations. " f"Assuming multiple affiliations are `; ` separated. " f"Please switch affiliations to a list.", category=DeprecationWarning, ) author["affiliations"] = author["affiliations"].split("; ") for affiliation in author["affiliations"]: rows.append((author["name"], affiliation)) if not rows: return variables affil_map_df = pandas.DataFrame(rows, columns=["name", "affiliation"]) affiliation_df = affil_map_df[["affiliation"]].drop_duplicates() affiliation_df["affiliation_number"] = range(1, 1 + len(affiliation_df)) affil_map_df = affil_map_df.merge(affiliation_df) name_to_numbers = { name: sorted(df.affiliation_number) for name, df in affil_map_df.groupby("name") } for author in variables["authors"]: author["affiliation_numbers"] = name_to_numbers.get(author["name"], []) variables["affiliations"] = affiliation_df.to_dict(orient="records") return variables def load_variables(args) -> dict: """ Read `metadata.yaml` and files specified by `--template-variables-path` to generate manuscript variables available for jinja2 templating. Returns a dictionary, refered to as `variables`, with the following keys: - `pandoc`: a dictionary for passing options to Pandoc via the `yaml_metadata_block`. Fields in `pandoc` are either generated by Manubot or hard-coded by the user if `metadata.yaml` includes a `pandoc` dictionary. - `manubot`: a dictionary for manubot-related information and metadata. Fields in `manubot` are either generated by Manubot or hard-coded by the user if `metadata.yaml` includes a `manubot` dictionary. - All fields from a manuscript's `metadata.yaml` that are not interpreted by Manubot are copied to `variables`. Interpreted fields include `pandoc`, `manubot`, `title`, `keywords`, `authors` (formerly `author_info`, now deprecated), `lang`, and `thumbnail`. - User-specified fields inserted according to the `--template-variables-path` option. User-specified variables take highest precedence and can overwrite values for existing keys like `pandoc` or `manubot` (dangerous). 
""" # Generated manuscript variables variables = {"pandoc": {}, "manubot": {}} # Read metadata which contains pandoc_yaml_metadata # as well as authors information. if args.meta_yaml_path.is_file(): metadata = read_serialized_dict(args.meta_yaml_path) else: metadata = {} logging.warning( f"missing {args.meta_yaml_path} file with yaml_metadata_block for pandoc" ) # Interpreted keys that are intended for pandoc move_to_pandoc = "title", "keywords", "lang" for key in move_to_pandoc: if key in metadata: variables["pandoc"][key] = metadata.pop(key) # Add date to metadata now = datetime_now() logging.info( f"Using {now:%Z} timezone.\n" f"Dating manuscript with the current datetime: {now.isoformat()}" ) variables["pandoc"]["date-meta"] = now.date().isoformat() variables["manubot"]["date"] = f"{now:%B} {now.day}, {now.year}" # Process authors metadata if "author_info" in metadata: authors = metadata.pop("author_info", []) warnings.warn( "metadata.yaml: 'author_info' is deprecated. Use 'authors' instead.", category=DeprecationWarning, ) else: authors = metadata.pop("authors", []) if authors is None: authors = [] variables["pandoc"]["author-meta"] = [author["name"] for author in authors] variables["manubot"]["authors"] = authors add_author_affiliations(variables["manubot"]) # Set repository version metadata for CI builds ci_params = get_continuous_integration_parameters() if ci_params: variables["manubot"]["ci_source"] = ci_params # Add manuscript URLs variables["manubot"].update(get_manuscript_urls(metadata.pop("html_url", None))) # Add software versions variables["manubot"].update(get_software_versions()) # Add thumbnail URL if present thumbnail_url = get_thumbnail_url(metadata.pop("thumbnail", None)) if thumbnail_url: variables["manubot"]["thumbnail_url"] = thumbnail_url # Update variables with metadata.yaml pandoc/manubot dicts for key in "pandoc", "manubot": dict_ = metadata.pop(key, {}) if not isinstance(dict_, dict): logging.warning( f"load_variables expected metadata.yaml field {key!r} to be a dict." f"Received a {dict_.__class__.__name__!r} instead." ) continue variables[key].update(dict_) # Update variables with uninterpreted metadata.yaml fields variables.update(metadata) # Update variables with user-provided variables here variables = read_variable_files(args.template_variables_path, variables) # Add header-includes metadata with <meta> information for the HTML output's <head> variables["pandoc"]["header-includes"] = get_header_includes(variables) assert args.skip_citations # Extend Pandoc's metadata.bibliography field with manual references paths bibliographies = variables["pandoc"].get("bibliography", []) if isinstance(bibliographies, str): bibliographies = [bibliographies] assert isinstance(bibliographies, list) bibliographies.extend(args.manual_references_paths) bibliographies = list(map(os.fspath, bibliographies)) variables["pandoc"]["bibliography"] = bibliographies # enable pandoc-manubot-cite option to write bibliography to a file variables["pandoc"]["manubot-output-bibliography"] = os.fspath(args.references_path) variables["pandoc"]["manubot-output-citekeys"] = os.fspath(args.citations_path) variables["pandoc"]["manubot-requests-cache-path"] = os.fspath( args.requests_cache_path ) variables["pandoc"]["manubot-clear-requests-cache"] = args.clear_requests_cache return variables def get_citekeys_df(citekeys: list, citekey_aliases: dict = {}): """ Generate and return citekeys_df. 
citekeys_df is a pandas.DataFrame with the following columns: - manuscript_citekey: citation keys extracted from the manuscript content files. - detagged_citekey: manuscript_citekey but with tag citekeys dereferenced - standard_citekey: detagged_citekey standardized - short_citekey: standard_citekey hashed to create a shortened citekey """ citekeys_df = pandas.DataFrame( {"manuscript_citekey": list(citekeys)} ).drop_duplicates() citekeys_df["detagged_citekey"] = citekeys_df.manuscript_citekey.map( lambda citekey: citekey_aliases.get(citekey, citekey) ) for citation in citekeys_df.detagged_citekey: is_valid_citekey(citation, allow_raw=True) citekeys_df["standard_citekey"] = citekeys_df.detagged_citekey.map( standardize_citekey ) citekeys_df["short_citekey"] = citekeys_df.standard_citekey.map(shorten_citekey) citekeys_df = citekeys_df.sort_values(["standard_citekey", "detagged_citekey"]) check_collisions(citekeys_df) check_multiple_citation_strings(citekeys_df) return citekeys_df def read_citations_tsv(path) -> dict: """ Read citekey aliases from a citation-tags.tsv file. """ if not path.is_file(): logging.info( f"no citation tags file at {path} " "Not reading citekey_aliases from citation-tags.tsv." ) return {} tag_df = pandas.read_csv(path, sep="\t") na_rows_df = tag_df[tag_df.isnull().any(axis="columns")] if not na_rows_df.empty: logging.error( f"{path} contains rows with missing values:\n" f"{na_rows_df}\n" "This error can be caused by using spaces rather than tabs to delimit fields.\n" "Proceeding to reread TSV with delim_whitespace=True." ) tag_df = pandas.read_csv(path, delim_whitespace=True) tag_df["manuscript_citekey"] = "tag:" + tag_df.tag tag_df = tag_df.rename(columns={"citation": "detagged_citekey"}) citekey_aliases = dict( zip(tag_df["manuscript_citekey"], tag_df["detagged_citekey"]) ) return citekey_aliases def write_citekeys_tsv(citekeys_df, path): if not path: return citekeys_df.to_csv(path, sep="\t", index=False) def _citation_tags_to_reference_links(args) -> str: """ Convert citation-tags.tsv to markdown reference link syntax """ citekey_aliases = read_citations_tsv(args.citation_tags_path) if not citekey_aliases: return "" text = "\n\n" for key, value in citekey_aliases.items(): text += f"[@{key}]: {value}\n" logging.warning( "citation-tags.tsv is deprecated. " f"Consider deleting citation-tags.tsv and inserting the following paragraph into your Markdown content:{text}" ) return text def generate_csl_items( citekeys: list, manual_refs: dict = {}, requests_cache_path: Optional[str] = None, clear_requests_cache: Optional[bool] = False, ) -> list: """ General CSL (citeproc) items for standard_citekeys in citekeys_df. Parameters: - citekeys: list of standard_citekeys - manual_refs: mapping from standard_citekey to csl_item for manual references - requests_cache_path: path for the requests cache database. Passed as cache_name to `requests_cache.install_cache`. requests_cache may append an extension to this path, so it is not always the exact path to the cache. If None, do not use requests_cache. - clear_requests_cache: If True, clear the requests cache before generating citekey metadata. """ # Deduplicate citations citekeys = list(dict.fromkeys(citekeys)) # Install cache if requests_cache_path is not None: requests # require `import requests` in case this is essential for monkey patching by requests_cache. 
requests_cache.install_cache(requests_cache_path, include_get_headers=True) cache = requests_cache.get_cache() if clear_requests_cache: logging.info("Clearing requests-cache") requests_cache.clear() logging.info( f"requests-cache starting with {len(cache.responses)} cached responses" ) csl_items = list() failures = list() for standard_citekey in citekeys: if standard_citekey in manual_refs: csl_items.append(manual_refs[standard_citekey]) continue elif standard_citekey.startswith("raw:"): logging.error( f"CSL JSON Data with a standard_citekey of {standard_citekey!r} not found in manual-references.json. " "Metadata must be provided for raw citekeys." ) failures.append(standard_citekey) try: csl_item = citekey_to_csl_item(standard_citekey) csl_items.append(csl_item) except Exception: logging.exception(f"Citeproc retrieval failure for {standard_citekey!r}") failures.append(standard_citekey) # Uninstall cache if requests_cache_path is not None: logging.info( f"requests-cache finished with {len(cache.responses)} cached responses" ) requests_cache.uninstall_cache() if failures: message = "CSL JSON Data retrieval failed for the following standardized citation keys:\n{}".format( "\n".join(failures) ) logging.error(message) return csl_items def _generate_csl_items(args, citekeys_df): """ General CSL (citeproc) items for standard_citekeys in citekeys_df. Writes references.json to disk and logs warnings for potential problems. """ # Read manual references (overrides) in JSON CSL manual_refs = load_manual_references(args.manual_references_paths) # Retrieve CSL Items csl_items = generate_csl_items( citekeys=citekeys_df.standard_citekey.unique(), manual_refs=manual_refs, requests_cache_path=args.requests_cache_path, clear_requests_cache=args.clear_requests_cache, ) # Write CSL JSON bibliography for Pandoc. write_csl_json(csl_items, args.references_path) return csl_items def write_csl_json(csl_items, path): """ Write CSL Items to a JSON file at `path`. If `path` evaluates as False, do nothing. """ if not path: return path = pathlib.Path(path) with path.open("w", encoding="utf-8") as write_file: json.dump(csl_items, write_file, indent=2, ensure_ascii=False) write_file.write("\n") def template_with_jinja2(text, variables): """ Template using jinja2 with the variables dictionary unpacked as keyword arguments. """ jinja_environment = jinja2.Environment( loader=jinja2.BaseLoader(), undefined=jinja2.make_logging_undefined(logging.getLogger()), autoescape=False, comment_start_string="{##", comment_end_string="##}", extensions=["jinja2.ext.do", "jinja2.ext.loopcontrols"], ) template = jinja_environment.from_string(text) return template.render(**variables) def prepare_manuscript(args): """ Compile manuscript, creating manuscript.md and references.json as inputs for pandoc. """ text = get_text(args.content_directory) assert args.skip_citations text += _citation_tags_to_reference_links(args) variables = load_variables(args) variables["manubot"]["manuscript_stats"] = get_manuscript_stats(text) with args.variables_path.open("w", encoding="utf-8") as write_file: json.dump(variables, write_file, ensure_ascii=False, indent=2) write_file.write("\n") text = template_with_jinja2(text, variables) # Write manuscript for pandoc with args.manuscript_path.open("w", encoding="utf-8") as write_file: yaml.dump( variables["pandoc"], write_file, default_flow_style=False, explicit_start=True, explicit_end=True, width=float("inf"), ) write_file.write("\n") write_file.write(text)
37.496063
118
0.679546
0
0
0
0
0
0
0
0
8,414
0.441726
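A note on the manubot content that closes above: _citation_tags_to_reference_links emits one Markdown reference-link line per tag read from citation-tags.tsv. Below is a minimal, self-contained sketch of that output format; the tag name and DOI are invented for illustration and are not part of the record.

# Hypothetical aliases in the shape read_citations_tsv returns: {"tag:<tag>": "<citekey>"}
citekey_aliases = {"tag:example-study": "doi:10.1000/xyz123"}

text = "\n\n"
for key, value in citekey_aliases.items():
    text += f"[@{key}]: {value}\n"

print(text)  # -> [@tag:example-study]: doi:10.1000/xyz123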
5d3efa01c738d69c4c33cd7d548df41216a056d7
3,738
py
Python
iba_scrape.py
wmwilcox/mix-mind
02da016f314bb5f30f267f1f46c67c6d4a4c370c
[ "Apache-2.0" ]
1
2021-05-02T19:50:44.000Z
2021-05-02T19:50:44.000Z
iba_scrape.py
wmwilcox/mix-mind
02da016f314bb5f30f267f1f46c67c6d4a4c370c
[ "Apache-2.0" ]
34
2018-08-07T13:09:29.000Z
2021-05-13T17:25:18.000Z
iba_scrape.py
wmwilcox/mix-mind
02da016f314bb5f30f267f1f46c67c6d4a4c370c
[ "Apache-2.0" ]
4
2019-02-14T04:17:24.000Z
2021-05-14T15:33:39.000Z
#! /usr/bin/env python # scrape the IBA pages for cocktail lists import sys import xml.etree.ElementTree as ET from lxml import html import requests from pprint import pprint from collections import OrderedDict import json url = 'http://iba-world.com/new-era-drinks/' jsonfile = 'IBA_new_era_drinks.json' url = 'http://iba-world.com/iba-cocktails/' jsonfile = 'IBA_unforgettables.json' url = 'http://iba-world.com/contemporary-classics/' jsonfile = 'IBA_contemporary_classics.json' jsonfile = 'IBA_.json' recipes = OrderedDict() page = requests.get(url) tree = html.fromstring(page.content) items = tree.findall(".//div[@class='blog_list_item_lists']") for item in items: name = item.find(".//h3").text name = ' '.join([word.capitalize() for word in name.split()]) body = item.find(".//div[@class='blog_text']") recipes[name] = {'unit': 'cL'} print name children = [c for c in body.iterchildren()] n = 0 if children[1].tag == 'ul': n = -1 style = children[n+1].text if style is None: try: style = children[n+1].find('span').text except: pass recipes[name]['style'] = style recipes[name]['ingredients'] = OrderedDict() if not children[n+2].tag == 'ul': print "adapting <p> ingredients:", children[n+2].text ing_list = ET.tostring(children[n+2]).lstrip('<p>').rstrip('</p>\n').split('<br />\n') else: ing_list = [i.text for i in children[n+2].iterchildren()] for ingredient in ing_list: if len(ingredient.split()) == 1: recipes[name]['ingredients'][ingredient.lower()] = '' continue unit = ingredient.split()[1].lower() if unit == 'cl': recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[2:]])] = float(ingredient.split()[0]) elif unit == 'bar' or unit == 'to': # bar spoon recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[3:]])] = ' '.join(ingredient.split()[:3]) elif unit == 'dashes' or unit == 'drops' or unit == 'with': recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[2:]])] = ' '.join(ingredient.split()[:2]) elif unit == 'dash': recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[2:]])] = 'dash' else: print "using literal: ", ingredient literal = {'1': 'one', '2': 'two', 'A': 'one'} try: recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[1:]])] = literal[ingredient.split()[0]] except: recipes[name]['ingredients'][ingredient.lower()] = '' # Get full description from the link ref_url = item.find(".//a[@class='top_hover_image']").attrib.get('href') detail_page = requests.get(ref_url) detail_tree = html.fromstring(detail_page.content) use_next = False for child in detail_tree.find(".//div[@class='col-sm-9']").iterchildren(): if use_next and child.tag == 'p': recipes[name]['IBA_description'] = child.text break if child.tag =='ul': use_next = True with open(jsonfile, 'w') as fp: json.dump(recipes, fp, indent=4, separators=(',', ': ')) print "Wrote out as {}".format(jsonfile) sys.exit(0) raw = sys.argv[1] with open(raw) as fp: for line in fp.readlines(): if line.lstrip().startswith(r'<h3>'): print line.lstrip() # super hax if line.startswith(r'<p>'): print line if line.startswith(r'<li>'): print line if not line.lstrip().startswith('<'): print line
33.981818
132
0.584805
0
0
0
0
0
0
0
0
847
0.226592
5d3f1eebd4bcf21a7d4d5c5ef291d2d1f120515e
1,101
py
Python
Data Structures/Tree.py
Royals-Aeo-Gamer/MyPyMods
be3a521e9f823ce0b704f925b19f6f34dcb5405d
[ "MIT" ]
null
null
null
Data Structures/Tree.py
Royals-Aeo-Gamer/MyPyMods
be3a521e9f823ce0b704f925b19f6f34dcb5405d
[ "MIT" ]
null
null
null
Data Structures/Tree.py
Royals-Aeo-Gamer/MyPyMods
be3a521e9f823ce0b704f925b19f6f34dcb5405d
[ "MIT" ]
null
null
null
class TreeNode:
    def __init__(self, name, data, parent=None):
        self.name = name
        self.parent = parent
        self.data = data
        self.childs = {}

    def add_child(self, name, data):
        self.childs.update({name:(type(self))(name, data, self)})

    def rm_branch(self, name, ansistors_n: list = None,):
        focus = self.childs
        while True:
            if ansistors_n == None or ansistors_n == self.name:
                del focus[name]
                break
            elif ansistors_n[0] in focus:
                focus = (focus[ansistors_n[0]]).childs
                del ansistors_n[0]
            elif name in focus and ansistors_n is None:
                del focus[name]
                break
            else:
                print(focus)
                raise NameError(f"couldn't find branch {ansistors_n[0]}")

    def __getitem__(self, item):
        return self.childs[item]

    def __setitem__(self, key, value):
        self.childs[key] = value

    def __delitem__(self, key, ansistors_n: list = None):
        self.rm_branch(key, ansistors_n)
31.457143
73
0.551317
1,099
0.998183
0
0
0
0
0
0
40
0.036331
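A minimal usage sketch for the TreeNode class in the Tree.py record above. The node names and data values are invented for illustration; only behaviour visible in the record's code is exercised (top-level removal via rm_branch with the default ansistors_n=None).

# Build a small tree and exercise add_child, __getitem__ and rm_branch.
root = TreeNode("root", data=0)
root.add_child("docs", data=1)
root.add_child("src", data=2)
root["src"].add_child("main", data=3)

print(root["src"]["main"].data)   # -> 3

root.rm_branch("docs")            # removes the top-level "docs" branch
print(list(root.childs))          # -> ['src']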
5d3f6e4da89be36858bff2a63bb4de2ff240849a
244
py
Python
config.py
ggiaquin16/GroupProject19
f491abc4e8f127552dc7384f3378e14029da8008
[ "CC-BY-3.0" ]
null
null
null
config.py
ggiaquin16/GroupProject19
f491abc4e8f127552dc7384f3378e14029da8008
[ "CC-BY-3.0" ]
null
null
null
config.py
ggiaquin16/GroupProject19
f491abc4e8f127552dc7384f3378e14029da8008
[ "CC-BY-3.0" ]
null
null
null
api_key = "9N7hvPP9yFrjBnELpBdthluBjiOWzJZw"
mongo_url = 'mongodb://localhost:27017'
mongo_db = 'CarPopularity'
mongo_collections = ['CarSalesByYear', 'PopularCarsByRegion']
years_data = ['2019', '2018', '2017', '2016', '2015']
test_mode = True
40.666667
61
0.75
0
0
0
0
0
0
0
0
143
0.586066
5d4008c47be6196efe901a8e83cca011533d0bf1
2,648
py
Python
pytorch_ares/pytorch_ares/attack_torch/mim.py
thu-ml/realsafe
474d549aa402b4cdd5e3629d23d035c31b60a360
[ "MIT" ]
107
2020-06-15T09:55:11.000Z
2020-12-20T11:27:11.000Z
pytorch_ares/pytorch_ares/attack_torch/mim.py
haichen-ber/ares
474d549aa402b4cdd5e3629d23d035c31b60a360
[ "MIT" ]
7
2020-06-14T03:00:18.000Z
2020-12-07T07:10:10.000Z
pytorch_ares/pytorch_ares/attack_torch/mim.py
haichen-ber/ares
474d549aa402b4cdd5e3629d23d035c31b60a360
[ "MIT" ]
19
2020-06-14T08:35:33.000Z
2020-12-19T13:43:41.000Z
import imp import torch import torch.nn as nn import numpy as np import torch.nn.functional as F from pytorch_ares.attack_torch.utils import loss_adv class MIM(object): '''Projected Gradient Descent''' def __init__(self, net, epsilon, p, stepsize, steps, decay_factor, data_name,target, loss, device): self.epsilon = epsilon self.p = p self.net = net self.decay_factor = decay_factor self.stepsize = stepsize self.target = target self.steps = steps self.loss = loss self.data_name = data_name self.device = device if self.data_name=="cifar10" and self.target: raise AssertionError('cifar10 dont support targeted attack') def forward(self, image, label, target_labels): image, label = image.to(self.device), label.to(self.device) if target_labels is not None: target_labels = target_labels.to(self.device) batchsize = image.shape[0] advimage = image momentum = torch.zeros_like(image).detach() # PGD to get adversarial example for i in range(self.steps): advimage = advimage.clone().detach().requires_grad_(True) # clone the advimage as the next iteration input netOut = self.net(advimage) loss = loss_adv(self.loss, netOut, label, target_labels, self.target, self.device) grad = torch.autograd.grad(loss, [advimage])[0].detach() grad_norm = torch.norm(nn.Flatten()(grad), p=1, dim=1) grad = grad / grad_norm.view([-1]+[1]*(len(grad.shape)-1)) grad = grad + momentum*self.decay_factor momentum = grad if self.p==np.inf: updates = grad.sign() else: normVal = torch.norm(grad.view(batchsize, -1), self.p, 1) updates = grad/normVal.view(batchsize, 1, 1, 1) updates = updates*self.stepsize advimage = advimage+updates # project the disturbed image to feasible set if needed delta = advimage-image if self.p==np.inf: delta = torch.clamp(delta, -self.epsilon, self.epsilon) else: normVal = torch.norm(delta.view(batchsize, -1), self.p, 1) mask = normVal<=self.epsilon scaling = self.epsilon/normVal scaling[mask] = 1 delta = delta*scaling.view(batchsize, 1, 1, 1) advimage = image+delta advimage = torch.clamp(advimage, 0, 1)#cifar10(-1,1) return advimage
39.522388
118
0.583837
2,496
0.942598
0
0
0
0
0
0
228
0.086103
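For context on the mim.py record above: the core MIM step L1-normalizes the gradient, accumulates it into a momentum buffer scaled by decay_factor, takes a signed step (the p = inf case), and projects back into the epsilon-ball around the clean input. The sketch below restates that inner loop in plain PyTorch on dummy data; the shapes, constants, and the stand-in loss are assumptions for illustration, and it does not use the MIM class or the ares loss_adv helper.

import torch

x = torch.rand(4, 3, 32, 32)                # dummy clean batch (illustrative shape)
adv = x.clone()
momentum = torch.zeros_like(x)
epsilon, stepsize, decay = 8 / 255, 2 / 255, 1.0

for _ in range(10):
    adv = adv.clone().detach().requires_grad_(True)
    loss = adv.sum()                        # stand-in objective; the record uses loss_adv(...)
    grad = torch.autograd.grad(loss, [adv])[0].detach()
    # L1-normalize, then accumulate momentum (grad + decay * momentum, as in the record).
    grad = grad / grad.abs().flatten(1).sum(dim=1).view(-1, 1, 1, 1)
    momentum = grad + decay * momentum
    # Signed step and projection back into the L-inf epsilon-ball, then into [0, 1].
    adv = adv + stepsize * momentum.sign()
    adv = x + torch.clamp(adv - x, -epsilon, epsilon)
    adv = torch.clamp(adv, 0, 1)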
5d4029e498cad9d638e5fe5f4c3a65f28490da96
303
py
Python
src/utils/templatetags/menubutton.py
pwelzel/bornhack-website
af794e6a2fba06e09626259c7768feb30ff394be
[ "BSD-3-Clause" ]
null
null
null
src/utils/templatetags/menubutton.py
pwelzel/bornhack-website
af794e6a2fba06e09626259c7768feb30ff394be
[ "BSD-3-Clause" ]
null
null
null
src/utils/templatetags/menubutton.py
pwelzel/bornhack-website
af794e6a2fba06e09626259c7768feb30ff394be
[ "BSD-3-Clause" ]
null
null
null
from django import template

register = template.Library()


@register.simple_tag(takes_context=True)
def menubuttonclass(context, appname):
    if appname == context['request'].resolver_match.func.view_class.__module__.split(".")[0]:
        return "btn-primary"
    else:
        return "btn-default"
25.25
93
0.716172
0
0
0
0
241
0.79538
0
0
38
0.125413
5d41c3b8ea2fc0ea3e45c5b6768c95bfbb166b0c
1,965
py
Python
wiki/tests.py
Prones94/Make_Wiki
f8816eb31bb370f48affff8568a6b0d0ffaf7cd4
[ "MIT" ]
null
null
null
wiki/tests.py
Prones94/Make_Wiki
f8816eb31bb370f48affff8568a6b0d0ffaf7cd4
[ "MIT" ]
5
2020-06-06T01:41:16.000Z
2021-06-10T20:09:01.000Z
wiki/tests.py
Prones94/Make_Wiki
f8816eb31bb370f48affff8568a6b0d0ffaf7cd4
[ "MIT" ]
null
null
null
from django.test import TestCase from django.contrib.auth.models import User from wiki.models import Page from django.utils.text import slugify # Create your tests here. class WikiPageTest(TestCase): def test_edit(self): user = User.objects.create_user(username='admin', password='djangopony') self.client.login(username='admin', password='djangopony') page = Page.objects.create(title="My Test Page", content="test", author=user) page.save() edit = { 'title': 'testing title', 'content': 'testing content' } response = self.client.post('/%s/' %slugify(page.title), edit) updated = Page.objects.get(title = edit['title']) self.assertEqual(response.status_code, 302) self.assertEqual(updated.title, edit['title']) def test_page(self): user = User.objects.create_user(username='admin', password='djangopony') self.client.login(username='admin', password='djangopony') page = Page.objects.create(title="My Test Page", content="test", author=user) page.save() response = self.client.get('/%s/' %slugify(page.title)) self.assertEqual(response.status_code, 200) self.assertContains(response, 'test') def test_create(self): user = User.objects.create_user(username='admin', password='djangopony') self.client.login(username='admin', password='djangopony') new = { 'title': 'testing title', 'content': 'testing content' } response = self.client.post('/wiki/new/', new) updated = Page.objects.get(title = new['title']) self.assertEqual(response.status_code, 302) self.assertEqual(updated.title, new['title']) ''' Steps to writing a test 1. Set up your test data 2. Make a request (GET, POST) 3a. Check if response matches what we expect 3b. Check if database matches what we expect '''
33.87931
85
0.641221
1,601
0.814758
0
0
0
0
0
0
525
0.267176
5d42291558faa9e742ab82a57f0f93c0ba5ed168
66
py
Python
birdy/__init__.py
tkiapril/birdy
cf6a8f8d31c9363dbf7398ae3d78fe3069a5a936
[ "MIT" ]
1
2015-01-07T17:47:54.000Z
2015-01-07T17:47:54.000Z
birdy/__init__.py
tkiapril/birdy
cf6a8f8d31c9363dbf7398ae3d78fe3069a5a936
[ "MIT" ]
null
null
null
birdy/__init__.py
tkiapril/birdy
cf6a8f8d31c9363dbf7398ae3d78fe3069a5a936
[ "MIT" ]
null
null
null
__author__ = 'Mitja Pagon <[email protected]>'
__version__ = '0.2'
22
45
0.712121
0
0
0
0
0
0
0
0
37
0.560606
5d424aaa1fdb6fb518af8c5169d61b82bae9ef0f
1,928
py
Python
ares/defense/randomization.py
KuanKuanQAQ/ares
40dbefc18f6438e1812021fe6d6c3195f22ca295
[ "MIT" ]
206
2020-12-31T09:43:11.000Z
2022-03-30T07:02:41.000Z
ares/defense/randomization.py
afoolboy/ares
89610d41fdde194e4ad916d29961aaed73383692
[ "MIT" ]
7
2021-01-26T06:45:44.000Z
2022-02-26T05:25:48.000Z
ares/defense/randomization.py
afoolboy/ares
89610d41fdde194e4ad916d29961aaed73383692
[ "MIT" ]
61
2020-12-29T14:02:41.000Z
2022-03-26T14:21:10.000Z
''' The randomization defense method, which applies random . '''

import tensorflow as tf

from ares.defense.input_transformation import input_transformation


def randomize(xs, scale_min=0.875, pad_value=0.0):
    ''' Apply random rescaling and padding to xs.

    :param xs: A batch of inputs for some classifier.
    :param scale_min: The random rescaling rate would be chosen between ``scale_min`` and 1.0.
    :param pad_value: ``constant_values`` parameter for the ``tf.pad`` method.
    :return: A new tensor with same shape and dtype as xs.
    '''
    ratio = tf.random.uniform((), minval=scale_min, maxval=1.0)
    height, width = tf.cast(xs.shape[1].value * ratio, tf.int32), tf.cast(xs.shape[2].value * ratio, tf.int32)
    xs_rescaled = tf.image.resize(xs, (height, width), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
                                  align_corners=True, preserve_aspect_ratio=False)
    height_rem, width_rem = xs.shape[1].value - height, xs.shape[2].value - width
    pad_left = tf.random_uniform((), 0, width_rem, dtype=tf.int32)
    pad_right = width_rem - pad_left
    pad_top = tf.random_uniform((), 0, height_rem, dtype=tf.int32)
    pad_bottom = height_rem - pad_top
    xs_padded = tf.pad(xs_rescaled, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]],
                       constant_values=pad_value)
    xs_padded.set_shape(xs.shape)
    return xs_padded


def randomization(scale_min=0.875, pad_value=0.0):
    ''' A decorator to apply randomize rescaling and padding to input of the classifier.

    :param scale_min: The random rescaling rate would be chosen between ``scale_min`` and 1.0.
    :param pad_value: ``constant_values`` parameter for the ``tf.pad`` method.
    '''
    def args_fn(_):
        return (scale_min, pad_value)

    def kwargs_fn(_):
        return {}

    return lambda rs_class: input_transformation(rs_class, randomize, args_fn, kwargs_fn)
43.818182
110
0.688797
0
0
0
0
0
0
0
0
672
0.348548
5d43ba93812ece31b158196b6ad2d32a374bd0f8
147
py
Python
annotate/backend/admin.py
hopeogbons/image-annotation
2d8b1799bc791428fd3ab29d8052195996923130
[ "Apache-2.0" ]
null
null
null
annotate/backend/admin.py
hopeogbons/image-annotation
2d8b1799bc791428fd3ab29d8052195996923130
[ "Apache-2.0" ]
11
2021-03-09T10:15:39.000Z
2022-02-26T13:53:51.000Z
annotate/backend/admin.py
hopeogbons/image-annotation
2d8b1799bc791428fd3ab29d8052195996923130
[ "Apache-2.0" ]
null
null
null
from django.contrib import admin

from annotate.backend.models import Image, Annotation

admin.site.register(Image)
admin.site.register(Annotation)
24.5
53
0.836735
0
0
0
0
0
0
0
0
0
0
5d451217c589da4fbfb78dd271865830d16162d1
826
py
Python
34. Find First and Last Position of Element in Sorted Array/main.py
Competitive-Programmers-Community/LeetCode
841fdee805b1a626e9f1cd0e12398d25054638af
[ "MIT" ]
2
2019-10-05T09:48:20.000Z
2019-10-05T15:40:01.000Z
34. Find First and Last Position of Element in Sorted Array/main.py
Competitive-Programmers-Community/LeetCode
841fdee805b1a626e9f1cd0e12398d25054638af
[ "MIT" ]
null
null
null
34. Find First and Last Position of Element in Sorted Array/main.py
Competitive-Programmers-Community/LeetCode
841fdee805b1a626e9f1cd0e12398d25054638af
[ "MIT" ]
null
null
null
class Solution:
    def searchRange(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        if not nums:
            return [-1, -1]
        low = 0
        high = len(nums) - 1
        f = 0
        while low<=high:
            mid = (low+high)//2
            if nums[mid] == target:
                f = 1
                break
            elif nums[mid] < target:
                low = mid + 1
            elif nums[mid] > target:
                high = mid - 1
        i, j = mid, mid
        while i>=1 and nums[i-1] == target:
            i = i-1
        while j<len(nums)-1 and nums[j+1] == target:
            j = j+1
        if f == 1:
            return [i, j]
        else:
            return [-1, -1]
24.294118
52
0.361985
825
0.998789
0
0
0
0
0
0
97
0.117433
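A small usage sketch for Solution.searchRange from the record above: binary search finds any occurrence of target, then the [i, j] window is widened over equal neighbours. The input lists are invented for illustration.

sol = Solution()
print(sol.searchRange([5, 7, 7, 8, 8, 10], 8))   # -> [3, 4]
print(sol.searchRange([5, 7, 7, 8, 8, 10], 6))   # -> [-1, -1]
print(sol.searchRange([], 0))                    # -> [-1, -1]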
5d4867285dd6af6ea7e2fbae337fc27c75376241
1,893
py
Python
c/create.py
LMS57/domato
005739f55b49ead0ac47ea14b324decee05a7625
[ "Apache-2.0" ]
null
null
null
c/create.py
LMS57/domato
005739f55b49ead0ac47ea14b324decee05a7625
[ "Apache-2.0" ]
null
null
null
c/create.py
LMS57/domato
005739f55b49ead0ac47ea14b324decee05a7625
[ "Apache-2.0" ]
null
null
null
data = open('./original').readlines() alphabet = { "<":"lt", ">":"gt", "=":"=", "-":'-', "+":"+", "-":"-", "~":"~", "!":"ex", "%":"%", "^":"^", "&":"&", "*":"*", "(":"(", ")":"right_paran", "[":"[", "]":"]", "{":"{", "}":"}", "[":"[", "]":"]", "|":"|", ";":";", ":":":", ",":",", ".":".", "?":"?", "/":"/", } def item(y): if "'" in y: tmp = y.split("'")[1] test = 0 for x in alphabet: if x in tmp: test = 1 if test: final = '' for x in tmp: final += item(alphabet[x]) return final else: return item(tmp) else: return "<"+y+">" start = 0 current = "" space = "<space>" declared = [] referenced = [] for x in data: x = x.strip() if x == "": continue if '%%' == x: start = 1 continue elif start != 1: continue if x == "test": break; x = x.split(' ') if len(x) == 1:#item declaration or end if x[0] == ';': current = "" else: current = x[0] declared.append(item(x[0])) print "" else: x = x[1:] tmp = item(current)+'\t=\t' for y in range(len(x)): referenced.append(item(x[y])) tmp += item(x[y]) if y != len(x)-1 and "'" not in x[y+1] and "'" not in x[y]: tmp+=space print tmp referenced = set(referenced) final = [] for x in referenced: if x not in declared: final.append(x) print "" for x in final: tmp = x+'\t=\t' x = x[1:-1] print tmp + x.lower()
18.742574
71
0.320655
0
0
0
0
0
0
0
0
280
0.147913
5d48a2b09ec3e91f3ac7c94a610ddffec5774abc
10,500
py
Python
AppServer/google/appengine/api/memcache/memcache_distributed.py
isabella232/scale-safe
8b887726768106b6b67d7be6ea257bee5cd83f9a
[ "Apache-2.0" ]
3
2016-06-12T01:18:49.000Z
2018-07-16T18:20:23.000Z
AppServer/google/appengine/api/memcache/memcache_distributed.py
davgit/appscale
17d35a14fa5a56975de1e3517bec9e7f9047d82a
[ "Apache-2.0" ]
1
2021-06-08T10:04:35.000Z
2021-06-08T10:04:35.000Z
AppServer/google/appengine/api/memcache/memcache_distributed.py
davgit/appscale
17d35a14fa5a56975de1e3517bec9e7f9047d82a
[ "Apache-2.0" ]
1
2020-05-25T02:59:15.000Z
2020-05-25T02:59:15.000Z
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Non-stub version of the memcache API, keeping all data in memcached. Uses the python-memcached library to interface with memcached. """ import base64 import cPickle import logging import memcache import os import time from google.appengine.api import apiproxy_stub from google.appengine.api.memcache import memcache_service_pb from google.appengine.runtime import apiproxy_errors MemcacheSetResponse = memcache_service_pb.MemcacheSetResponse MemcacheSetRequest = memcache_service_pb.MemcacheSetRequest MemcacheIncrementRequest = memcache_service_pb.MemcacheIncrementRequest MemcacheIncrementResponse = memcache_service_pb.MemcacheIncrementResponse MemcacheDeleteResponse = memcache_service_pb.MemcacheDeleteResponse from google.appengine.api.memcache import TYPE_INT from google.appengine.api.memcache import TYPE_LONG class MemcacheService(apiproxy_stub.APIProxyStub): """Python only memcache service. This service keeps all data in any external servers running memcached. """ # The memcached default port. MEMCACHE_PORT = "11211" # An AppScale file which has a list of IPs running memcached. APPSCALE_MEMCACHE_FILE = "/etc/appscale/memcache_ips" # The minimum frequency by which memcache clients will update their list of # clients that they connect to (which can change if AppScale scales up or # down). UPDATE_WINDOW = 60 # seconds def __init__(self, gettime=time.time, service_name='memcache'): """Initializer. Args: gettime: time.time()-like function used for testing. service_name: Service name expected for all calls. """ super(MemcacheService, self).__init__(service_name) self._gettime = gettime self._memcache = None self.setupMemcacheClient() def setupMemcacheClient(self): """ Sets up the memcache client. """ if os.path.exists(self.APPSCALE_MEMCACHE_FILE): memcache_file = open(self.APPSCALE_MEMCACHE_FILE, "r") all_ips = memcache_file.read().split("\n") memcache_file.close() else: all_ips = ['localhost'] memcaches = [ip + ":" + self.MEMCACHE_PORT for ip in all_ips if ip != ''] memcaches.sort() self._memcache = memcache.Client(memcaches, debug=0) def _Dynamic_Get(self, request, response): """Implementation of gets for memcache. Args: request: A MemcacheGetRequest protocol buffer. response: A MemcacheGetResponse protocol buffer. """ for key in set(request.key_list()): internal_key = self._GetKey(request.name_space(), key) value = self._memcache.get(internal_key) if value is None: continue flags = 0 stored_flags, cas_id, stored_value = cPickle.loads(value) flags |= stored_flags item = response.add_item() item.set_key(key) item.set_value(stored_value) item.set_flags(flags) if request.for_cas(): item.set_cas_id(cas_id) def _Dynamic_Set(self, request, response): """Implementation of sets for memcache. Args: request: A MemcacheSetRequest. response: A MemcacheSetResponse. 
""" for item in request.item_list(): key = self._GetKey(request.name_space(), item.key()) set_policy = item.set_policy() old_entry = self._memcache.get(key) cas_id = 0 if old_entry: _, cas_id, _ = cPickle.loads(old_entry) set_status = MemcacheSetResponse.NOT_STORED if ((set_policy == MemcacheSetRequest.SET) or (set_policy == MemcacheSetRequest.ADD and old_entry is None) or (set_policy == MemcacheSetRequest.REPLACE and old_entry is not None)): if (old_entry is None or set_policy == MemcacheSetRequest.SET): set_status = MemcacheSetResponse.STORED elif (set_policy == MemcacheSetRequest.CAS and item.for_cas() and item.has_cas_id()): if old_entry is None: set_status = MemcacheSetResponse.NOT_STORED elif cas_id != item.cas_id(): set_status = MemcacheSetResponse.EXISTS else: set_status = MemcacheSetResponse.STORED if (set_status == MemcacheSetResponse.STORED or set_policy == MemcacheSetRequest.REPLACE): set_value = cPickle.dumps( [item.flags(), cas_id + 1, item.value()]) if set_policy == MemcacheSetRequest.REPLACE: self._memcache.replace(key, set_value) else: self._memcache.set(key, set_value, item.expiration_time()) response.add_set_status(set_status) def _Dynamic_Delete(self, request, response): """Implementation of delete in memcache. Args: request: A MemcacheDeleteRequest protocol buffer. response: A MemcacheDeleteResponse protocol buffer. """ for item in request.item_list(): key = self._GetKey(request.name_space(), item.key()) entry = self._memcache.get(key) delete_status = MemcacheDeleteResponse.DELETED if entry is None: delete_status = MemcacheDeleteResponse.NOT_FOUND else: self._memcache.delete(key) response.add_delete_status(delete_status) def _Increment(self, namespace, request): """Internal function for incrementing from a MemcacheIncrementRequest. Args: namespace: A string containing the namespace for the request, if any. Pass an empty string if there is no namespace. request: A MemcacheIncrementRequest instance. Returns: An integer or long if the offset was successful, None on error. """ if not request.delta(): return None cas_id = 0 key = self._GetKey(namespace, request.key()) value = self._memcache.get(key) if value is None: if not request.has_initial_value(): return None flags, cas_id, stored_value = ( TYPE_INT, cas_id, str(request.initial_value())) else: flags, cas_id, stored_value = cPickle.loads(value) if flags == TYPE_INT: new_value = int(stored_value) elif flags == TYPE_LONG: new_value = long(stored_value) if request.direction() == MemcacheIncrementRequest.INCREMENT: new_value += request.delta() elif request.direction() == MemcacheIncrementRequest.DECREMENT: new_value -= request.delta() new_stored_value = cPickle.dumps([flags, cas_id + 1, str(new_value)]) try: self._memcache.cas(key, new_stored_value) except Exception, e: logging.error(str(e)) return None return new_value def _Dynamic_Increment(self, request, response): """Implementation of increment for memcache. Args: request: A MemcacheIncrementRequest protocol buffer. response: A MemcacheIncrementResponse protocol buffer. """ new_value = self._Increment(request.name_space(), request) if new_value is None: raise apiproxy_errors.ApplicationError( memcache_service_pb.MemcacheServiceError.UNSPECIFIED_ERROR) response.set_new_value(new_value) def _Dynamic_BatchIncrement(self, request, response): """Implementation of batch increment for memcache. Args: request: A MemcacheBatchIncrementRequest protocol buffer. response: A MemcacheBatchIncrementResponse protocol buffer. 
""" namespace = request.name_space() for request_item in request.item_list(): new_value = self._Increment(namespace, request_item) item = response.add_item() if new_value is None: item.set_increment_status(MemcacheIncrementResponse.NOT_CHANGED) else: item.set_increment_status(MemcacheIncrementResponse.OK) item.set_new_value(new_value) def _Dynamic_FlushAll(self, request, response): """Implementation of MemcacheService::FlushAll(). Args: request: A MemcacheFlushRequest. response: A MemcacheFlushResponse. """ self._memcache.flush_all() def _Dynamic_Stats(self, request, response): """Implementation of MemcacheService::Stats(). Args: request: A MemcacheStatsRequest. response: A MemcacheStatsResponse. """ stats = response.mutable_stats() num_servers = 0 hits_total = 0 misses_total = 0 byte_hits_total = 0 items_total = 0 bytes_total = 0 time_total = 0 def get_stats_value(stats_dict, key, _type=int): """ Gets statisical values and makes sure the key is in the dict. """ if key not in stats_dict: logging.warn("No stats for key '%s'." % key) return _type(stats_dict.get(key, '0')) for server, server_stats in self._memcache.get_stats(): num_servers += 1 hits_total += get_stats_value(server_stats, 'get_hits') misses_total += get_stats_value(server_stats, 'get_misses') byte_hits_total += get_stats_value(server_stats, 'bytes_read') items_total += get_stats_value(server_stats, 'curr_items') bytes_total += get_stats_value(server_stats, 'bytes') time_total += get_stats_value(server_stats, 'time', float) stats.set_hits(hits_total) stats.set_misses(misses_total) stats.set_byte_hits(byte_hits_total) stats.set_items(items_total) stats.set_bytes(bytes_total) # With the Python 2.7 GAE runtime, it expects all fields here to be ints. # Python 2.5 was fine with this being a float, so callers in that runtime # may not be expecting an int. stats.set_oldest_item_age(int(time.time() - time_total / num_servers)) def _GetKey(self, namespace, key): """Used to get the Memcache key. It is encoded because the sdk allows special characters but the Memcache client does not. Args: namespace: The namespace as provided by the application. key: The key as provided by the application. Returns: A base64 string __{appname}__{namespace}__{key} """ appname = os.environ['APPNAME'] internal_key = appname + "__" + namespace + "__" + key return base64.b64encode(internal_key)
33.980583
77
0.701619
9,069
0.863714
0
0
0
0
0
0
3,540
0.337143
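The _GetKey method in the memcache record above builds its internal key as appname + "__" + namespace + "__" + key and base64-encodes the result so characters the memcache client rejects become safe. Below is a standalone Python 3 sketch of that encoding (the record itself is Python 2 and reads appname from the APPNAME environment variable); the appname, namespace and key values are invented.

import base64

def make_internal_key(appname, namespace, key):
    # Mirrors _GetKey's concatenation, then base64 for memcache-safe characters.
    internal_key = appname + "__" + namespace + "__" + key
    return base64.b64encode(internal_key.encode()).decode()

print(make_internal_key("guestbook", "prod", "greeting:1"))
# base64 of 'guestbook__prod__greeting:1', e.g. 'Z3Vlc3Rib29r...'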
5d493476e5ae3fc5c2137c7a547ce012434fae4f
4,927
py
Python
inflateutils/exportmesh.py
arpruss/inflatemesh
ab4abfc7794fd4cf96f41bb797e1b2a61f687a46
[ "MIT" ]
8
2017-11-30T14:03:25.000Z
2021-03-02T03:16:01.000Z
inflateutils/exportmesh.py
arpruss/inflatemesh
ab4abfc7794fd4cf96f41bb797e1b2a61f687a46
[ "MIT" ]
2
2019-03-15T04:10:04.000Z
2021-01-11T17:44:31.000Z
inflateutils/exportmesh.py
arpruss/inflatemesh
ab4abfc7794fd4cf96f41bb797e1b2a61f687a46
[ "MIT" ]
2
2018-04-08T10:59:39.000Z
2021-01-28T03:37:57.000Z
from struct import pack from .vector import * from .formatdecimal import decimal from numbers import Number import os import sys try: basestring except: basestring = str def isColorTriangleList(polys): return isinstance(polys[0][1][0][0], Number) def toPolyhedra(polys): if isColorTriangleList(polys): return [ (polys[0][0], list(face for rgb,face in polys)) ] else: return polys def toMesh(polys): if isColorTriangleList(polys): return polys else: output = [] for rgb,polyhedron in polys: for face in polyhedron: output.append((rgb,face)) return output def describeColor(c): if c is None: return "undef"; elif isinstance(c, str): return c else: return "[%s,%s,%s]" % tuple(decimal(component) for component in c) def toSCADModule(polys, moduleName, digitsAfterDecimal=9, colorOverride=None): """ INPUT: polys: list of (color,polyhedra) pairs (counterclockwise triangles), or a list of (color,triangle) pairs (TODO: currently uses first color for all in latter case) moduleName: OpenSCAD module name OUTPUT: string with OpenSCAD code implementing the polys """ polys = toPolyhedra(polys) scad = [] scad.append("module " +moduleName+ "() {") for rgb,poly in polys: if colorOverride != "" and (colorOverride or rgb): line = " color(%s) " % describeColor(colorOverride if colorOverride else tuple(min(max(c,0.),1.0) for c in rgb)) else: line = " " pointsDict = {} i = 0 line += "polyhedron(points=[" points = [] for face in poly: for v in reversed(face): if tuple(v) not in pointsDict: pointsDict[tuple(v)] = i points.append( ("[%s,%s,%s]") % tuple(decimal(x,digitsAfterDecimal) for x in v) ) i += 1 line += ",".join(points) line += "], faces=[" line += ",".join( "[" + ",".join(str(pointsDict[tuple(v)]) for v in reversed(face)) + "]" for face in poly ) + "]" line += ");" scad.append(line) scad.append("}\n") return "\n".join(scad) def saveSCAD(filename, polys, moduleName="object1", quiet=False): """ filename: filename to write OpenSCAD file polys: list of (color,polyhedra) pairs (counterclockwise triangles) moduleName: OpenSCAD module name quiet: give no status message if set """ if not quiet: sys.stderr.write("Saving %s\n" % filename) if filename: with open(filename, "w") as f: f.write(toSCADModule(polys, moduleName)) f.write("\n" + moduleName + "();\n") else: sys.stdout.write(toSCADModule(polys, moduleName)) sys.stdout.write("\n" + moduleName + "();\n") def saveSTL(filename, mesh, swapYZ=False, quiet=False): """ filename: filename to save STL file mesh: list of (color,triangle) pairs (counterclockwise) swapYZ: should Y/Z axes be swapped? 
quiet: give no status message if set """ mesh = toMesh(mesh) if not quiet: sys.stderr.write("Saving %s\n" % filename) minY = float("inf") minVector = Vector(float("inf"),float("inf"),float("inf")) numTriangles = 0 if swapYZ: matrix = Matrix( (1,0,0), (0,0,-1), (0,1,0) ) else: matrix = Matrix.identity(3) mono = True for rgb,triangle in mesh: if rgb is not None: mono = False numTriangles += 1 for vertex in triangle: vertex = matrix*vertex minVector = Vector(min(minVector[i], vertex[i]) for i in range(3)) minVector -= Vector(0.001,0.001,0.001) # make sure all STL coordinates are strictly positive as per Wikipedia def writeSTL(write): write(pack("80s",b'')) write(pack("<I",numTriangles)) for rgb,tri in mesh: if mono: color = 0 else: if rgb is None: rgb = (255,255,255) else: rgb = tuple(min(255,max(0,int(0.5 + 255 * comp))) for comp in rgb) color = 0x8000 | ( (rgb[0] >> 3) << 10 ) | ( (rgb[1] >> 3) << 5 ) | ( (rgb[2] >> 3) << 0 ) normal = (Vector(tri[1])-Vector(tri[0])).cross(Vector(tri[2])-Vector(tri[0])).normalize() write(pack("<3f", *(matrix*normal))) for vertex in tri: write(pack("<3f", *(matrix*(vertex-minVector)))) write(pack("<H", color)) if filename: with open(filename, "wb") as f: writeSTL(f.write) else: if sys.platform == "win32": import msvcrt msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) writeSTL(lambda data : os.write(sys.stdout.fileno(), data))
33.517007
166
0.558149
0
0
0
0
0
0
0
0
1,008
0.204587
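The saveSTL function in the exportmesh.py record above writes the standard binary STL layout: an 80-byte header, a little-endian uint32 triangle count, and per triangle a normal (3 float32), three vertices (3 float32 each) and a uint16 attribute word, into which the record packs a 5-5-5 colour. A minimal single-triangle sketch of that layout; the file name and coordinates are invented.

import struct

tri = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]   # arbitrary right triangle
normal = (0.0, 0.0, 1.0)

with open("single_triangle.stl", "wb") as f:
    f.write(struct.pack("80s", b""))        # 80-byte header
    f.write(struct.pack("<I", 1))           # triangle count
    f.write(struct.pack("<3f", *normal))    # facet normal
    for vertex in tri:
        f.write(struct.pack("<3f", *vertex))
    f.write(struct.pack("<H", 0))           # attribute word (colour in the record, 0 here)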
5d4b0bc52d1482cd0028c140868e692cfb38b3c0
3,982
py
Python
Assignment1/Identification/match_module.py
arywatt/FDS_2020_2021
392f360b219c6ef5e2c685da1f3c8aab7415ce32
[ "MIT" ]
null
null
null
Assignment1/Identification/match_module.py
arywatt/FDS_2020_2021
392f360b219c6ef5e2c685da1f3c8aab7415ce32
[ "MIT" ]
null
null
null
Assignment1/Identification/match_module.py
arywatt/FDS_2020_2021
392f360b219c6ef5e2c685da1f3c8aab7415ce32
[ "MIT" ]
1
2020-10-29T08:38:42.000Z
2020-10-29T08:38:42.000Z
import numpy as np from PIL import Image import matplotlib.pyplot as plt import histogram_module import dist_module def rgb2gray(rgb): r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] gray = 0.2989 * r + 0.5870 * g + 0.1140 * b return gray # model_images - list of file names of model images # query_images - list of file names of query images # # dist_type - string which specifies distance type: 'chi2', 'l2', 'intersect' # hist_type - string which specifies histogram type: 'grayvalue', 'dxdy', 'rgb', 'rg' # # note: use functions 'get_dist_by_name', 'get_hist_by_name' and 'is_grayvalue_hist' to obtain # handles to distance and histogram functions, and to find out whether histogram function # expects grayvalue or color image def find_best_match(model_images, query_images, dist_type, hist_type, num_bins): hist_isgray = histogram_module.is_grayvalue_hist(hist_type) model_hists = compute_histograms(model_images, hist_type, hist_isgray, num_bins) query_hists = compute_histograms(query_images, hist_type, hist_isgray, num_bins) D = np.zeros((len(model_images), len(query_images))) # compute distance for each couple of query - image for j, query in enumerate(query_hists): for i, model in enumerate(model_hists): D[i, j] = dist_module.get_dist_by_name(model, query, dist_type) best_match = [] # to save best matches # for each query , find best model for j in range(len(query_images)): query_matches = D[:, j] # get query columns from matrix argmin = np.argmin(query_matches) # get index with minimum distance best_match.append(argmin) # save index for query best_match = np.array(best_match) # array of best match for each query return best_match, D def compute_histograms(image_list, hist_type, hist_isgray, num_bins): image_hist = [] # Compute hisgoram for each image and add it at the bottom of image_hist # ... (your code here) for img in image_list: img_color = np.array(Image.open(img)) # if hist is gray type we use gray image # othewise rgb image img_to_process = rgb2gray(img_color) if hist_isgray else img_color.astype('double') # We compute histogram for image hist = histogram_module.get_hist_by_name(img=img_to_process, num_bins_gray=num_bins, hist_name=hist_type ) image_hist.append(hist) return image_hist # For each image file from 'query_images' find and visualize the 5 nearest images from 'model_image'. # # Note: use the previously implemented function 'find_best_match' # Note: use subplot command to show all the images in the same Python figure, one row per query image def show_neighbors(model_images, query_images, dist_type, hist_type, num_bins): plt.figure() num_nearest = 5 # show the top-5 neighbors # ... (your code here) _, D = find_best_match(model_images=model_images, query_images=query_images, dist_type=dist_type, hist_type=hist_type, num_bins=num_bins ) Q = len(query_images) pos = 0 for j in range(Q): query_matches = D[:, j] best_args = np.argsort(query_matches)[:num_nearest] query_img = query_images[j] pos += 1 plt.subplot(Q, 6, pos); plt.imshow(np.array(Image.open(query_img)), vmin=0, vmax=255); plt.title(f'Q{j}') for ind in range(len(best_args)): pos += 1 model_ind = best_args[ind] model_img = model_images[model_ind] plt.subplot(Q, 6, pos); plt.imshow(np.array(Image.open(model_img)), vmin=0, vmax=255); plt.title(f'MO.{model_ind}') plt.show()
33.745763
101
0.633852
0
0
0
0
0
0
0
0
1,260
0.316424
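In the match_module.py record above, find_best_match fills a model-by-query distance matrix D and takes the per-column argmin, while show_neighbors argsorts each column for the top-5 neighbours. A tiny NumPy illustration of that selection step on an invented distance matrix:

import numpy as np

D = np.array([[0.9, 0.2, 0.5],     # rows = model images
              [0.1, 0.8, 0.4],     # columns = query images
              [0.7, 0.6, 0.3]])

best_match = np.argmin(D, axis=0)    # best model per query -> [1 0 2]
top2 = np.argsort(D[:, 0])[:2]       # two nearest models for query 0 -> [1 2]
print(best_match, top2)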
5d4c8cdbc546fb237f365ef954c77cb12a3738d8
1,566
py
Python
dycco/__main__.py
rojalator/dycco
84ace8727aef84bb3d886cdaa3d3aef1089f1935
[ "MIT" ]
null
null
null
dycco/__main__.py
rojalator/dycco
84ace8727aef84bb3d886cdaa3d3aef1089f1935
[ "MIT" ]
1
2022-03-22T07:35:15.000Z
2022-03-22T09:15:44.000Z
dycco/__main__.py
rojalator/dycco
84ace8727aef84bb3d886cdaa3d3aef1089f1935
[ "MIT" ]
null
null
null
import argparse
import logging
import sys

from .dycco import document


def main(paths, output_dir, use_ascii:bool, escape_html:bool, single_file:bool):
    try:
        document(paths, output_dir, use_ascii, escape_html, single_file)
    except IOError as e:
        logging.error('Unable to open file: %s', e)
        return 1
    except Exception as e:
        logging.error('An error occurred: %s', e)
        return 1
    else:
        return 0


if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser(prog='dycco', description='Literate-style documentation generator.')
    arg_parser.add_argument('source_file', nargs='+', default=sys.stdin, help='Source files to document')
    arg_parser.add_argument('-o', '--output-dir', default='docs', help='Output directory (will be created if necessary)')
    arg_parser.add_argument('-a', '--asciidoc3', action='store_true', default=False, dest='use_ascii',
                            help='Process with asciidoc3 instead of markdown (you will have to install asciidoc3, of course)')
    arg_parser.add_argument('-e', '--escape-html', action='store_true', default=False, dest='escape_html',
                            help='Run the documentation through html.escape() before markdown or asciidoc3')
    arg_parser.add_argument('-f', '--single-file', action='store_true', default=False, dest='single_file',
                            help='Just produce a .md or .adoc file in single-column to be processed externally')
    args = arg_parser.parse_args()
    sys.exit(main(args.source_file, args.output_dir, args.use_ascii, args.escape_html, args.single_file))
46.058824
121
0.707535
0
0
0
0
0
0
0
0
593
0.378672
5d4c9607e3defd3816cf4fbd7853e01e09dcb111
14,354
py
Python
jumpy/jumpy/ndarray.py
rghwer/testdocs
8fafa40407411ed7a3f8216e691e42e0c7d32083
[ "Apache-2.0" ]
13,006
2015-02-13T18:35:31.000Z
2022-03-18T12:11:44.000Z
jumpy/jumpy/ndarray.py
pxiuqin/deeplearning4j
e11ddf3c24d355b43d36431687b807c8561aaae4
[ "Apache-2.0" ]
5,319
2015-02-13T08:21:46.000Z
2019-06-12T14:56:50.000Z
jumpy/jumpy/ndarray.py
pxiuqin/deeplearning4j
e11ddf3c24d355b43d36431687b807c8561aaae4
[ "Apache-2.0" ]
4,719
2015-02-13T22:48:55.000Z
2022-03-22T07:25:36.000Z
################################################################################ # Copyright (c) 2015-2018 Skymind, Inc. # # This program and the accompanying materials are made available under the # terms of the Apache License, Version 2.0 which is available at # https://www.apache.org/licenses/LICENSE-2.0. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # SPDX-License-Identifier: Apache-2.0 ################################################################################ from .java_classes import * import numpy as np import ctypes import warnings native_ops = NativeOpsHolder.getInstance().getDeviceNativeOps() # DATA TYPE MANAGEMENT DOUBLE = DataType.DOUBLE FLOAT = DataType.FLOAT HALF = DataType.HALF LONG = DataType.LONG INT = DataType.INT SHORT = DataType.SHORT UBYTE = DataType.UBYTE BYTE = DataType.BYTE BOOL = DataType.BOOL UTF8 = DataType.UTF8 COMPRESSED = DataType.COMPRESSED UNKNOWN = DataType.UNKNOWN SUPPORTED_JAVA_DTYPES = [ DOUBLE, FLOAT, HALF, LONG, INT, SHORT, BOOL #UTF8 ] SUPPORTED_PYTHON_DTYPES = [ np.float64, np.float32, np.float16, np.int64, np.int32, np.int16, np.bool_ #np.str_ ] _PY2J = {SUPPORTED_PYTHON_DTYPES[i] : SUPPORTED_JAVA_DTYPES[i] for i in range(len(SUPPORTED_JAVA_DTYPES))} _J2PY = {SUPPORTED_JAVA_DTYPES[i] : SUPPORTED_PYTHON_DTYPES[i] for i in range(len(SUPPORTED_JAVA_DTYPES))} def _dtype_py2j(dtype): if isinstance(dtype, str): dtype = np.dtype(dtype).type elif isinstance(dtype, np.dtype): dtype = dtype.type jtype = _PY2J.get(dtype) if jtype is None: raise NotImplementedError("Unsupported type: " + dtype.name) return jtype def _dtype_j2py(dtype): pytype = _J2PY.get(dtype) if pytype is None: raise NotImplementedError("Unsupported type: " + (str(dtype))) return pytype def set_context_dtype(dtype): ''' Sets the dtype for nd4j # Arguments dtype: 'float' or 'double' ''' dtype_map = { 'float32': 'float', 'float64': 'double' } dtype = dtype_map.get(dtype, dtype) if dtype not in ['float', 'double']: raise ValueError("Invalid dtype '{}'. Available dtypes are 'float' and 'double'.".format(dtype)) dtype_ = DataTypeUtil.getDtypeFromContext(dtype) DataTypeUtil.setDTypeForContext(dtype_) if get_context_dtype() != dtype: warnings.warn("Can not set context dtype now. 
Set it at the beginning of your program.") def get_context_dtype(): ''' Returns the nd4j dtype ''' dtype = DataTypeUtil.getDtypeFromContext() return DataTypeUtil.getDTypeForName(dtype) _refs = [] def _from_numpy(np_array): ''' Convert numpy array to nd4j array ''' pointer_address, _ = np_array.__array_interface__['data'] _refs.append(np_array) pointer = native_ops.pointerForAddress(pointer_address) size = np_array.size pointer.limit(size) jdtype = _dtype_py2j(np_array.dtype) ''' mapping = { DOUBLE: DoublePointer, FLOAT: FloatPointer, HALF: HalfPointer, LONG: LongPointer, INT: IntPointer, SHORT: ShortPointer, BOOL: BoolPointer } pc = mapping[jdtype] #pointer = pc(pointer) ''' buff = Nd4j.createBuffer(pointer, size, jdtype) assert buff.address() == pointer_address _refs.append(buff) elem_size = buff.getElementSize() assert elem_size == np_array.dtype.itemsize strides = np_array.strides strides = [dim / elem_size for dim in strides] shape = np_array.shape nd4j_array = Nd4j.create(buff, shape, strides, 0) assert buff.address() == nd4j_array.data().address() return nd4j_array def _to_numpy(nd4j_array): ''' Convert nd4j array to numpy array ''' buff = nd4j_array.data() address = buff.pointer().address() dtype = nd4j_array.dataType().toString() mapping = { 'DOUBLE': ctypes.c_double, 'FLOAT': ctypes.c_float, 'HALF': ctypes.c_short, 'LONG': ctypes.c_long, 'INT': ctypes.c_int, 'SHORT': ctypes.c_short, 'BOOL': ctypes.c_bool } Pointer = ctypes.POINTER(mapping[dtype]) pointer = ctypes.cast(address, Pointer) np_array = np.ctypeslib.as_array(pointer, tuple(nd4j_array.shape())) return np_array def _indarray(x): typ = type(x) if typ is INDArray: return x elif typ is ndarray: return x.array elif 'numpy' in str(typ): return _from_numpy(x) elif typ in (list, tuple): return _from_numpy(np.array(x)) elif typ in (int, float): return Nd4j.scalar(x) else: raise Exception('Data type not understood :' + str(typ)) def _nparray(x): typ = type(x) if typ is INDArray: return ndarray(x).numpy() elif typ is ndarray: return x.numpy() elif 'numpy' in str(typ): return x elif typ in (list, tuple): return np.array(x) elif typ in (int, float): return np.array(x) else: raise Exception('Data type not understood :' + str(typ)) def broadcast_like(y, x): xs = x.shape() ys = y.shape() if xs == ys: return y _xs = tuple(xs) _ys = tuple(ys) nx = len(xs) ny = len(ys) if nx > ny: diff = nx - ny ys = ([1] * diff) + ys y = y.reshape(ys) ny = nx elif ny > nx: raise Exception('Unable to broadcast shapes ' + str(_xs) + '' ' and ' + str(_ys)) yt = [] rep_y = False for xd, yd in zip(xs, ys): if xd == yd: yt.append(1) elif xd == 1: raise Exception('Unable to broadcast shapes ' + str(_xs) + '' ' and ' + str(_ys)) elif yd == 1: yt.append(xd) rep_y = True else: raise Exception('Unable to broadcast shapes ' + str(_xs) + '' ' and ' + str(_ys)) if rep_y: y = y.repmat(*yt) return y def broadcast(x, y): xs = x.shape() ys = y.shape() if xs == ys: return x, y _xs = tuple(xs) _ys = tuple(ys) nx = len(xs) ny = len(ys) if nx > ny: diff = nx - ny ys = ([1] * diff) + ys y = y.reshape(*ys) ny = nx elif ny > nx: diff = ny - nx xs = ([1] * diff) + xs x = x.reshape(*xs) nx = ny xt = [] yt = [] rep_x = False rep_y = False for xd, yd in zip(xs, ys): if xd == yd: xt.append(1) yt.append(1) elif xd == 1: xt.append(yd) yt.append(1) rep_x = True elif yd == 1: xt.append(1) yt.append(xd) rep_y = True else: raise Exception('Unable to broadcast shapes ' + str(_xs) + '' ' and ' + str(_ys)) if rep_x: x = Nd4j.tile(x, *xt) if rep_y: try: y = Nd4j.tile(y, *yt) except: y = 
Nd4j.tile(y, *yt) return x, y class ndarray(object): def __init__(self, data, dtype=None): # we ignore dtype for now typ = type(data) if 'nd4j' in typ.__name__: # Note that we don't make a copy here self.array = data elif typ is ndarray: self.array = data.array.dup() else: if typ is not np.ndarray: data = np.array(data) self.array = _from_numpy(data) def numpy(self): try: return self.np_array except AttributeError: self.np_array = _to_numpy(self.array) return self.np_array @property def size(self): return self.array.length() @property def shape(self): return tuple(self.array.shape()) @shape.setter def shape(self, value): arr = self.reshape(value) self.array = arr.array @property def ndim(self): return len(self.array.shape()) def __getitem__(self, key): return ndarray(self.numpy()[key]) if type(key) is int: return ndarray(self.array.get(NDArrayIndex.point(key))) if type(key) is slice: start = key.start stop = key.stop step = key.step if start is None: start = 0 if stop is None: shape = self.array.shape() if shape[0] == 1: stop = shape[1] else: stop = shape[0] if stop - start <= 0: return None if step is None or step == 1: return ndarray(self.array.get(NDArrayIndex.interval(start, stop))) else: return ndarray(self.array.get(NDArrayIndex.interval(start, step, stop))) if type(key) is list: raise NotImplementedError( 'Sorry, this type of indexing is not supported yet.') if type(key) is tuple: key = list(key) shape = self.array.shape() ndim = len(shape) nk = len(key) key += [slice(None)] * (ndim - nk) args = [] for i, dim in enumerate(key): if type(dim) is int: args.append(NDArrayIndex.point(dim)) elif type(dim) is slice: if dim == slice(None): args.append(NDArrayIndex.all()) else: start = dim.start stop = dim.stop step = dim.step if start is None: start = 0 if stop is None: stop = shape[i] if stop - start <= 0: return None if step is None or step == 1: args.append(NDArrayIndex.interval(start, stop)) else: args.append(NDArrayIndex.interval( start, step, stop)) elif type(dim) in (list, tuple): raise NotImplementedError( 'Sorry, this type of indexing is not supported yet.') return ndarray(self.array.get(*args)) def __setitem__(self, key, other): self.numpy()[key] = _nparray(other) return other = _indarray(other) view = self[key] if view is None: return view = view.array other = broadcast_like(other, view) view.assign(other) def __add__(self, other): return ndarray(self.numpy() + _nparray(other)) other = _indarray(other) x, y = broadcast(self.array, other) return ndarray(x.add(y)) def __sub__(self, other): return ndarray(self.numpy() - _nparray(other)) other = _indarray(other) x, y = broadcast(self.array, other) return ndarray(x.sub(y)) def __mul__(self, other): return ndarray(self.numpy() * _nparray(other)) other = _indarray(other) x, y = broadcast(self.array, other) return ndarray(x.mul(y)) def __div__(self, other): return ndarray(self.numpy() / _nparray(other)) other = _indarray(other) x, y = broadcast(self.array, other) return ndarray(x.div(y)) def __pow__(self, other): return ndarray(self.numpy() ** _nparray(other)) other = _indarray(other) x, y = broadcast(self.array, other) return ndarray(Transforms.pow(x, y)) def __iadd__(self, other): self.numpy().__iadd__(_nparray(other)) return self other = _indarray(other) if self.array.shape() == other.shape(): self.array = self.array.addi(other) else: x, y = broadcast(self.array, other) self.array = x.add(y) return self def __isub__(self, other): self.numpy().__isub__(_nparray(other)) return self other = _indarray(other) if self.array.shape() == 
other.shape(): self.array = self.array.subi(other) else: x, y = broadcast(self.array, other) self.array = x.sub(y) return self def __imul__(self, other): self.numpy().__imul__(_nparray(other)) return self other = _indarray(other) if self.array.shape() == other.shape(): self.array = self.array.muli(other) else: x, y = broadcast(self.array, other) self.array = x.mul(y) return self def __idiv__(self, other): self.numpy().__idiv__(_nparray(other)) return self other = _indarray(other) if self.array.shape() == other.shape(): self.array = self.array.divi(other) else: x, y = broadcast(self.array, other) self.array = x.div(y) return self def __ipow__(self, other): self.numpy().__ipow__(_nparray(other)) return self other = _indarray(other) if self.array.shape() == other.shape(): self.array = self.array.divi(other) else: x, y = broadcast(self.array, other) self.array = Transforms.pow(x, y) return self def __getattr__(self, attr): import ops f = getattr(ops, attr) setattr(ndarray, attr, f) return getattr(self, attr) def __int__(self): if self.array.length() == 1: return self.array.getInt(0) raise Exception('Applicable only for scalars') def __float__(self): if self.array.length() == 1: return self.array.getDouble(0) raise Exception('Applicable only for scalars') @property def T(self): return self.transpose() def array(*args, **kwargs): return ndarray(*args, **kwargs)
27.980507
106
0.544448
6,890
0.480006
0
0
367
0.025568
0
0
2,006
0.139752
5d4ce281f4ac42992169e4a43a604e5e249ccc55
592
py
Python
Python/usec_mode.py
hanayik/StimSync
f08ec01a36c47b00bfe4937b5a6eb2a60af0713d
[ "BSD-2-Clause" ]
6
2017-12-04T18:33:45.000Z
2021-08-04T02:07:21.000Z
source/Python/usec_mode.py
neurolabusc/StimSync
749908572bda3073b0911566d50fe92d74d3cdb7
[ "BSD-2-Clause" ]
null
null
null
source/Python/usec_mode.py
neurolabusc/StimSync
749908572bda3073b0911566d50fe92d74d3cdb7
[ "BSD-2-Clause" ]
3
2018-01-13T12:17:18.000Z
2021-08-01T06:43:10.000Z
import serial

ser = serial.Serial('/dev/tty.usbmodem7071', 115200, timeout=10)
ser.write("\xb1\xa3\xb5\xb5") #set usec mode 177,163,181,181
ser.flush()
ser.flushInput()
obs = ser.read(8)
if len(obs) != 8:
    print('Error: no buttons presses detected')
print 'Observed data (as hex): '+ obs.encode('hex')
obsBin = [ord(c) for c in obs]
usec = (obsBin[3] << 24)+ (obsBin[4] << 16)+ (obsBin[5] << 8)+obsBin[6]
keys = (obsBin[1] << 8)+obsBin[2]
print 'keys pressed %d at %d usec' % (keys, usec)
ser.write("\xb1\xa3\xa9\xa9") #turn off oscilloscope: set keyboard mode 177,163,169,169
ser.close()
37
87
0.663851
0
0
0
0
0
0
0
0
241
0.407095
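The usec_mode.py record above reassembles a 32-bit microsecond timestamp and a 16-bit key bitmap from individual bytes of the 8-byte report by shifting and adding. A worked illustration with invented byte values:

# Hypothetical 8-byte report (framing bytes at the ends, payload in bytes 1-6).
obsBin = [0xA9, 0x00, 0x03, 0x00, 0x01, 0x86, 0xA0, 0xA9]

usec = (obsBin[3] << 24) + (obsBin[4] << 16) + (obsBin[5] << 8) + obsBin[6]
keys = (obsBin[1] << 8) + obsBin[2]

print(usec)   # 0x000186A0 -> 100000 microseconds
print(keys)   # 0x0003 -> the two lowest button bits set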
5d4e94cca5bcc101399e2e8aec4db86507599854
4,839
py
Python
torchaudio/datasets/libritts.py
hahaxun/audio
87a1886ecfa83b398a2a9a09d9a94bd9dabc5cf5
[ "BSD-2-Clause" ]
1
2021-04-20T09:04:24.000Z
2021-04-20T09:04:24.000Z
torchaudio/datasets/libritts.py
hahaxun/audio
87a1886ecfa83b398a2a9a09d9a94bd9dabc5cf5
[ "BSD-2-Clause" ]
null
null
null
torchaudio/datasets/libritts.py
hahaxun/audio
87a1886ecfa83b398a2a9a09d9a94bd9dabc5cf5
[ "BSD-2-Clause" ]
null
null
null
import os from typing import Tuple import torchaudio from torch import Tensor from torch.utils.data import Dataset from torchaudio.datasets.utils import ( download_url, extract_archive, walk_files, ) URL = "train-clean-100" FOLDER_IN_ARCHIVE = "LibriTTS" _CHECKSUMS = { "http://www.openslr.org/60/dev-clean.tar.gz": "0c3076c1e5245bb3f0af7d82087ee207", "http://www.openslr.org/60/dev-other.tar.gz": "815555d8d75995782ac3ccd7f047213d", "http://www.openslr.org/60/test-clean.tar.gz": "7bed3bdb047c4c197f1ad3bc412db59f", "http://www.openslr.org/60/test-other.tar.gz": "ae3258249472a13b5abef2a816f733e4", "http://www.openslr.org/60/train-clean-100.tar.gz": "4a8c202b78fe1bc0c47916a98f3a2ea8", "http://www.openslr.org/60/train-clean-360.tar.gz": "a84ef10ddade5fd25df69596a2767b2d", "http://www.openslr.org/60/train-other-500.tar.gz": "7b181dd5ace343a5f38427999684aa6f", } def load_libritts_item( fileid: str, path: str, ext_audio: str, ext_original_txt: str, ext_normalized_txt: str, ) -> Tuple[Tensor, int, str, str, int, int, str]: speaker_id, chapter_id, segment_id, utterance_id = fileid.split("_") utterance_id = fileid normalized_text = utterance_id + ext_normalized_txt normalized_text = os.path.join(path, speaker_id, chapter_id, normalized_text) original_text = utterance_id + ext_original_txt original_text = os.path.join(path, speaker_id, chapter_id, original_text) file_audio = utterance_id + ext_audio file_audio = os.path.join(path, speaker_id, chapter_id, file_audio) # Load audio waveform, sample_rate = torchaudio.load(file_audio) # Load original text with open(original_text) as ft: original_text = ft.readline() # Load normalized text with open(normalized_text, "r") as ft: normalized_text = ft.readline() return ( waveform, sample_rate, original_text, normalized_text, int(speaker_id), int(chapter_id), utterance_id, ) class LIBRITTS(Dataset): """Create a Dataset for LibriTTS. Args: root (str): Path to the directory where the dataset is found or downloaded. url (str, optional): The URL to download the dataset from, or the type of the dataset to dowload. Allowed type values are ``"dev-clean"``, ``"dev-other"``, ``"test-clean"``, ``"test-other"``, ``"train-clean-100"``, ``"train-clean-360"`` and ``"train-other-500"``. (default: ``"train-clean-100"``) folder_in_archive (str, optional): The top-level directory of the dataset. (default: ``"LibriTTS"``) download (bool, optional): Whether to download the dataset if it is not found at root path. (default: ``False``). 
""" _ext_original_txt = ".original.txt" _ext_normalized_txt = ".normalized.txt" _ext_audio = ".wav" def __init__( self, root: str, url: str = URL, folder_in_archive: str = FOLDER_IN_ARCHIVE, download: bool = False, ) -> None: if url in [ "dev-clean", "dev-other", "test-clean", "test-other", "train-clean-100", "train-clean-360", "train-other-500", ]: ext_archive = ".tar.gz" base_url = "http://www.openslr.org/resources/60/" url = os.path.join(base_url, url + ext_archive) basename = os.path.basename(url) archive = os.path.join(root, basename) basename = basename.split(".")[0] folder_in_archive = os.path.join(folder_in_archive, basename) self._path = os.path.join(root, folder_in_archive) if download: if not os.path.isdir(self._path): if not os.path.isfile(archive): checksum = _CHECKSUMS.get(url, None) download_url(url, root, hash_value=checksum) extract_archive(archive) walker = walk_files( self._path, suffix=self._ext_audio, prefix=False, remove_suffix=True ) self._walker = list(walker) def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str, int, int, str]: """Load the n-th sample from the dataset. Args: n (int): The index of the sample to be loaded Returns: tuple: ``(waveform, sample_rate, original_text, normalized_text, speaker_id, chapter_id, utterance_id)`` """ fileid = self._walker[n] return load_libritts_item( fileid, self._path, self._ext_audio, self._ext_original_txt, self._ext_normalized_txt, ) def __len__(self) -> int: return len(self._walker)
32.046358
98
0.619963
2,796
0.577805
0
0
0
0
0
0
1,855
0.383344
5d4ed462393daeadb0c9bc3293879acfa5af3ab3
2,164
py
Python
Others/Source/19/19.2/barh_test.py
silence0201/Learn-Python
662da7c0e74221cedb445ba17d5cb1cd3af41c86
[ "MIT" ]
1
2018-05-30T01:38:23.000Z
2018-05-30T01:38:23.000Z
Others/Source/19/19.2/barh_test.py
silence0201/Learn-Python
662da7c0e74221cedb445ba17d5cb1cd3af41c86
[ "MIT" ]
null
null
null
Others/Source/19/19.2/barh_test.py
silence0201/Learn-Python
662da7c0e74221cedb445ba17d5cb1cd3af41c86
[ "MIT" ]
null
null
null
# coding: utf-8
#########################################################################
#    Website: <a href="http://www.crazyit.org">疯狂Java联盟</a>          #
#    author  yeeku.H.lee [email protected]                             #
#                                                                       #
#    version 1.0                                                        #
#                                                                       #
#    Copyright (C), 2001-2018, yeeku.H.Lee                              #
#                                                                       #
#    This program is protected by copyright laws.                       #
#                                                                       #
#    Program Name:                                                      #
#                                                                       #
#    <br>Date:                                                          #
#########################################################################
import matplotlib.pyplot as plt
import numpy as np
# Build the data
x_data = ['2011', '2012', '2013', '2014', '2015', '2016', '2017']
y_data = [58000, 60200, 63000, 71000, 84000, 90500, 107000]
y_data2 = [52000, 54200, 51500,58300, 56800, 59500, 62700]
bar_width=0.3
# Use range(len(x_data)) for the Y-axis data, i.e. 0, 1, 2...
plt.barh(y=range(len(x_data)), width=y_data, label='疯狂Java讲义',
    color='steelblue', alpha=0.8, height=bar_width)
# Use np.arange(len(x_data))+bar_width for the Y-axis data,
# i.e. bar_width, 1+bar_width, 2+bar_width..., so these bars sit alongside the first set
plt.barh(y=np.arange(len(x_data))+bar_width, width=y_data2,
    label='疯狂Android讲义', color='indianred', alpha=0.8, height=bar_width)
# Show the exact value on each bar; ha controls horizontal alignment, va controls vertical alignment
for y, x in enumerate(y_data):
    plt.text(x+5000, y-bar_width/2, '%s' % x, ha='center', va='bottom')
for y, x in enumerate(y_data2):
    plt.text(x+5000, y+bar_width/2, '%s' % x, ha='center', va='bottom')
# Set the tick values for the Y axis
plt.yticks(np.arange(len(x_data))+bar_width/2, x_data)
# Set the title
plt.title("Java与Android图书对比")
# Set the names of the two axes
plt.xlabel("销量")
plt.ylabel("年份")
# Show the legend
plt.legend()
plt.show()
46.042553
74
0.420055
0
0
0
0
0
0
0
0
1,643
0.688023
5d4ed8a99839b3110a2db17a408cf4dde65b3291
2,336
py
Python
03_docker_compose/03_c_simple_case_with_mongodb_orm/app/app.py
kendny/study_docker
edb376fb69319a78e05f60faa5dcc88d527602c4
[ "BSD-2-Clause" ]
2
2019-05-09T01:41:16.000Z
2022-01-06T01:06:07.000Z
03_docker_compose/03_c_simple_case_with_mongodb_orm/app/app.py
walkacross/docker_in_practice
da24da76b4fa3eabca5004abd59d7eef7a48988b
[ "BSD-2-Clause" ]
null
null
null
03_docker_compose/03_c_simple_case_with_mongodb_orm/app/app.py
walkacross/docker_in_practice
da24da76b4fa3eabca5004abd59d7eef7a48988b
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Tue Dec 18 20:13:57 2018 @author: allen """ import random, os, json, datetime, time from flask import Flask, Response from pymongo import MongoClient from bson import json_util app = Flask(__name__) MONGO_URI = "mongodb://mongodb:27017" # "mongodb:<container_name>:27017" mongdb_client= MongoClient(MONGO_URI) random_numbers = mongdb_client.demo.random_numbers time.sleep(5) # hack for the mongoDb database to get running ###################### ## ########################## from pymodm.connection import connect from pymongo.write_concern import WriteConcern from pymodm import MongoModel, fields # Connect to MongoDB and call the connection "my-app". connect("mongodb://mongodb:27017/myDatabase", alias="my-app") class User(MongoModel): email = fields.EmailField(primary_key=True) first_name = fields.CharField() last_name = fields.CharField() class Meta: write_concern = WriteConcern(j=True) connection_alias = 'my-app' @app.route("/") def hello(): html = "<h3> Hello world...</h3>" #User('[email protected]', name, 'Ross').save() return html @app.route("/add_user/<name>") def add_user(name): #User('[email protected]', name, 'Ross').save() html = "<h3> Hello </h3>" User('[email protected]', name, 'Ross').save() return "name {} save to database".format(name) @app.route("/random/<int:lower>/<int:upper>") def random_generator(lower, upper): number = str(random.randint(lower, upper)) random_numbers.update( {"_id" : "lasts"}, {"$push" : { "items" : { "$each": [{"value" : number, "date": datetime.datetime.utcnow()}], "$sort" : {"date" : -1}, "$slice" : 5 } }}, upsert=True ) return Response(number, status=200, mimetype='application/json') @app.route("/random-list") def last_number_list(): last_numbers = list(random_numbers.find({"_id" : "lasts"})) extracted = [d['value'] for d in last_numbers[0]['items']] return Response(json.dumps(extracted, default=json_util.default), status=200, mimetype='application/json') if __name__ == "__main__": port = int(os.environ.get('PORT', 5000)) app.config['DEBUG'] = True app.run(host='0.0.0.0', port=port)
26.545455
110
0.630137
240
0.10274
0
0
1,152
0.493151
0
0
774
0.331336
5d4f428d1c149bf1e2a1658ede1f6e9adcddbdd2
1,523
py
Python
goethe/eval/analogy_space.py
HPI-DeepLearning/wort2vek
bc91c2752a8516665d270c7a7a793ec484c970c4
[ "MIT" ]
4
2017-05-01T01:02:40.000Z
2022-02-03T16:14:19.000Z
goethe/eval/analogy_space.py
HPI-DeepLearning/wort2vek
bc91c2752a8516665d270c7a7a793ec484c970c4
[ "MIT" ]
6
2017-04-06T22:10:09.000Z
2017-04-06T22:10:57.000Z
goethe/eval/analogy_space.py
HPI-DeepLearning/wort2vek
bc91c2752a8516665d270c7a7a793ec484c970c4
[ "MIT" ]
null
null
null
#! /usr/bin/Python from gensim.models.keyedvectors import KeyedVectors from scipy import spatial from numpy import linalg import argparse import sys vector_file = sys.argv[1] if len(sys.argv) != 6: print('arguments wrong!') print(len(sys.argv)) exit() else: words = [sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]] print(words) wvs = KeyedVectors.load_word2vec_format(vector_file, binary=True) print('WVs loaded.') for w in words: if w not in wvs.vocab: print('out of vocab!') exit() #print(wvs.most_similar(positive=[words[1], words[2]], negative=[words[0]], topn=3)) w1 = wvs[words[0]] w2 = wvs[words[1]] w3 = wvs[words[2]] w4 = wvs[words[3]] m1 = w1 / linalg.norm(w1) m2 = w2 / linalg.norm(w2) m3 = w3 / linalg.norm(w3) m4 = w4 / linalg.norm(w4) diff1 = w1 - w2 diff2 = w3 - w4 miff1 = m1 - m2 miff2 = m3 - m4 print('-------Word Space---------') print('to word-4: ', 1-spatial.distance.cosine(m2+m3-m1, m4)) print('to word-3: ', 1-spatial.distance.cosine(m1+m4-m2, m3)) print('to word-2: ', 1-spatial.distance.cosine(m4+m1-m3, m2)) print('to word-1: ', 1-spatial.distance.cosine(m2+m3-m4, m1)) print('------Analogy Space-------') print(' cosine: ', 1-spatial.distance.cosine(diff1, diff2)) print(' Euclidean: ', 1-linalg.norm(diff1-diff2)/(linalg.norm(diff1)+linalg.norm(diff2))) print(' M-cosine: ', 1-spatial.distance.cosine(miff1, miff2)) print('M-Euclidean: ', 1-linalg.norm(miff1-miff2)/(linalg.norm(miff1)+linalg.norm(miff2)))
27.690909
91
0.644123
0
0
0
0
0
0
0
0
318
0.208798
5d5195b07f67a7785033de940e7003695bbf2ec4
2,497
py
Python
localgraphclustering/algorithms/eig2_nL.py
vishalbelsare/LocalGraphClustering
a6325350997932d548a876deb259c2387fc2c809
[ "MIT" ]
106
2017-09-06T04:47:02.000Z
2022-03-30T07:43:27.000Z
localgraphclustering/algorithms/eig2_nL.py
vishalbelsare/LocalGraphClustering
a6325350997932d548a876deb259c2387fc2c809
[ "MIT" ]
51
2017-09-06T02:22:09.000Z
2021-12-15T11:39:28.000Z
localgraphclustering/algorithms/eig2_nL.py
vishalbelsare/LocalGraphClustering
a6325350997932d548a876deb259c2387fc2c809
[ "MIT" ]
38
2017-09-04T21:45:13.000Z
2022-01-19T09:48:25.000Z
import numpy as np import scipy as sp import scipy.sparse.linalg as splinalg def eig2_nL(g, tol_eigs = 1.0e-6, normalize:bool = True, dim:int=1): """ DESCRIPTION ----------- Computes the eigenvector that corresponds to the second smallest eigenvalue of the normalized Laplacian matrix then it uses sweep cut to round the solution. PARAMETERS (mandatory) ---------------------- g: graph object PARAMETERS (optional) --------------------- dim: positive, int default == 1 The number of eigenvectors or dimensions to compute. tol_eigs: positive float, double default == 1.0e-6 Tolerance for computation of the eigenvector that corresponds to the second smallest eigenvalue of the normalized Laplacian matrix. normalize: bool, default == True True if we should return the eigenvectors of the generalized eigenvalue problem associated with the normalized Laplacian. This should be on unless you know what you are doing. RETURNS ------ p: Eigenvector or Eigenvector matrixthat corresponds to the second smallest eigenvalue of the normalized Laplacian matrix and larger eigenvectors if dim >= 0. """ n = g.adjacency_matrix.shape[0] D_sqrt_neg = sp.sparse.spdiags(g.dn_sqrt.transpose(), 0, n, n) L = sp.sparse.identity(n) - D_sqrt_neg.dot((g.adjacency_matrix.dot(D_sqrt_neg))) emb_eig_val, p = splinalg.eigsh(L, which='SM', k=1+dim, tol = tol_eigs) F = np.real(p[:,1:]) if normalize: F *= g.dn_sqrt[:,np.newaxis] return F, emb_eig_val """ Random walks and local cuts in graphs, Chung, LAA 2007 We just form the sub-matrix of the Laplacian and use the eigenvector there. """ def eig2nL_subgraph(g, ref_nodes, tol_eigs = 1.0e-6, normalize: bool = True): A_sub = g.adjacency_matrix.tocsr()[ref_nodes, :].tocsc()[:, ref_nodes] nref = len(ref_nodes) D_sqrt_neg = sp.sparse.spdiags(g.dn_sqrt[ref_nodes].transpose(), 0, nref, nref) L_sub = sp.sparse.identity(nref) - D_sqrt_neg.dot((A_sub.dot(D_sqrt_neg))) emb_eig_val, emb_eig = splinalg.eigsh(L_sub, which='SM', k=1, tol=tol_eigs) emb_eig *= -1 if max(emb_eig) < 0 else 1 f = emb_eig[:,0] if normalize: f *= g.dn_sqrt[ref_nodes] return ((ref_nodes,f), emb_eig_val)
33.293333
87
0.621946
0
0
0
0
0
0
0
0
1,393
0.557869
5d527097e73751e96803cabcd187b0fd2d52470c
1,737
py
Python
build/common/hex2carray.py
isabella232/nanos-nonsecure-firmware
d1ce2e0e01a8ed6d8840a24308e16f6560a626aa
[ "Apache-2.0" ]
16
2018-03-20T11:52:29.000Z
2021-02-12T07:39:54.000Z
build/common/hex2carray.py
LedgerHQ/nanos-nonsecure-firmware
d1ce2e0e01a8ed6d8840a24308e16f6560a626aa
[ "Apache-2.0" ]
1
2022-03-06T09:56:16.000Z
2022-03-06T09:56:16.000Z
build/common/hex2carray.py
isabella232/nanos-nonsecure-firmware
d1ce2e0e01a8ed6d8840a24308e16f6560a626aa
[ "Apache-2.0" ]
7
2017-08-24T00:42:09.000Z
2022-03-06T09:51:51.000Z
""" ******************************************************************************* * Ledger Blue * (c) 2016 Ledger * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************** """ from ledgerblue.hexParser import IntelHexParser import argparse parser = argparse.ArgumentParser() parser.add_argument("--hex", help="Hex file to be converted as a C array") args = parser.parse_args() if args.hex == None: raise Exception("Missing hex filename to sign") parser = IntelHexParser(args.hex) def hexU8(value): return hex(0x100|(value & 0xFF))[3:] for a in parser.getAreas(): if (len(a.data) > 0x10000): raise BaseException("data must be splitted in chunks of 64k") print "0x" + hexU8(a.start >> 24) + ", 0x" + hexU8(a.start >> 16) + ", 0x" + hexU8(a.start >> 8) + ", 0x" + hexU8(a.start) + ", " print "0x" + hexU8(len(a.data) >> 24) + ", 0x" + hexU8(len(a.data) >> 16) + ", 0x" + hexU8(len(a.data) >> 8) + ", 0x" + hexU8(len(a.data)) + ", " # low @ to high @ offset = 0 while offset < len(a.data): string = "" for i in range(8): if offset+i < len(a.data): string += " 0x" + hexU8(a.data[offset+i]) + "," print string offset+=8
31.581818
146
0.599885
0
0
0
0
0
0
0
0
954
0.549223
5d52775ef423ec088ebd9b5618d6a0b7639f157e
2,418
py
Python
setup.py
xames3/vdoxa
8fa945449bb34447ded0c421214c0252ff523d4a
[ "Apache-2.0" ]
1
2020-02-04T08:18:54.000Z
2020-02-04T08:18:54.000Z
setup.py
xames3/vdoxa
8fa945449bb34447ded0c421214c0252ff523d4a
[ "Apache-2.0" ]
null
null
null
setup.py
xames3/vdoxa
8fa945449bb34447ded0c421214c0252ff523d4a
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 XAMES3. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # ====================================================================== """ vdoXA is an open-source python package for trimming the videos. It is built as a subsystem for < XXXXX Not to be named XXXXX > project. Originally inspired by my colleague's work, I thought of improving the concept and build a tool to simplify the process. I hope it comes with strong support for continuous updates, reliable functions and overall ease of use. Read complete documentation at: <https://github.com/xames3/vdoxa>. """ from setuptools import find_packages, setup from vdoxa.vars import dev doclines = __doc__.split('\n') def use_readme() -> str: """Use `README.md` for parsing long description.""" with open('README.md') as file: return file.read() with open('requirements.txt', 'r') as requirements: required_packages = [package.rstrip() for package in requirements] setup( name=dev.PROJECT_NAME, version=dev.PROJECT_VERSION, url=dev.PROJECT_LINK, download_url=dev.PROJECT_LINK, author=dev.AUTHOR, author_email=dev.AUTHOR_EMAIL, maintainer=dev.AUTHOR, maintainer_email=dev.AUTHOR_EMAIL, classifiers=[ 'Intended Audience :: Developers', 'Intended Audience :: End Users/Desktop', 'Intended Audience :: Information Technology', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Natural Language :: English', ], license=dev.PROJECT_LICENSE, description=f'{doclines[1]}', long_description=use_readme(), long_description_content_type='text/markdown', keywords='opencv2 cv2 moviepy', zip_safe=False, install_requires=required_packages, python_requires='~=3.6', include_package_data=True, packages=find_packages(), entry_points={ 'console_scripts': [ 'vdoxa = vdoxa.parser:main', ], } )
31
72
0.715054
0
0
0
0
0
0
0
0
1,519
0.628205
5d52a5f4ab272695a4c951a2d0a2e0909bf0ed0b
1,413
py
Python
application/modules/login.py
BaggerFast/Simple_votings
843769fa6fd2c04feb542e6b301b7b4810260d4e
[ "MIT" ]
null
null
null
application/modules/login.py
BaggerFast/Simple_votings
843769fa6fd2c04feb542e6b301b7b4810260d4e
[ "MIT" ]
null
null
null
application/modules/login.py
BaggerFast/Simple_votings
843769fa6fd2c04feb542e6b301b7b4810260d4e
[ "MIT" ]
null
null
null
from django.contrib import messages from django.contrib.auth import login, authenticate from django.shortcuts import render, redirect from django.urls import reverse from django.views import View from application.forms import AuthenticateForm from application.views import get_navbar, Page class LoginView(View): def __init__(self, **kwargs): super().__init__(**kwargs) self.context = {} def get(self, request): self.context['navbar'] = get_navbar(request) self.context['form'] = AuthenticateForm() return render(request, Page.login, self.context) def post(self, request): self.context['navbar'] = get_navbar(request) data = request.POST form = AuthenticateForm(data) if form.is_valid(): user = authenticate( username=data['username'], password=data['password'], ) if user: login(request, user) messages.success(request, 'You have successfully logged in!') return redirect(reverse('main')) messages.error(request, 'Invalid username and password pair.', extra_tags='danger') else: messages.error(request, 'Invalid username and password pair.', extra_tags='danger') self.context['form'] = AuthenticateForm(data) return render(request, Page.login, self.context)
36.230769
95
0.640481
1,119
0.791932
0
0
0
0
0
0
178
0.125973
5d52ada1ae418220d17ef038d3cc8e85cc6253d2
2,938
py
Python
little_questions/utils/log.py
HelloChatterbox/little_questions
04bee86244b42fdaed9f8d010c2f83037ad753f6
[ "MIT" ]
null
null
null
little_questions/utils/log.py
HelloChatterbox/little_questions
04bee86244b42fdaed9f8d010c2f83037ad753f6
[ "MIT" ]
null
null
null
little_questions/utils/log.py
HelloChatterbox/little_questions
04bee86244b42fdaed9f8d010c2f83037ad753f6
[ "MIT" ]
null
null
null
# Copyright 2017 Mycroft AI Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import inspect import logging import sys class LOG: """ Custom logger class that acts like logging.Logger The logger name is automatically generated by the module of the caller Usage: >>> LOG.debug('My message: %s', debug_str) 13:12:43.673 - :<module>:1 - DEBUG - My message: hi >>> LOG('custom_name').debug('Another message') 13:13:10.462 - custom_name - DEBUG - Another message """ base_path = "stdout" fmt = '%(asctime)s.%(msecs)03d - ' \ '%(name)s - %(levelname)s - %(message)s' datefmt = '%Y-%m-%d %H:%M:%S' formatter = logging.Formatter(fmt, datefmt) name = 'little_questions' level = "DEBUG" _loggers = {} @classmethod def set_level(cls, level="INFO"): cls.level = level for n in cls._loggers: cls._loggers[n].setLevel(cls.level) @classmethod def create_logger(cls, name): if name in cls._loggers: return cls._loggers[name] logger = logging.getLogger(name) logger.propagate = False stdout_handler = logging.StreamHandler(sys.stdout) stdout_handler.setFormatter(cls.formatter) logger.addHandler(stdout_handler) logger.setLevel(cls.level) cls._loggers[name] = logger return logger @classmethod def _log(cls): name = "" if cls.name is not None: name = cls.name + " - " # Stack: # [0] - _log() # [1] - debug(), info(), warning(), or error() # [2] - caller stack = inspect.stack() # Record: # [0] - frame object # [1] - filename # [2] - line number # [3] - function # ... record = stack[2] name += record[3] + ':' + str(record[2]) logger = cls.create_logger(name) return logger @classmethod def info(cls, *args, **kwargs): cls._log().info(*args, **kwargs) @classmethod def debug(cls, *args, **kwargs): cls._log().debug(*args, **kwargs) @classmethod def warning(cls, *args, **kwargs): cls._log().warning(*args, **kwargs) @classmethod def error(cls, *args, **kwargs): cls._log().error(*args, **kwargs) @classmethod def exception(cls, *args, **kwargs): cls._log().exception(*args, **kwargs)
28.803922
74
0.596664
2,314
0.787611
0
0
1,604
0.54595
0
0
1,249
0.425119
5d53556c82d1a27255c1497656b5efc347cde76d
1,035
py
Python
alipay/aop/api/response/AlipayOpenMiniVersionAuditApplyResponse.py
antopen/alipay-sdk-python-all
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
[ "Apache-2.0" ]
null
null
null
alipay/aop/api/response/AlipayOpenMiniVersionAuditApplyResponse.py
antopen/alipay-sdk-python-all
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
[ "Apache-2.0" ]
null
null
null
alipay/aop/api/response/AlipayOpenMiniVersionAuditApplyResponse.py
antopen/alipay-sdk-python-all
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.response.AlipayResponse import AlipayResponse class AlipayOpenMiniVersionAuditApplyResponse(AlipayResponse): def __init__(self): super(AlipayOpenMiniVersionAuditApplyResponse, self).__init__() self._speed_up = None self._speed_up_memo = None @property def speed_up(self): return self._speed_up @speed_up.setter def speed_up(self, value): self._speed_up = value @property def speed_up_memo(self): return self._speed_up_memo @speed_up_memo.setter def speed_up_memo(self, value): self._speed_up_memo = value def parse_response_content(self, response_content): response = super(AlipayOpenMiniVersionAuditApplyResponse, self).parse_response_content(response_content) if 'speed_up' in response: self.speed_up = response['speed_up'] if 'speed_up_memo' in response: self.speed_up_memo = response['speed_up_memo']
28.75
112
0.699517
907
0.876329
0
0
307
0.296618
0
0
94
0.090821
5d53e848dc1be11f4d81bb7ffe655fc1c2f327c3
1,923
py
Python
cvstudio/view/widgets/loading_dialog/loading_dialog.py
haruiz/PytorchCvStudio
ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef
[ "MIT" ]
32
2019-10-31T03:10:52.000Z
2020-12-23T11:50:53.000Z
cvstudio/view/widgets/loading_dialog/loading_dialog.py
haruiz/CvStudio
ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef
[ "MIT" ]
19
2019-10-31T15:06:05.000Z
2020-06-15T02:21:55.000Z
cvstudio/view/widgets/loading_dialog/loading_dialog.py
haruiz/PytorchCvStudio
ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef
[ "MIT" ]
8
2019-10-31T03:32:50.000Z
2020-07-17T20:47:37.000Z
import os from PyQt5 import QtCore from PyQt5.QtCore import QRect, QPoint from PyQt5.QtGui import QMovie, QCloseEvent, QShowEvent from PyQt5.QtWidgets import QDialog, QLabel, QVBoxLayout, QApplication, QWidget class QLoadingDialog(QDialog): def __init__(self, parent=None): super(QLoadingDialog, self).__init__() self.setFixedSize(100, 100) # self.setWindowOpacity(0.8) self.setWindowFlags(QtCore.Qt.FramelessWindowHint) self.setAttribute(QtCore.Qt.WA_TranslucentBackground) app = QApplication.instance() curr_theme = "light" if app: curr_theme = app.property("theme") gif_file = os.path.abspath("./assets/icons/{}/loading.gif".format(curr_theme)) self.movie = QMovie(gif_file) self.label = QLabel() self.label.setMovie(self.movie) self.layout = QVBoxLayout(self) self.layout.addWidget(self.label) def center(self, host: QWidget = None): if host: hostGeometry: QRect = host.geometry() # dialogGeometry : QRect = self.geometry() centerPoint: QPoint = hostGeometry.center() centerPoint = host.mapToGlobal(centerPoint) offset = 30 targetPoint = QPoint(centerPoint.x() - offset, centerPoint.y() - offset) self.move(targetPoint) else: screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos()) centerPoint = QApplication.desktop().screenGeometry(screen).center() self.move(centerPoint) return self def showEvent(self, e: QShowEvent): if self.movie.state() == QMovie.NotRunning: self.movie.start() def closeEvent(self, e: QCloseEvent): if self.movie.state() == QMovie.Running: self.movie.stop() def exec_(self): self.center() return QDialog.exec_(self)
36.283019
95
0.637546
1,709
0.888716
0
0
0
0
0
0
115
0.059802
5d546fd247cbdfbb018dec6e3f4e3273ffdefdb8
3,115
py
Python
pysnmp-with-texts/MWORKS-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
8
2019-05-09T17:04:00.000Z
2021-06-09T06:50:51.000Z
pysnmp-with-texts/MWORKS-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
4
2019-05-31T16:42:59.000Z
2020-01-31T21:57:17.000Z
pysnmp-with-texts/MWORKS-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
10
2019-04-30T05:51:36.000Z
2022-02-16T03:33:41.000Z
# # PySNMP MIB module MWORKS-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/MWORKS-MIB # Produced by pysmi-0.3.4 at Wed May 1 14:16:04 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection") ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup") Gauge32, Unsigned32, ObjectIdentity, IpAddress, Bits, MibIdentifier, Integer32, enterprises, ModuleIdentity, TimeTicks, Counter32, NotificationType, iso, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Unsigned32", "ObjectIdentity", "IpAddress", "Bits", "MibIdentifier", "Integer32", "enterprises", "ModuleIdentity", "TimeTicks", "Counter32", "NotificationType", "iso", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn") TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString") tecElite = MibIdentifier((1, 3, 6, 1, 4, 1, 217)) meterWorks = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16)) mw501 = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16, 1)) mwMem = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16, 1, 1)) mwHeap = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16, 1, 2)) mwMemCeiling = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 1, 1), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: mwMemCeiling.setStatus('mandatory') if mibBuilder.loadTexts: mwMemCeiling.setDescription('bytes of memory the agent memory manager will allow the agent to use.') mwMemUsed = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 1, 2), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: mwMemUsed.setStatus('mandatory') if mibBuilder.loadTexts: mwMemUsed.setDescription("bytes of memory that meterworks has malloc'ed. some of this may be in free pools.") mwHeapTotal = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 2, 1), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: mwHeapTotal.setStatus('mandatory') if mibBuilder.loadTexts: mwHeapTotal.setDescription('bytes of memory given to the heap manager.') mwHeapUsed = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 2, 2), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: mwHeapUsed.setStatus('mandatory') if mibBuilder.loadTexts: mwHeapUsed.setDescription('bytes of available memory in the heap.') mibBuilder.exportSymbols("MWORKS-MIB", mwHeap=mwHeap, mwHeapUsed=mwHeapUsed, mwMemCeiling=mwMemCeiling, meterWorks=meterWorks, tecElite=tecElite, mwMem=mwMem, mw501=mw501, mwHeapTotal=mwHeapTotal, mwMemUsed=mwMemUsed)
97.34375
505
0.759551
0
0
0
0
0
0
0
0
1,182
0.379454
5d54ea522a32fa91aca889c9606f036f2de763c3
3,935
py
Python
Assets/Editor/PostprocessBuildPlayer_MpireNxusMeasurementPostBuildiOS.py
mpire-nxus/nxus_unity_sdk
34a1ebfc588c47c1c71fae11f29e82c1172c6dc2
[ "MIT" ]
1
2018-03-13T02:44:15.000Z
2018-03-13T02:44:15.000Z
Assets/Editor/PostprocessBuildPlayer_MpireNxusMeasurementPostBuildiOS.py
mpire-nxus/nxus_unity_sdk
34a1ebfc588c47c1c71fae11f29e82c1172c6dc2
[ "MIT" ]
null
null
null
Assets/Editor/PostprocessBuildPlayer_MpireNxusMeasurementPostBuildiOS.py
mpire-nxus/nxus_unity_sdk
34a1ebfc588c47c1c71fae11f29e82c1172c6dc2
[ "MIT" ]
null
null
null
#!/usr/bin/env python import sys import re from subprocess import Popen, PIPE import argparse from pbxproj import XcodeProject, TreeType from pbxproj import FileOptions def main(): parser = argparse.ArgumentParser(description="MpireNxusMeasurement post build iOS script") parser.add_argument('ios_project_path', help="path to the folder of the iOS project generated by unity3d") with open('MpireNxusMeasurementPostBuildiOSLog.txt', 'w') as fileLog: # Log function with file injected. LogFunc = LogInput(fileLog) # Path of the Xcode SDK on the system. xcode_sdk_path = get_xcode_sdk_path(LogFunc) # Path for unity iOS Xcode project and framework on the system. unity_xcode_project_path, framework_path = get_paths(LogFunc, parser, xcode_sdk_path) # Edit the Xcode project using mod_pbxproj: # - Add the adSupport framework library. # - Add the iAd framework library. # - Change the compilation flags of the adjust project files to support non-ARC. edit_unity_xcode_project(LogFunc, unity_xcode_project_path, framework_path) # Removed. # Change the Xcode project directly: # - Allow objective-c exceptions # rewrite_unity_xcode_project(LogFunc, unity_xcode_project_path) sys.exit(0) def LogInput(writeObject): def Log(message, *args): messageNLine = (message if message else "None") + "\n" writeObject.write(messageNLine.format(*args)) return Log def get_paths(Log, parser, xcode_sdk_path): args, ignored_args = parser.parse_known_args() ios_project_path = args.ios_project_path unity_xcode_project_path = ios_project_path + "/Unity-iPhone.xcodeproj/project.pbxproj" Log("Unity3d Xcode project path: {0}", unity_xcode_project_path) framework_path = xcode_sdk_path + "/System/Library/Frameworks/" Log("framework path: {0}", framework_path) return unity_xcode_project_path, framework_path def edit_unity_xcode_project(Log, unity_xcode_project_path, framework_path): # load unity iOS pbxproj project file unity_XcodeProject = XcodeProject.load(unity_xcode_project_path) frameworks = unity_XcodeProject.get_or_create_group('Frameworks') file_options_security_framework = FileOptions(embed_framework=False, weak=True) unity_XcodeProject.add_file(framework_path + "Security.framework", parent=frameworks, tree='SDKROOT', force=False, file_options=file_options_security_framework) Log("added Security framework") # Add -ObjC to "Other Linker Flags" project settings. unity_XcodeProject.add_other_ldflags('-ObjC') # Save changes. unity_XcodeProject.save() def rewrite_unity_xcode_project(Log, unity_xcode_project_path): unity_xcode_lines = [] # Allow objective-c exceptions re_objc_excep = re.compile(r"\s*GCC_ENABLE_OBJC_EXCEPTIONS *= *NO.*") with open(unity_xcode_project_path) as upf: for line in upf: if re_objc_excep.match(line): #Log("matched line: {0}", re_objc_excep.match(line).group()) line = line.replace("NO","YES") Log("Objective-c exceptions enabled") unity_xcode_lines.append(line) with open(unity_xcode_project_path, "w+") as upf: upf.writelines(unity_xcode_lines) def get_xcode_sdk_path(Log): # Output all info from Xcode. proc = Popen(["xcodebuild", "-version", "-sdk"], stdout=PIPE, stderr=PIPE) out, err = proc.communicate() if proc.returncode not in [0, 66]: Log("Could not retrieve Xcode sdk path. code: {0}, err: {1}", proc.returncode, err) return None match = re.search("iPhoneOS.*?Path: (?P<sdk_path>.*?)\n", out, re.DOTALL) xcode_sdk_path = match.group('sdk_path') if match else None Log("Xcode sdk path: {0}", xcode_sdk_path) return xcode_sdk_path if __name__ == "__main__": main()
38.578431
164
0.706226
0
0
0
0
0
0
0
0
1,346
0.342058
5d553e6733970b4280761ad4ec3ddb284ae1146d
1,382
py
Python
vars_in_python.py
klyusba/python-quiz
9f469417458f8ba6b21f9507cc860ca4547ea67b
[ "MIT" ]
null
null
null
vars_in_python.py
klyusba/python-quiz
9f469417458f8ba6b21f9507cc860ca4547ea67b
[ "MIT" ]
null
null
null
vars_in_python.py
klyusba/python-quiz
9f469417458f8ba6b21f9507cc860ca4547ea67b
[ "MIT" ]
null
null
null
# == 1 == bar = [1, 2] def foo(bar): bar = sum(bar) return bar print(foo(bar)) # == 2 == bar = [1, 2] def foo(bar): bar[0] = 1 return sum(bar) print(foo(bar)) # == 3 == bar = [1, 2] def foo(): bar = sum(bar) return bar print(foo()) # == 4 == bar = [1, 2] def foo(bar): bar = [1, 2, 3, ] return sum(bar) print(foo(bar), bar) # == 5 == bar = [1, 2] def foo(bar): bar[:] = [1, 2, 3, ] return sum(bar) print(foo(bar), bar) # == 6 == try: bar = 1 / 0 print(bar) except ZeroDivisionError as bar: print(bar) print(bar) # == 7 == bar = [1, 2] print(list(bar for bar in bar)) print(bar) # == 8 == bar = [1, 2] f = lambda: sum(bar) print(f()) bar = [1, 2, 3, ] print(f()) # == 9 == bar = [1, 2] def foo(bar): return lambda: sum(bar) f = foo(bar) print(f()) bar = [1, 2, 3, ] print(f()) # == 10 == bar = [1, 2] foo = [] for i in bar: foo.append(lambda: i) print([f() for f in foo]) # == 11 == bar = [1, 2] foo = [ lambda: i for i in bar ] print(list(f() for f in foo)) # == 12 == bar = [1, 2] foo = [ lambda: i for i in bar ] print(list(f() for f in foo)) bar = [1, 2, 3, ] print(list(f() for f in foo)) bar[:] = [1, 2, 3, ] print(list(f() for f in foo)) # == 13 == bar = [1, 2] foo = [ lambda i=i: i for i in bar ] print(list(f() for f in foo))
11.145161
32
0.469609
0
0
0
0
0
0
0
0
147
0.106368
5d55a06354d86f35af5fb38858161328b7581a23
10,786
py
Python
hack/dev/gh-replay-events.py
sm43/pipelines-as-code
bd21e48c96ab128d533701ecd1a2df7a0d136d65
[ "Apache-2.0" ]
null
null
null
hack/dev/gh-replay-events.py
sm43/pipelines-as-code
bd21e48c96ab128d533701ecd1a2df7a0d136d65
[ "Apache-2.0" ]
null
null
null
hack/dev/gh-replay-events.py
sm43/pipelines-as-code
bd21e48c96ab128d533701ecd1a2df7a0d136d65
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Author: Chmouel Boudjnah <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # See README.md for documentation import typing import argparse import base64 import hashlib import hmac import json import os import subprocess import sys import time import requests import ghapp_token NAMESPACE = "pipelines-as-code" SECRET_NAME = "pipelines-as-code-secret" ELNAME = "pipelines-as-code" EXPIRE_MINUTES_AS_SECONDS = ( int(os.environ.get("GITHUBAPP_TOKEN_EXPIRATION_MINUTES", 10)) * 60 ) def get_controller_route(): elroute = subprocess.run( f"kubectl get route -n {NAMESPACE} -l pipelines-as-code/route=controller -o json", shell=True, check=True, capture_output=True, ) return ( "https://" + json.loads(elroute.stdout)["items"][0]["status"]["ingress"][0]["host"] ) def get_controller_ingress(): elroute = subprocess.run( f"kubectl get ingress -n {NAMESPACE} -l pipelines-as-code/route=controller -o json", shell=True, check=True, capture_output=True, ) return ( "http://" + json.loads(elroute.stdout)["items"][0]["spec"]["rules"][0]["host"] ) def get_token_secret( github_api_url=ghapp_token.GITHUB_API_URL, expiration_time=EXPIRE_MINUTES_AS_SECONDS ): secret = subprocess.run( f"kubectl get secret {SECRET_NAME} -n{NAMESPACE} -o json", shell=True, check=True, capture_output=True, ) jeez = json.loads(secret.stdout) private_key = base64.b64decode(jeez["data"]["github-private-key"]) app_id = base64.b64decode(jeez["data"]["github-application-id"]) webhook_secret = base64.b64decode(jeez["data"]["webhook.secret"]).decode() if not private_key or not app_id or not webhook_secret: print( f"private_key={private_key[1:10]} or app_id={app_id} or webhook_secret={webhook_secret} are empty" ) sys.exit(1) gh = ghapp_token.GitHub( private_key, app_id, expiration_time, github_api_url, ) return gh.token, webhook_secret, app_id def _request_app_delivery(token, iid=None, api_url=ghapp_token.GITHUB_API_URL): url = f"{api_url}/app/hook/deliveries" if iid: url += f"/{iid}" headers = { "Accept": "application/vnd.github.v3+json", "Authorization": f"Bearer {token}", } return requests.request("GET", url, headers=headers) def _request_webhooks_installed( token: str, owner_repo: str, iid: typing.Union[int, None] = None, api_url: str = ghapp_token.GITHUB_API_URL, ): url = f"{api_url}/repos/{owner_repo}/hooks" if iid: url += f"/{iid}/deliveries" headers = { "Accept": "application/vnd.github.v3+json", "Authorization": f"Bearer {token}", } return requests.request("GET", url, headers=headers) def _request_webhooks_reattempt( token: str, owner_repo: str, iid: int, delivery_id: int, api_url: str = ghapp_token.GITHUB_API_URL, ): url = f"{api_url}/repos/{owner_repo}/hooks/{iid}/deliveries/{delivery_id}/attempts" print(url) headers = { "Accept": "application/vnd.github.v3+json", "Authorization": f"Bearer {token}", } return requests.request("POST", url, headers=headers) def ask_which(token: str, api_url: str, last: bool, deliveries: dict) -> int: dico = [] i = 1 if "message" in deliveries: 
print(deliveries) sys.exit(0) for delivery in deliveries: print( f"{i}) Action={delivery['action']} Event={delivery['event']} Delivered at {delivery['delivered_at']}" ) dico.append(delivery["id"]) if i == 10: break i += 1 chosen = input("Choose a delivery: ") # return _request_app_delivery(token, dico[int(chosen) - 1], api_url=api_url).json() return int(chosen) - 1 def webhook_get_delivery( token: str, owner_repo: str, last: bool = False, api_url: str = ghapp_token.GITHUB_API_URL, ) -> str: r = _request_webhooks_installed(token, api_url=api_url, owner_repo=owner_repo) r.raise_for_status() webhooks = r.json() if len(webhooks) == 1: webhook_id = int(webhooks[0]["id"]) elif len(webhooks) > 1: cnt = 1 for wh in webhooks: print(f"{cnt}) {wh['name']} - {wh['config']['url']} ") cnt += 1 chosen = input("Choose a delivery: ") webhook_id = int(webhooks[int(chosen) - 1]["id"]) else: print("could not find any webhook configuration on your repo {}") sys.exit(1) r = _request_webhooks_installed( token, api_url=api_url, owner_repo=owner_repo, iid=webhook_id ) r.raise_for_status() deliveries = r.json() if not deliveries: print("no deliveries has been set ") sys.exit(1) if last: delivery_id = deliveries[0]["id"] else: chosen = ask_which(token, api_url, last, r.json()) delivery_id = deliveries[chosen]["id"] r = _request_webhooks_reattempt( token=token, owner_repo=owner_repo, iid=webhook_id, api_url=api_url, delivery_id=delivery_id, ) r.raise_for_status() print(f"Delivery has been replayed, you can replay directly it with: ") s = f"http POST {api_url}/repos/{owner_repo}/hooks/{webhook_id}/deliveries/{delivery_id}/attempts" s += f' Authorization:"Bearer { os.environ.get("PASS_TOKEN", "$TOKEN") }"' s += " Accept:application/vnd.github.v3+json" print(s) return s def app_get_delivery( token: str, last: bool = False, api_url: str = ghapp_token.GITHUB_API_URL ) -> dict: r = _request_app_delivery(token, api_url=api_url) r.raise_for_status() deliveries = r.json() if not deliveries: print("no deliveries has been set ") sys.exit(1) if last: return _request_app_delivery(token, deliveries[0]["id"], api_url=api_url).json() chosen = ask_which(token, api_url, last, deliveries) return _request_app_delivery( token, deliveries[chosen]["id"], api_url=api_url ).json() def save_script(target: str, el_route: str, headers: dict, payload: str): s = f"""#!/usr/bin/env python3 import requests import sys payload = \"\"\"{json.dumps(payload)}\"\"\" headers={headers} el_route = "http://localhost:8080" if (len(sys.argv) > 1 and sys.argv[1] == "-l") else "{el_route}" r = requests.request("POST",el_route,data=payload.encode("utf-8"),headers=headers) r.raise_for_status() print("Request has been replayed on " + el_route) """ with open(target, "w") as fp: fp.write(s) os.chmod(target, 0o755) print(f"Request saved to {target}") def main(args): el = args.eroute if not el: try: el = get_controller_route() except subprocess.CalledProcessError: try: el = get_controller_ingress() except subprocess.CalledProcessError: print("Could not find an ingress or route") sys.exit(1) if args.webhook_repo: token, webhook_secret = args.webhook_token, args.webhook_secret replays = webhook_get_delivery( token, last=args.last_event, api_url=args.api_url, owner_repo=args.webhook_repo, ) if args.save: open(args.save, "w").write(f"""#!/usr/bin/env bash\n{replays}\n""") os.chmod(args.save, 0o755) print(f"Saved to {args.save}") sys.exit(0) else: token, webhook_secret, app_id = get_token_secret(github_api_url=args.api_url) delivery = app_get_delivery(token, 
args.last_event, args.api_url) jeez = delivery["request"]["payload"] headers = delivery["request"]["headers"] payload = json.dumps(jeez) esha256 = hmac.new( webhook_secret.encode("utf-8"), msg=payload.encode("utf-8"), digestmod=hashlib.sha256, ).hexdigest() esha1 = hmac.new( webhook_secret.encode("utf-8"), msg=payload.encode("utf-8"), digestmod=hashlib.sha1, ).hexdigest() print("Replay event for repo " + jeez["repository"]["full_name"]) headers.update( { "X-Hub-Signature": "sha1=" + esha1, "X-Hub-Signature-256": "sha256=" + esha256, } ) if args.save: save_script(args.save, el, headers, jeez) sys.exit(0) for _ in range(args.retry): try: r = requests.request( "POST", el, data=payload.encode("utf-8"), headers=headers ) except requests.exceptions.ConnectionError: print(f"sleeping until {el} is up") time.sleep(5) continue print(f"Payload has been replayed on {el}: {r}") return print("You have reached the maximum number of retries") def parse_args(): parser = argparse.ArgumentParser(description="Replay a webhook") parser.add_argument( "--installation-id", "-i", default=os.environ.get("INSTALLATION_ID"), help="Installation ID", ) parser.add_argument( "--controller-route", "-e", dest="eroute", help="Route hostname (default to detect on openshift/ingress)", default=os.environ.get("EL_ROUTE"), ) parser.add_argument("--last-event", "-L", action="store_true") parser.add_argument( "--webhook-repo", "-w", help="Use a webhook-repo instead of app" ) parser.add_argument("--webhook-token", "-t", help="Use this token") parser.add_argument("--webhook-secret", "-S", help="Use this webhook secret") parser.add_argument( "--save", "-s", help="save the request to a shell script to replay easily" ) parser.add_argument( "-a", "--api-url", help="Github API URL", default=os.environ.get("GITHUB_API_URL", ghapp_token.GITHUB_API_URL), ) parser.add_argument( "--retry", type=int, default=1, help="how many time to try to contact the el route", ) return parser.parse_args() if __name__ == "__main__": main(parse_args())
30.555241
113
0.623215
0
0
0
0
0
0
0
0
3,653
0.33868
5d579c372853402ecfd7e953a09a9d04c6d7c725
114
py
Python
nintendeals/noa/api/__init__.py
Pooroomoo/nintendeals
993f4d159ff405ed82cd2bb023c7b75d921d0acb
[ "MIT" ]
37
2020-04-30T13:48:02.000Z
2022-03-09T04:55:54.000Z
nintendeals/noa/api/__init__.py
Pooroomoo/nintendeals
993f4d159ff405ed82cd2bb023c7b75d921d0acb
[ "MIT" ]
4
2020-05-09T03:17:44.000Z
2021-04-28T00:53:55.000Z
nintendeals/noa/api/__init__.py
Pooroomoo/nintendeals
993f4d159ff405ed82cd2bb023c7b75d921d0acb
[ "MIT" ]
5
2020-07-22T06:42:27.000Z
2022-02-07T22:35:57.000Z
from .algolia import search_by_nsuid from .algolia import search_by_platform from .algolia import search_by_query
28.5
39
0.868421
0
0
0
0
0
0
0
0
0
0
5d58e721508a643ec9487a7f661ca1a66cd5a971
3,659
py
Python
076_Minimum_Window_Substring.py
joshlyman/Josh-LeetCode
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
[ "MIT" ]
null
null
null
076_Minimum_Window_Substring.py
joshlyman/Josh-LeetCode
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
[ "MIT" ]
null
null
null
076_Minimum_Window_Substring.py
joshlyman/Josh-LeetCode
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
[ "MIT" ]
null
null
null
# Other solution # V2 def minWindow(s, t): need = collections.Counter(t) #hash table to store char frequency missing = len(t) #total number of chars we care start, end = 0, 0 i = 0 for j, char in enumerate(s, 1): #index j from 1 if need[char] > 0: missing -= 1 need[char] -= 1 if missing == 0: #match all chars while i < j and need[s[i]] < 0: #remove chars to find the real start need[s[i]] += 1 i += 1 need[s[i]] += 1 #make sure the first appearing char satisfies need[char]>0 missing += 1 #we missed this first char, so add missing by 1 if end == 0 or j-i < end-start: #update window start, end = i, j i += 1 #update i to start+1 for next window return s[start:end] # Time: O(|S|+|T|) # Space:O(|S|+|T|) # Refer from: # https://leetcode.com/problems/minimum-window-substring/solution/ # Sliding Window # We start with two pointers, leftleft and rightright initially pointing to the first element of the string S. # We use the rightright pointer to expand the window until we get a desirable window i.e. a window that contains all of the characters of T. # Once we have a window with all the characters, we can move the left pointer ahead one by one. If the window is still a desirable one we keep on updating the minimum window size. # If the window is not desirable any more, we repeat step 2 onwards. # The current window is s[i:j] and the result window is s[I:J]. In need[c] I store how many times I # need character c (can be negative) and missing tells how many characters are still missing. # In the loop, first add the new character to the window. Then, if nothing is missing, # remove as much as possible from the window start and then update the result. class Solution: def minWindow(self, s: str, t: str) -> str: m = len(s) n = len(t) if m < n: return '' lt = {} # put t into dict (lt) and count how many # for each char for i in t: if i not in lt: lt[i] = 1 else: lt[i] += 1 # missing is to count how many remaining char needed from substring # finally get candidate substring which satisfy need of t missing = n i = I = J = 0 for j, c in enumerate(s, 1): if c in lt and lt[c] > 0: missing -= 1 if c in lt: # lt can be negative lt[c] -= 1 # i is index of candidate substring, remove as many as char from candidate while i < j and not missing: if not J or j-i < J-I: I, J = i, j if s[i] not in lt: i += 1 continue else: # if lt contains s[i], then # of s[i] +1, might reach to 0 lt[s[i]] += 1 # if > 0, means we need more, then missing +1 if lt[s[i]] > 0: missing += 1 i += 1 return s[I:J] # Time: O(|S|+|T|) # Space:O(|S|+|T|) # Optimized Sliding Window # A small improvement to the above approach can reduce the time complexity of the algorithm to O(2*∣filtered_S∣+∣S∣+∣T∣), # where filtered(S) is the string formed from S by removing all the elements not present in T
36.959596
179
0.52774
1,392
0.379188
0
0
0
0
0
0
1,972
0.537183
5d5c6de0926f1a98ed21db39c4944a17b7f61725
823
py
Python
home/migrations/0002_auto_20171017_0412.py
Taywee/amberherbert.com
6bf384d7cdf18dc613252fe4dde38545150eabbc
[ "MIT" ]
null
null
null
home/migrations/0002_auto_20171017_0412.py
Taywee/amberherbert.com
6bf384d7cdf18dc613252fe4dde38545150eabbc
[ "MIT" ]
2
2017-10-15T20:36:59.000Z
2017-10-17T05:27:49.000Z
home/migrations/0002_auto_20171017_0412.py
Taywee/amberherbert.com
6bf384d7cdf18dc613252fe4dde38545150eabbc
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Generated by Django 1.11.5 on 2017-10-17 04:12 from __future__ import unicode_literals from django.db import migrations import wagtail.core.blocks import wagtail.core.fields class Migration(migrations.Migration): dependencies = [ ('home', '0001_initial'), ] operations = [ migrations.AlterField( model_name='homepage', name='navigation', field=wagtail.core.fields.StreamField((('item', wagtail.core.blocks.StructBlock((('text', wagtail.core.blocks.CharBlock(help_text='If this is left blank, the title of the linked page will be used instead', max_length=16, required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=True))))),), blank=True, help_text='The list of navigation items', null=True), ), ]
35.782609
384
0.684083
619
0.752126
0
0
0
0
0
0
235
0.285541
5d5caf5c5d1415de34379e45359c322cac37e6ff
2,766
py
Python
lib/adv_model.py
chawins/entangle-rep
3e9e0d6e7536b0de0e35d7f8717f2ccc8e887759
[ "MIT" ]
15
2019-06-30T12:30:17.000Z
2021-12-07T20:20:36.000Z
lib/adv_model.py
chawins/entangle-rep
3e9e0d6e7536b0de0e35d7f8717f2ccc8e887759
[ "MIT" ]
2
2020-06-11T10:10:52.000Z
2021-12-21T08:50:33.000Z
lib/adv_model.py
chawins/entangle-rep
3e9e0d6e7536b0de0e35d7f8717f2ccc8e887759
[ "MIT" ]
9
2019-07-09T14:52:30.000Z
2020-10-27T19:18:34.000Z
import torch import torch.nn as nn import torch.nn.functional as F class PGDModel(nn.Module): """ code adapted from https://github.com/karandwivedi42/adversarial/blob/master/main.py """ def __init__(self, basic_net, config): super(PGDModel, self).__init__() self.basic_net = basic_net self.rand = config['random_start'] self.step_size = config['step_size'] self.epsilon = config['epsilon'] self.num_steps = config['num_steps'] assert config['loss_func'] == 'xent', 'Only xent supported for now.' def forward(self, inputs, targets, attack=False): if not attack: return self.basic_net(inputs) x = inputs.clone() if self.rand: x = x + torch.zeros_like(x).uniform_(-self.epsilon, self.epsilon) for _ in range(self.num_steps): x.requires_grad_() with torch.enable_grad(): logits = self.basic_net(x) loss = F.cross_entropy(logits, targets, reduction='sum') grad = torch.autograd.grad(loss, x)[0] x = x.detach() + self.step_size * torch.sign(grad.detach()) x = torch.min(torch.max(x, inputs.detach() - self.epsilon), inputs.detach() + self.epsilon) x = torch.clamp(x, 0, 1) return self.basic_net(x) class PGDL2Model(nn.Module): """ code adapted from https://github.com/karandwivedi42/adversarial/blob/master/main.py """ def __init__(self, basic_net, config): super(PGDL2Model, self).__init__() self.basic_net = basic_net self.epsilon = config['epsilon'] self.rand = config['random_start'] self.step_size = config['step_size'] self.num_steps = config['num_steps'] assert config['loss_func'] == 'xent', 'Only xent supported for now.' def forward(self, inputs, targets, attack=False): if not attack: return self.basic_net(inputs) x = inputs.clone() if self.rand: x = x + torch.zeros_like(x).normal_(0, self.step_size) for _ in range(self.num_steps): x.requires_grad_() with torch.enable_grad(): logits = self.basic_net(x) loss = F.cross_entropy(logits, targets, reduction='sum') grad = torch.autograd.grad(loss, x)[0].detach() grad_norm = grad.view(x.size(0), -1).norm(2, 1) delta = self.step_size * grad / grad_norm.view(x.size(0), 1, 1, 1) x = x.detach() + delta diff = (x - inputs).view(x.size(0), -1).renorm(2, 0, self.epsilon) x = diff.view(x.size()) + inputs x.clamp_(0, 1) return self.basic_net(x)
35.012658
78
0.578453
2,693
0.973608
0
0
0
0
0
0
400
0.144613
5d5d633b271390741583d9b310e4391f6dfe899f
4,673
py
Python
fs_image/rpm/storage/tests/storage_base_test.py
singhaditya28/fs_image
3d122da48eab8b26e5add6754cc1f91296139c58
[ "MIT" ]
null
null
null
fs_image/rpm/storage/tests/storage_base_test.py
singhaditya28/fs_image
3d122da48eab8b26e5add6754cc1f91296139c58
[ "MIT" ]
null
null
null
fs_image/rpm/storage/tests/storage_base_test.py
singhaditya28/fs_image
3d122da48eab8b26e5add6754cc1f91296139c58
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from unittest.mock import patch, MagicMock from typing import List, Tuple from .. import Storage # Module import to ensure we get plugins class StorageBaseTestCase(unittest.TestCase): 'A tiny test suite that can be used to check any Storage implementation.' def _check_write_and_read(self, storage: Storage, writes: List[bytes]): with storage.writer() as output: for piece in writes: output.write(piece) sid = output.commit() with storage.reader(sid) as input: written = b''.join(writes) partial_read = input.read(3) if written: self.assertGreater(len(partial_read), 0) self.assertLessEqual(len(partial_read), 3) self.assertEqual(written, partial_read + input.read()) return sid def check_storage_impl( self, storage: Storage, *, no_empty_blobs=False, skip_empty_writes=False, # To make testing more meaningful, it's useful to make sure that # some writes fill up any output buffers. For filesystem writes # from Python, this default is probably enough. mul=314159, # just about 300KB # If the blob-store has a read-through cache, we cannot effectively # test that the remove actually happened. remove_is_immediate=True, ) -> List[Tuple[List[str], str]]: # Writes + their storage ID # Make sure nothing bad happens if an exception flies before a # commit. Since we don't have an ID, we can't really test that the # partial write got discarded. with self.assertRaisesRegex(RuntimeError, '^humbug$'): with storage.writer() as output: output.write(b'bah') raise RuntimeError('humbug') with self.assertRaisesRegex(AssertionError, '^Cannot commit twice$'): with storage.writer() as output: output.write(b'foo') output.commit(remove_on_exception=True) # Leave no litter output.commit() # Check that the `remove_on_exception` kwarg triggers `remove`. mock_remove = MagicMock() with patch.object(storage, 'remove', mock_remove): with self.assertRaisesRegex(RuntimeError, '^remove_on_exception$'): with storage.writer() as output: output.write(b'foo') id_to_remove = output.commit(remove_on_exception=True) # Contract: committed blobs are available to read with storage.reader(id_to_remove) as reader: self.assertEqual(b'foo', reader.read()) raise RuntimeError('remove_on_exception') # Check that `remove` would have been called, and then call it. mock_remove.assert_called_once_with(id_to_remove) storage.remove(id_to_remove) # Exercise the real `remove` if remove_is_immediate: # The removed ID should not longer be available. with self.assertRaises(Exception): with storage.reader(id_to_remove) as input: # The reader may be a pipe from another Python process, # let's consume its output to avoid BrokenPipe logspam. input.read() return [ ( writes, self._check_write_and_read( storage, writes if i is None else [*writes[:i], b'', *writes[i:]], ), ) for writes in [ # Some large writes [b'abcd' * mul, b'efgh' * mul], [b'abc' * mul, b'defg' * mul], [b'abc' * mul, b'def' * mul, b'g' * mul], [b'abcd' * mul], [b'abc' * mul, b'd' * mul], # Some tiny writes without a multiplier [b'a', b'b', b'c', b'd'], [b'ab'], [b'a', b'b'], # While clowny, some blob storage systems refuse empty blobs. *([] if no_empty_blobs else [ [b''], [], ]), ] # Test the given writes, optionally insert a blank at each pos for i in [ None, *([] if skip_empty_writes else range(len(writes) + 1)), ] ]
41.723214
79
0.558742
4,312
0.922748
0
0
0
0
0
0
1,569
0.335759
5d5d8bde571d6e8d8f2723242cd35348a71ff40f
8,457
py
Python
sdk/python/pulumi_gcp/kms/get_kms_crypto_key_version.py
sisisin/pulumi-gcp
af6681d70ea457843409110c1324817fe55f68ad
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_gcp/kms/get_kms_crypto_key_version.py
sisisin/pulumi-gcp
af6681d70ea457843409110c1324817fe55f68ad
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_gcp/kms/get_kms_crypto_key_version.py
sisisin/pulumi-gcp
af6681d70ea457843409110c1324817fe55f68ad
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs __all__ = [ 'GetKMSCryptoKeyVersionResult', 'AwaitableGetKMSCryptoKeyVersionResult', 'get_kms_crypto_key_version', 'get_kms_crypto_key_version_output', ] @pulumi.output_type class GetKMSCryptoKeyVersionResult: """ A collection of values returned by getKMSCryptoKeyVersion. """ def __init__(__self__, algorithm=None, crypto_key=None, id=None, name=None, protection_level=None, public_keys=None, state=None, version=None): if algorithm and not isinstance(algorithm, str): raise TypeError("Expected argument 'algorithm' to be a str") pulumi.set(__self__, "algorithm", algorithm) if crypto_key and not isinstance(crypto_key, str): raise TypeError("Expected argument 'crypto_key' to be a str") pulumi.set(__self__, "crypto_key", crypto_key) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if protection_level and not isinstance(protection_level, str): raise TypeError("Expected argument 'protection_level' to be a str") pulumi.set(__self__, "protection_level", protection_level) if public_keys and not isinstance(public_keys, list): raise TypeError("Expected argument 'public_keys' to be a list") pulumi.set(__self__, "public_keys", public_keys) if state and not isinstance(state, str): raise TypeError("Expected argument 'state' to be a str") pulumi.set(__self__, "state", state) if version and not isinstance(version, int): raise TypeError("Expected argument 'version' to be a int") pulumi.set(__self__, "version", version) @property @pulumi.getter def algorithm(self) -> str: """ The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports. """ return pulumi.get(self, "algorithm") @property @pulumi.getter(name="cryptoKey") def crypto_key(self) -> str: return pulumi.get(self, "crypto_key") @property @pulumi.getter def id(self) -> str: """ The provider-assigned unique ID for this managed resource. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> str: """ The resource name for this CryptoKeyVersion in the format `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*` """ return pulumi.get(self, "name") @property @pulumi.getter(name="protectionLevel") def protection_level(self) -> str: """ The ProtectionLevel describing how crypto operations are performed with this CryptoKeyVersion. See the [protection_level reference](https://cloud.google.com/kms/docs/reference/rest/v1/ProtectionLevel) for possible outputs. """ return pulumi.get(self, "protection_level") @property @pulumi.getter(name="publicKeys") def public_keys(self) -> Sequence['outputs.GetKMSCryptoKeyVersionPublicKeyResult']: """ If the enclosing CryptoKey has purpose `ASYMMETRIC_SIGN` or `ASYMMETRIC_DECRYPT`, this block contains details about the public key associated to this CryptoKeyVersion. Structure is documented below. """ return pulumi.get(self, "public_keys") @property @pulumi.getter def state(self) -> str: """ The current state of the CryptoKeyVersion. 
See the [state reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions#CryptoKeyVersion.CryptoKeyVersionState) for possible outputs. """ return pulumi.get(self, "state") @property @pulumi.getter def version(self) -> Optional[int]: return pulumi.get(self, "version") class AwaitableGetKMSCryptoKeyVersionResult(GetKMSCryptoKeyVersionResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetKMSCryptoKeyVersionResult( algorithm=self.algorithm, crypto_key=self.crypto_key, id=self.id, name=self.name, protection_level=self.protection_level, public_keys=self.public_keys, state=self.state, version=self.version) def get_kms_crypto_key_version(crypto_key: Optional[str] = None, version: Optional[int] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetKMSCryptoKeyVersionResult: """ Provides access to a Google Cloud Platform KMS CryptoKeyVersion. For more information see [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) and [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). A CryptoKeyVersion represents an individual cryptographic key, and the associated key material. ## Example Usage ```python import pulumi import pulumi_gcp as gcp my_key_ring = gcp.kms.get_kms_key_ring(name="my-key-ring", location="us-central1") my_crypto_key = gcp.kms.get_kms_crypto_key(name="my-crypto-key", key_ring=my_key_ring.id) my_crypto_key_version = gcp.kms.get_kms_crypto_key_version(crypto_key=data["google_kms_key"]["my_key"]["id"]) ``` :param str crypto_key: The `self_link` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the `kms.CryptoKey` resource/datasource. :param int version: The version number for this CryptoKeyVersion. Defaults to `1`. """ __args__ = dict() __args__['cryptoKey'] = crypto_key __args__['version'] = version if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('gcp:kms/getKMSCryptoKeyVersion:getKMSCryptoKeyVersion', __args__, opts=opts, typ=GetKMSCryptoKeyVersionResult).value return AwaitableGetKMSCryptoKeyVersionResult( algorithm=__ret__.algorithm, crypto_key=__ret__.crypto_key, id=__ret__.id, name=__ret__.name, protection_level=__ret__.protection_level, public_keys=__ret__.public_keys, state=__ret__.state, version=__ret__.version) @_utilities.lift_output_func(get_kms_crypto_key_version) def get_kms_crypto_key_version_output(crypto_key: Optional[pulumi.Input[str]] = None, version: Optional[pulumi.Input[Optional[int]]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetKMSCryptoKeyVersionResult]: """ Provides access to a Google Cloud Platform KMS CryptoKeyVersion. For more information see [the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version) and [API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions). A CryptoKeyVersion represents an individual cryptographic key, and the associated key material. 
## Example Usage ```python import pulumi import pulumi_gcp as gcp my_key_ring = gcp.kms.get_kms_key_ring(name="my-key-ring", location="us-central1") my_crypto_key = gcp.kms.get_kms_crypto_key(name="my-crypto-key", key_ring=my_key_ring.id) my_crypto_key_version = gcp.kms.get_kms_crypto_key_version(crypto_key=data["google_kms_key"]["my_key"]["id"]) ``` :param str crypto_key: The `self_link` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the `kms.CryptoKey` resource/datasource. :param int version: The version number for this CryptoKeyVersion. Defaults to `1`. """ ...
41.253659
247
0.684522
4,307
0.509282
394
0.046589
5,316
0.628592
0
0
4,386
0.518624
5d5e98fd28e904d5e1f509a5b35e66ec3047cc56
353
py
Python
lecture11/subsets.py
nd-cse-30872-fa20/cse-30872-fa20-examples
7a991a0499e03bf91ac8ba40c99245d5d926e20c
[ "MIT" ]
null
null
null
lecture11/subsets.py
nd-cse-30872-fa20/cse-30872-fa20-examples
7a991a0499e03bf91ac8ba40c99245d5d926e20c
[ "MIT" ]
null
null
null
lecture11/subsets.py
nd-cse-30872-fa20/cse-30872-fa20-examples
7a991a0499e03bf91ac8ba40c99245d5d926e20c
[ "MIT" ]
2
2020-08-10T15:05:39.000Z
2020-08-12T15:16:01.000Z
#!/usr/bin/env python3

import itertools

# Constants

NUMBERS = range(0, 10)

# Main Execution

def main():
    count = 0

    for length in range(0, len(NUMBERS) + 1):
        for subset in itertools.combinations(NUMBERS, length):
            if sum(subset) % 3 == 0:
                count += 1

    print(count)

if __name__ == '__main__':
    main()
16.045455
62
0.580737
0
0
0
0
0
0
0
0
59
0.167139
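The subsets.py record above brute-forces all 2**10 subsets of range(0, 10) and counts those whose sum is divisible by 3. A minimal cross-check sketch of that count by residue classes (four numbers ≡ 0, three ≡ 1, three ≡ 2 mod 3), assuming Python 3.8+ for math.comb:

from math import comb

# A subset is counted when a + 2b ≡ 0 (mod 3), where a and b are the numbers
# of chosen elements from the residue-1 and residue-2 classes; the four
# residue-0 elements contribute a free factor of 2**4 = 16.
count = 16 * sum(
    comb(3, a) * comb(3, b)
    for a in range(4)
    for b in range(4)
    if (a + 2 * b) % 3 == 0
)
print(count)  # 352, which the brute-force script should also print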
5d5ec924b9a968b7509ed5badaef99e8de842bde
21,520
py
Python
src/toil/batchSystems/abstractBatchSystem.py
Hexotical/toil
312b6e1f221ee7f7f187dd6dbfce1aecffd00e09
[ "Apache-2.0" ]
348
2018-07-08T03:38:28.000Z
2022-03-11T18:57:44.000Z
src/toil/batchSystems/abstractBatchSystem.py
Hexotical/toil
312b6e1f221ee7f7f187dd6dbfce1aecffd00e09
[ "Apache-2.0" ]
1,700
2018-07-05T18:28:49.000Z
2022-03-31T14:09:04.000Z
src/toil/batchSystems/abstractBatchSystem.py
Hexotical/toil
312b6e1f221ee7f7f187dd6dbfce1aecffd00e09
[ "Apache-2.0" ]
126
2018-07-11T18:59:29.000Z
2022-01-24T03:14:02.000Z
# Copyright (C) 2015-2021 Regents of the University of California # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import enum import logging import os import shutil from abc import ABC, abstractmethod from argparse import ArgumentParser, _ArgumentGroup from contextlib import contextmanager from typing import (Any, Callable, ContextManager, Dict, Iterator, List, Optional, Tuple, Type, TypeVar, Union, NamedTuple) from toil.common import Toil, cacheDirName, Config from toil.deferred import DeferredFunctionManager from toil.fileStores.abstractFileStore import AbstractFileStore from toil.job import JobDescription from toil.resource import Resource logger = logging.getLogger(__name__) # Value to use as exitStatus in UpdatedBatchJobInfo.exitStatus when status is not available. EXIT_STATUS_UNAVAILABLE_VALUE = 255 class BatchJobExitReason(enum.Enum): FINISHED: int = 1 # Successfully finished. FAILED: int = 2 # Job finished, but failed. LOST: int = 3 # Preemptable failure (job's executing host went away). KILLED: int = 4 # Job killed before finishing. ERROR: int = 5 # Internal error. MEMLIMIT: int = 6 # Job hit batch system imposed memory limit class UpdatedBatchJobInfo(NamedTuple): jobID: int exitStatus: int """ The exit status (integer value) of the job. 0 implies successful. EXIT_STATUS_UNAVAILABLE_VALUE is used when the exit status is not available (e.g. job is lost). """ exitReason: Optional[BatchJobExitReason] wallTime: Union[float, int, None] # Information required for worker cleanup on shutdown of the batch system. class WorkerCleanupInfo(NamedTuple): workDir: str """workdir path (where the cache would go)""" workflowID: str """used to identify files specific to this workflow""" cleanWorkDir: str class AbstractBatchSystem(ABC): """ An abstract (as far as Python currently allows) base class to represent the interface the batch system must provide to Toil. """ @classmethod @abstractmethod def supportsAutoDeployment(cls) -> bool: """ Whether this batch system supports auto-deployment of the user script itself. If it does, the :meth:`.setUserScript` can be invoked to set the resource object representing the user script. Note to implementors: If your implementation returns True here, it should also override """ raise NotImplementedError() @classmethod @abstractmethod def supportsWorkerCleanup(cls) -> bool: """ Indicates whether this batch system invokes :meth:`BatchSystemSupport.workerCleanup` after the last job for a particular workflow invocation finishes. Note that the term *worker* refers to an entire node, not just a worker process. A worker process may run more than one job sequentially, and more than one concurrent worker process may exist on a worker node, for the same workflow. The batch system is said to *shut down* after the last worker process terminates. """ raise NotImplementedError() def setUserScript(self, userScript: Resource) -> None: """ Set the user script for this workflow. 
This method must be called before the first job is issued to this batch system, and only if :meth:`.supportsAutoDeployment` returns True, otherwise it will raise an exception. :param userScript: the resource object representing the user script or module and the modules it depends on. """ raise NotImplementedError() @abstractmethod def issueBatchJob(self, jobDesc: JobDescription, job_environment: Optional[Dict[str, str]] = None) -> int: """ Issues a job with the specified command to the batch system and returns a unique jobID. :param jobDesc a toil.job.JobDescription :param job_environment: a collection of job-specific environment variables to be set on the worker. :return: a unique jobID that can be used to reference the newly issued job """ raise NotImplementedError() @abstractmethod def killBatchJobs(self, jobIDs: List[int]) -> None: """ Kills the given job IDs. After returning, the killed jobs will not appear in the results of getRunningBatchJobIDs. The killed job will not be returned from getUpdatedBatchJob. :param jobIDs: list of IDs of jobs to kill """ raise NotImplementedError() # FIXME: Return value should be a set (then also fix the tests) @abstractmethod def getIssuedBatchJobIDs(self) -> List[int]: """ Gets all currently issued jobs :return: A list of jobs (as jobIDs) currently issued (may be running, or may be waiting to be run). Despite the result being a list, the ordering should not be depended upon. """ raise NotImplementedError() @abstractmethod def getRunningBatchJobIDs(self) -> Dict[int, float]: """ Gets a map of jobs as jobIDs that are currently running (not just waiting) and how long they have been running, in seconds. :return: dictionary with currently running jobID keys and how many seconds they have been running as the value """ raise NotImplementedError() @abstractmethod def getUpdatedBatchJob(self, maxWait: int) -> Optional[UpdatedBatchJobInfo]: """ Returns information about job that has updated its status (i.e. ceased running, either successfully or with an error). Each such job will be returned exactly once. Does not return info for jobs killed by killBatchJobs, although they may cause None to be returned earlier than maxWait. :param maxWait: the number of seconds to block, waiting for a result :return: If a result is available, returns UpdatedBatchJobInfo. Otherwise it returns None. wallTime is the number of seconds (a strictly positive float) in wall-clock time the job ran for, or None if this batch system does not support tracking wall time. """ raise NotImplementedError() def getSchedulingStatusMessage(self) -> Optional[str]: """ Get a log message fragment for the user about anything that might be going wrong in the batch system, if available. If no useful message is available, return None. This can be used to report what resource is the limiting factor when scheduling jobs, for example. If the leader thinks the workflow is stuck, the message can be displayed to the user to help them diagnose why it might be stuck. :return: User-directed message about scheduling state. """ # Default implementation returns None. # Override to provide scheduling status information. return None @abstractmethod def shutdown(self) -> None: """ Called at the completion of a toil invocation. Should cleanly terminate all worker threads. """ raise NotImplementedError() def setEnv(self, name: str, value: Optional[str] = None) -> None: """ Set an environment variable for the worker process before it is launched. 
The worker process will typically inherit the environment of the machine it is running on but this method makes it possible to override specific variables in that inherited environment before the worker is launched. Note that this mechanism is different to the one used by the worker internally to set up the environment of a job. A call to this method affects all jobs issued after this method returns. Note to implementors: This means that you would typically need to copy the variables before enqueuing a job. If no value is provided it will be looked up from the current environment. """ raise NotImplementedError() @classmethod def add_options(cls, parser: Union[ArgumentParser, _ArgumentGroup]) -> None: """ If this batch system provides any command line options, add them to the given parser. """ pass OptionType = TypeVar('OptionType') @classmethod def setOptions(cls, setOption: Callable[[str, Optional[Callable[[Any], OptionType]], Optional[Callable[[OptionType], None]], Optional[OptionType], Optional[List[str]]], None]) -> None: """ Process command line or configuration options relevant to this batch system. :param setOption: A function with signature setOption(option_name, parsing_function=None, check_function=None, default=None, env=None) returning nothing, used to update run configuration as a side effect. """ # TODO: change type to a Protocol to express kwarg names, or else use a # different interface (generator?) pass def getWorkerContexts(self) -> List[ContextManager[Any]]: """ Get a list of picklable context manager objects to wrap worker work in, in order. Can be used to ask the Toil worker to do things in-process (such as configuring environment variables, hot-deploying user scripts, or cleaning up a node) that would otherwise require a wrapping "executor" process. """ return [] class BatchSystemSupport(AbstractBatchSystem): """ Partial implementation of AbstractBatchSystem, support methods. """ def __init__(self, config: Config, maxCores: float, maxMemory: int, maxDisk: int) -> None: """ Initializes initial state of the object :param toil.common.Config config: object is setup by the toilSetup script and has configuration parameters for the jobtree. You can add code to that script to get parameters for your batch system. :param float maxCores: the maximum number of cores the batch system can request for any one job :param int maxMemory: the maximum amount of memory the batch system can request for any one job, in bytes :param int maxDisk: the maximum amount of disk space the batch system can request for any one job, in bytes """ super().__init__() self.config = config self.maxCores = maxCores self.maxMemory = maxMemory self.maxDisk = maxDisk self.environment: Dict[str, str] = {} self.workerCleanupInfo = WorkerCleanupInfo(workDir=self.config.workDir, workflowID=self.config.workflowID, cleanWorkDir=self.config.cleanWorkDir) def checkResourceRequest(self, memory: int, cores: float, disk: int, job_name: str = '', detail: str = '') -> None: """ Check resource request is not greater than that available or allowed. :param int memory: amount of memory being requested, in bytes :param float cores: number of cores being requested :param int disk: amount of disk space being requested, in bytes :param str job_name: Name of the job being checked, for generating a useful error report. :param str detail: Batch-system-specific message to include in the error. 
:raise InsufficientSystemResources: raised when a resource is requested in an amount greater than allowed """ batch_system = self.__class__.__name__ or 'this batch system' for resource, requested, available in [('cores', cores, self.maxCores), ('memory', memory, self.maxMemory), ('disk', disk, self.maxDisk)]: assert requested is not None if requested > available: unit = 'bytes of ' if resource in ('disk', 'memory') else '' R = f'The job {job_name} is r' if job_name else 'R' if resource == 'disk': msg = (f'{R}equesting {requested} {unit}{resource} for temporary space, ' f'more than the maximum of {available} {unit}{resource} of free space on ' f'{self.config.workDir} that {batch_system} was configured with, or enforced ' f'by --max{resource.capitalize()}. Try setting/changing the toil option ' f'"--workDir" or changing the base temporary directory by setting TMPDIR.') else: msg = (f'{R}equesting {requested} {unit}{resource}, more than the maximum of ' f'{available} {unit}{resource} that {batch_system} was configured with, ' f'or enforced by --max{resource.capitalize()}.') if detail: msg += detail raise InsufficientSystemResources(msg) def setEnv(self, name: str, value: Optional[str] = None) -> None: """ Set an environment variable for the worker process before it is launched. The worker process will typically inherit the environment of the machine it is running on but this method makes it possible to override specific variables in that inherited environment before the worker is launched. Note that this mechanism is different to the one used by the worker internally to set up the environment of a job. A call to this method affects all jobs issued after this method returns. Note to implementors: This means that you would typically need to copy the variables before enqueuing a job. If no value is provided it will be looked up from the current environment. :param str name: the environment variable to be set on the worker. :param str value: if given, the environment variable given by name will be set to this value. if None, the variable's current value will be used as the value on the worker :raise RuntimeError: if value is None and the name cannot be found in the environment """ if value is None: try: value = os.environ[name] except KeyError: raise RuntimeError(f"{name} does not exist in current environment") self.environment[name] = value def formatStdOutErrPath(self, toil_job_id: int, cluster_job_id: str, std: str) -> str: """ Format path for batch system standard output/error and other files generated by the batch system itself. Files will be written to the Toil work directory (which may be on a shared file system) with names containing both the Toil and batch system job IDs, for ease of debugging job failures. :param: int toil_job_id : The unique id that Toil gives a job. :param: cluster_job_id : What the cluster, for example, GridEngine, uses as its internal job id. :param: string std : The provenance of the stream (for example: 'err' for 'stderr' or 'out' for 'stdout') :rtype: string : Formatted filename; however if self.config.noStdOutErr is true, returns '/dev/null' or equivalent. """ if self.config.noStdOutErr: return os.devnull fileName: str = f'toil_{self.config.workflowID}.{toil_job_id}.{cluster_job_id}.{std}.log' workDir: str = Toil.getToilWorkDir(self.config.workDir) return os.path.join(workDir, fileName) @staticmethod def workerCleanup(info: WorkerCleanupInfo) -> None: """ Cleans up the worker node on batch system shutdown. Also see :meth:`supportsWorkerCleanup`. 
:param WorkerCleanupInfo info: A named tuple consisting of all the relevant information for cleaning up the worker. """ assert isinstance(info, WorkerCleanupInfo) workflowDir = Toil.getLocalWorkflowDir(info.workflowID, info.workDir) DeferredFunctionManager.cleanupWorker(workflowDir) workflowDirContents = os.listdir(workflowDir) AbstractFileStore.shutdownFileStore(workflowDir, info.workflowID) if (info.cleanWorkDir == 'always' or info.cleanWorkDir in ('onSuccess', 'onError') and workflowDirContents in ([], [cacheDirName(info.workflowID)])): shutil.rmtree(workflowDir, ignore_errors=True) class NodeInfo: """ The coresUsed attribute is a floating point value between 0 (all cores idle) and 1 (all cores busy), reflecting the CPU load of the node. The memoryUsed attribute is a floating point value between 0 (no memory used) and 1 (all memory used), reflecting the memory pressure on the node. The coresTotal and memoryTotal attributes are the node's resources, not just the used resources The requestedCores and requestedMemory attributes are all the resources that Toil Jobs have reserved on the node, regardless of whether the resources are actually being used by the Jobs. The workers attribute is an integer reflecting the number of workers currently active workers on the node. """ def __init__(self, coresUsed: float, memoryUsed: float, coresTotal: float, memoryTotal: int, requestedCores: float, requestedMemory: int, workers: int) -> None: self.coresUsed = coresUsed self.memoryUsed = memoryUsed self.coresTotal = coresTotal self.memoryTotal = memoryTotal self.requestedCores = requestedCores self.requestedMemory = requestedMemory self.workers = workers class AbstractScalableBatchSystem(AbstractBatchSystem): """ A batch system that supports a variable number of worker nodes. Used by :class:`toil. provisioners.clusterScaler.ClusterScaler` to scale the number of worker nodes in the cluster up or down depending on overall load. """ @abstractmethod def getNodes(self, preemptable: Optional[bool] = None) -> Dict[str, NodeInfo]: """ Returns a dictionary mapping node identifiers of preemptable or non-preemptable nodes to NodeInfo objects, one for each node. :param preemptable: If True (False) only (non-)preemptable nodes will be returned. If None, all nodes will be returned. """ raise NotImplementedError() @abstractmethod def nodeInUse(self, nodeIP: str) -> bool: """ Can be used to determine if a worker node is running any tasks. If the node is doesn't exist, this function should simply return False. :param nodeIP: The worker nodes private IP address :return: True if the worker node has been issued any tasks, else False """ raise NotImplementedError() # TODO: May be unused! @abstractmethod @contextmanager def nodeFiltering(self, filter: Optional[Callable[[NodeInfo], bool]]) -> Iterator[None]: """ Used to prevent races in autoscaling where 1) nodes have reported to the autoscaler as having no jobs 2) scaler decides to terminate these nodes. In parallel the batch system assigns jobs to the same nodes 3) scaler terminates nodes, resulting in job failures for all jobs on that node. Call this method prior to node termination to ensure that nodes being considered for termination are not assigned new jobs. Call the method again passing None as the filter to disable the filtering after node termination is done. :param method: This will be used as a filter on nodes considered when assigning new jobs. 
After this context manager exits the filter should be removed """ raise NotImplementedError() @abstractmethod def ignoreNode(self, nodeAddress: str) -> None: """ Stop sending jobs to this node. Used in autoscaling when the autoscaler is ready to terminate a node, but jobs are still running. This allows the node to be terminated after the current jobs have finished. :param nodeAddress: IP address of node to ignore. """ raise NotImplementedError() @abstractmethod def unignoreNode(self, nodeAddress: str) -> None: """ Stop ignoring this address, presumably after a node with this address has been terminated. This allows for the possibility of a new node having the same address as a terminated one. """ raise NotImplementedError() class InsufficientSystemResources(Exception): pass
42.613861
188
0.660037
19,888
0.924164
0
0
8,281
0.384805
0
0
14,253
0.662314
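The abstractBatchSystem.py record above defines eight abstract methods that every concrete batch system must provide. A minimal stub sketch of such a subclass, assuming toil is importable; the class name and its in-memory bookkeeping are illustrative only, not part of Toil:

from typing import Dict, List, Optional

from toil.batchSystems.abstractBatchSystem import AbstractBatchSystem, UpdatedBatchJobInfo


class InMemoryBatchSystem(AbstractBatchSystem):
    """Illustrative stub that records issued jobs but never runs anything."""

    def __init__(self) -> None:
        self.jobs: Dict[int, str] = {}
        self.next_id = 0

    @classmethod
    def supportsAutoDeployment(cls) -> bool:
        return False

    @classmethod
    def supportsWorkerCleanup(cls) -> bool:
        return False

    def issueBatchJob(self, jobDesc, job_environment=None) -> int:
        job_id = self.next_id
        self.next_id += 1
        self.jobs[job_id] = str(jobDesc)  # remember what was issued
        return job_id

    def killBatchJobs(self, jobIDs: List[int]) -> None:
        for job_id in jobIDs:
            self.jobs.pop(job_id, None)

    def getIssuedBatchJobIDs(self) -> List[int]:
        return list(self.jobs)

    def getRunningBatchJobIDs(self) -> Dict[int, float]:
        return {}  # nothing ever actually runs in this stub

    def getUpdatedBatchJob(self, maxWait: int) -> Optional[UpdatedBatchJobInfo]:
        return None

    def shutdown(self) -> None:
        self.jobs.clear()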
5d6066cbcb225097d2a844447d53060bd3350b74
1,969
py
Python
demo/other_demo.py
Heartfilia/lite_tools
b3432ba7cb60502ac64d45e23022e20555fb1588
[ "MIT" ]
5
2021-05-10T07:35:47.000Z
2022-03-07T01:31:12.000Z
demo/other_demo.py
Heartfilia/lite_tools
b3432ba7cb60502ac64d45e23022e20555fb1588
[ "MIT" ]
null
null
null
demo/other_demo.py
Heartfilia/lite_tools
b3432ba7cb60502ac64d45e23022e20555fb1588
[ "MIT" ]
1
2022-03-03T03:23:26.000Z
2022-03-03T03:23:26.000Z
# -*- coding: utf-8 -*-
from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d

# about hashlib ==> get_md5, get_sha, get_sha3 || default mode=256
s = "test_information"  # only a string may be passed here

print(get_md5(s))  # 5414ffd88fcb58417e64ecec51bb3a6b
print(get_md5(s, upper=True))  # 5414FFD88FCB58417E64ECEC51BB3A6B
print(get_md5(s, to_bin=True))  # b'T\x14\xff\xd8\x8f\xcbXA~d\xec\xecQ\xbb:k'  # binary output is rarely needed but kept anyway

print(get_sha(s))  # d09869fdf901465c8566f0e2debfa3f6a3d878a8157e199c7c4c6dd755617f33
print(get_sha(s, to_bin=True))  # b'\xd0\x98i\xfd\xf9\x01F\\\x85f\xf0\xe2\xde\xbf\xa3\xf6\xa3\xd8x\xa8\x15~\x19\x9c|Lm\xd7Ua\x7f3'
print(get_sha(s, mode=1))  # ada5dfdf0c9a76a84958310b838a70b6fd6d01f6
# default mode=256 // mode: 1 224 256 384 512

print(get_sha3(s))  # 9c539ca35c6719f546e67837ff37fe7791e53fe40715cd4da0167c78c9adc2e8
print(get_sha3(s, to_bin=True))  # b'\x9cS\x9c\xa3\\g\x19\xf5F\xe6x7\xff7\xfew\x91\xe5?\xe4\x07\x15\xcdM\xa0\x16|x\xc9\xad\xc2\xe8'
print(get_sha3(s, mode=1))  # return "" // SUPPORT: sha3_224 sha3_256 sha3_384 sha3_512 // only need inputting: 224 256 384 512
# default mode=256 // mode: 224 256 384 512
print(get_sha3(s, mode=384))  # 95c09e20a139843eae877a64cd95d6a629b3c9ff383b5460557aab2612682d4228d05fe41606a79acf5ae1c4de35160c

# about base64 ==> get_b64e, get_b64d
res_b64_encode = get_b64e(s)
print(res_b64_encode)  # dGVzdF9pbmZvcm1hdGlvbg==

res_b64_bin = get_b64e(s, to_bin=True)
print(res_b64_bin)  # b'dGVzdF9pbmZvcm1hdGlvbg=='

res_b32_encode = get_b64e(s, mode=32)  # default mode=64 // mode: 16 32 64 85
print(res_b32_encode)  # ORSXG5C7NFXGM33SNVQXI2LPNY======

res_b64_decode = get_b64d(res_b64_encode)
print(res_b64_decode)  # test_information

res_b32_decode = get_b64d(res_b32_encode, mode=32)  # default mode=64 // mode: 16 32 64 85
print(res_b32_decode)  # test_information
57.911765
176
0.718639
0
0
0
0
0
0
0
0
1,208
0.597724
5d61828a9a51cb5ce1865c213ffd2c5903a688a4
47
py
Python
prereise/gather/solardata/tests/__init__.py
terrywqf/PreREISE
f8052dd37091eaa15024725d5c92a3ef0ee311ee
[ "MIT" ]
null
null
null
prereise/gather/solardata/tests/__init__.py
terrywqf/PreREISE
f8052dd37091eaa15024725d5c92a3ef0ee311ee
[ "MIT" ]
null
null
null
prereise/gather/solardata/tests/__init__.py
terrywqf/PreREISE
f8052dd37091eaa15024725d5c92a3ef0ee311ee
[ "MIT" ]
null
null
null
__all__ = ["mock_pv_info", "test_pv_tracking"]
23.5
46
0.744681
0
0
0
0
0
0
0
0
32
0.680851
5d61d078db118ba78d2af9ae995c1fa84aa2f450
2,306
py
Python
arfit/cp_utils.py
farr/arfit
7ff6def331ef98f43f623da2d9867d1ac967448b
[ "MIT" ]
5
2015-04-29T21:46:52.000Z
2021-05-13T04:59:23.000Z
arfit/cp_utils.py
afcarl/arfit
7ff6def331ef98f43f623da2d9867d1ac967448b
[ "MIT" ]
null
null
null
arfit/cp_utils.py
afcarl/arfit
7ff6def331ef98f43f623da2d9867d1ac967448b
[ "MIT" ]
2
2015-12-03T12:08:32.000Z
2018-05-26T16:20:31.000Z
import carmcmc as cm
from gatspy.periodic import LombScargleFast
import matplotlib.pyplot as plt
import numpy as np

def csample_from_files(datafile, chainfile, p, q):
    data = np.loadtxt(datafile)
    times, tind = np.unique(data[:,0], return_index=True)
    data = data[tind, :]

    chain = np.loadtxt(chainfile)
    assert chain.shape[1] == p + q + 5, 'dimension mismatch'

    return cm.CarmaSample(data[:,0], data[:,1], data[:,2], None, q=q, trace=chain[:,:-2], loglike=chain[:,-2], logpost=chain[:,-1])

def normalised_lombscargle(ts, ys, dys, oversampling=5, nyquist_factor=3):
    model = LombScargleFast().fit(ts, ys, dys)

    pers, pows = model.periodogram_auto(oversampling=oversampling, nyquist_factor=nyquist_factor)

    fs = 1.0/pers
    T = np.max(ts) - np.min(ts)

    mu = 1/T*np.trapz(ys, ts)
    s2 = 1/T*np.trapz(np.square(ys-mu), ts)

    return fs, s2*pows/np.trapz(pows, fs)

def plot_psd_sample_data(sample, oversampling=5, nyquist_factor=3):
    psd_low, psd_high, psd_med, fs = sample.plot_power_spectrum(doShow=False)

    plt.clf()

    plt.loglog(fs, psd_med, '-b', alpha=0.33)
    plt.fill_between(fs, psd_low, psd_high, color='b', alpha=0.17)

    fs, psd = normalised_lombscargle(sample.time, sample.y, sample.ysig, oversampling=oversampling, nyquist_factor=nyquist_factor)

    bw = fs[-1] - fs[0]
    T = sample.time[-1] - sample.time[0]
    s2 = 1/T*np.trapz(np.square(sample.ysig), sample.time)
    noise_level = s2/bw
    levels = noise_level*np.sqrt(sample.get_samples('measerr_scale'))

    plt.axhline(np.median(levels), color='g', alpha=0.33)
    plt.fill_between(fs, np.percentile(levels, 84)+0*fs, np.percentile(levels, 16)+0*fs, color='g', alpha=0.17)

    plt.loglog(fs, psd, '-r', alpha=0.33)

def plot_psd_sample_draw(sample, loc='upper left', oversampling=5, nyquist_factor=3):
    fs, psd = normalised_lombscargle(sample.time, sample.y, sample.ysig, oversampling=oversampling, nyquist_factor=nyquist_factor)

    ys_draw = sample.predict(sample.time, bestfit='random')[0]

    fs, dpsd = normalised_lombscargle(sample.time, ys_draw, sample.ysig, oversampling=oversampling, nyquist_factor=nyquist_factor)

    plt.loglog(fs, psd, '-k', label='Data', alpha=0.5)
    plt.loglog(fs, dpsd, '-b', label='Prediction', alpha=0.5)
    plt.legend(loc=loc)
37.193548
131
0.691674
0
0
0
0
0
0
0
0
98
0.042498
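A short usage sketch for the normalised_lombscargle helper in the cp_utils.py record above, on synthetic irregularly sampled data; it assumes arfit, numpy and gatspy are installed, and the signal parameters are made up:

import numpy as np

from arfit.cp_utils import normalised_lombscargle

rng = np.random.default_rng(0)
ts = np.sort(rng.uniform(0, 100, 300))  # irregular sampling times
dys = 0.1 * np.ones_like(ts)            # constant measurement errors
ys = np.sin(2 * np.pi * ts / 7.0) + dys * rng.standard_normal(ts.shape)

fs, psd = normalised_lombscargle(ts, ys, dys)
# The PSD is normalised so that integrating it over frequency recovers the
# variance of the time series (up to sampling effects).
print(fs.shape, psd.shape)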
5d65143322eb65bf3b9638c414999e21eb0323db
1,319
py
Python
pdiffcopy/hashing.py
xolox/python-pdiffcopy
ed765af92c0c0823818d545e61384753912a5725
[ "MIT" ]
5
2020-03-07T00:01:24.000Z
2020-12-03T03:44:26.000Z
pdiffcopy/hashing.py
xolox/python-pdiffcopy
ed765af92c0c0823818d545e61384753912a5725
[ "MIT" ]
null
null
null
pdiffcopy/hashing.py
xolox/python-pdiffcopy
ed765af92c0c0823818d545e61384753912a5725
[ "MIT" ]
null
null
null
# Fast large file synchronization inspired by rsync.
#
# Author: Peter Odding <[email protected]>
# Last Change: March 6, 2020
# URL: https://pdiffcopy.readthedocs.io

"""Parallel hashing of files using :mod:`multiprocessing` and :mod:`pdiffcopy.mp`."""

# Standard library modules.
import functools
import hashlib
import os

# External dependencies.
from six.moves import range

# Modules included in our package.
from pdiffcopy.mp import WorkerPool

# Public identifiers that require documentation.
__all__ = ("compute_hashes", "hash_worker")


def compute_hashes(filename, block_size, method, concurrency):
    """Compute checksums of a file in blocks (parallel)."""
    with WorkerPool(
        concurrency=concurrency,
        generator_fn=functools.partial(range, 0, os.path.getsize(filename), block_size),
        worker_fn=functools.partial(hash_worker, block_size=block_size, filename=filename, method=method),
    ) as pool:
        for offset, digest in pool:
            yield offset, digest


def hash_worker(offset, block_size, filename, method):
    """Worker function to be run in child processes."""
    with open(filename, "rb") as handle:
        handle.seek(offset)
        context = hashlib.new(method)
        context.update(handle.read(block_size))
        return offset, context.hexdigest()
31.404762
106
0.720243
0
0
456
0.345716
0
0
0
0
523
0.396513
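A single-process standard-library sketch of the block-hashing idea in the hashing.py record above, without WorkerPool or six; the file path in the commented call is hypothetical:

import hashlib
import os


def block_digests(filename, block_size=1024 * 1024, method="sha1"):
    """Yield (offset, hexdigest) for each block of the file, sequentially."""
    with open(filename, "rb") as handle:
        for offset in range(0, os.path.getsize(filename), block_size):
            handle.seek(offset)
            context = hashlib.new(method)
            context.update(handle.read(block_size))
            yield offset, context.hexdigest()


# for offset, digest in block_digests("/tmp/example.bin"):
#     print(offset, digest)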
5d65c01cd0ad11126b7931d199f6927d742a24e8
2,170
py
Python
pyscf/nao/test/test_0003_na2_nao.py
robert-anderson/pyscf
cdc56e168cb15f47e8cdc791a92d689fa9b655af
[ "Apache-2.0" ]
3
2021-02-28T00:52:53.000Z
2021-03-01T06:23:33.000Z
pyscf/nao/test/test_0003_na2_nao.py
robert-anderson/pyscf
cdc56e168cb15f47e8cdc791a92d689fa9b655af
[ "Apache-2.0" ]
36
2018-08-22T19:44:03.000Z
2020-05-09T10:02:36.000Z
pyscf/nao/test/test_0003_na2_nao.py
robert-anderson/pyscf
cdc56e168cb15f47e8cdc791a92d689fa9b655af
[ "Apache-2.0" ]
4
2018-02-14T16:28:28.000Z
2019-08-12T16:40:30.000Z
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest from pyscf.nao.m_siesta_utils import get_siesta_command, get_pseudo class KnowValues(unittest.TestCase): def test_siesta2sv_df(self): import subprocess import os siesta_fdf = """ xml.write .true. PAO.EnergyShift 100 meV %block ChemicalSpeciesLabel 1 11 Na %endblock ChemicalSpeciesLabel NumberOfAtoms 2 NumberOfSpecies 1 %block AtomicCoordinatesAndAtomicSpecies 0.77573521 0.00000000 0.00000000 1 -0.77573521 0.00000000 0.00000000 1 %endblock AtomicCoordinatesAndAtomicSpecies MD.NumCGsteps 0 COOP.Write .true. WriteDenchar .true. """ label = 'siesta' fi = open(label+'.fdf', 'w') print(siesta_fdf, file=fi) fi.close() for sp in ['Na']: try: os.remove(sp+'.psf') except : pass try: pppath = get_pseudo(sp) except: print('get_pseudo( '+sp+' ) is not working--> skip siesta run' ) return os.symlink(pppath, sp+'.psf') errorcode = subprocess.call(get_siesta_command(label), shell=True) if errorcode: raise RuntimeError('siesta returned an error: {0}'.format(errorcode)) # run test system_vars from pyscf.nao import mf sv = mf(label=label) self.assertEqual(sv.norbs, 10) self.assertTrue( sv.diag_check() ) self.assertTrue( sv.overlap_check()) if __name__ == "__main__": unittest.main()
31.449275
87
0.657604
1,390
0.640553
0
0
0
0
0
0
1,282
0.590783
5d65f544314984a2fee60579b78ec312b1835ccc
491
py
Python
tests/moz_library/rental_books_test.py
mozkzki/moz-library
fb925414405a9fcba8bb7194cf983ba18c920e2f
[ "MIT" ]
null
null
null
tests/moz_library/rental_books_test.py
mozkzki/moz-library
fb925414405a9fcba8bb7194cf983ba18c920e2f
[ "MIT" ]
35
2021-10-09T13:08:33.000Z
2022-03-29T14:26:59.000Z
tests/moz_library/rental_books_test.py
mozkzki/moz-library
fb925414405a9fcba8bb7194cf983ba18c920e2f
[ "MIT" ]
null
null
null
import pytest

from moz_library.rental_books import RentalBooks


class TestRentalBooks:
    @pytest.fixture()
    def books1(self):
        return RentalBooks()

    def test_can_extend_period_1(self, books1):
        assert books1._can_extend_period("延長できません") is False

    def test_can_extend_period_2(self, books1):
        assert books1._can_extend_period("すでに延長されています") is False

    def test_can_extend_period_3(self, books1):
        assert books1._can_extend_period("それ以外") is True
27.277778
64
0.737271
469
0.876636
0
0
68
0.127103
0
0
72
0.134579
5d66ab6b71d371d38fa50d90c8734a50bf50ee30
2,625
py
Python
examples/src/Charts/MultiCategoryChart.py
aspose-slides/Aspose.Slides-for-Python-via-.NET
c55ad5c71f942598f1e67e22a52cbcd1cb286467
[ "MIT" ]
null
null
null
examples/src/Charts/MultiCategoryChart.py
aspose-slides/Aspose.Slides-for-Python-via-.NET
c55ad5c71f942598f1e67e22a52cbcd1cb286467
[ "MIT" ]
null
null
null
examples/src/Charts/MultiCategoryChart.py
aspose-slides/Aspose.Slides-for-Python-via-.NET
c55ad5c71f942598f1e67e22a52cbcd1cb286467
[ "MIT" ]
null
null
null
import aspose.pydrawing as drawing import aspose.slides as slides def charts_multi_category_chart(): #ExStart:MultiCategoryChart # The path to the documents directory. outDir = "./examples/out/" with slides.Presentation() as pres: slide = pres.slides[0] ch = pres.slides[0].shapes.add_chart(slides.charts.ChartType.CLUSTERED_COLUMN, 100, 100, 600, 450) ch.chart_data.series.clear() ch.chart_data.categories.clear() fact = ch.chart_data.chart_data_workbook fact.clear(0) defaultWorksheetIndex = 0 category = ch.chart_data.categories.add(fact.get_cell(0, "c2", "A")) category.grouping_levels.set_grouping_item(1, "Group1") category = ch.chart_data.categories.add(fact.get_cell(0, "c3", "B")) category = ch.chart_data.categories.add(fact.get_cell(0, "c4", "C")) category.grouping_levels.set_grouping_item(1, "Group2") category = ch.chart_data.categories.add(fact.get_cell(0, "c5", "D")) category = ch.chart_data.categories.add(fact.get_cell(0, "c6", "E")) category.grouping_levels.set_grouping_item(1, "Group3") category = ch.chart_data.categories.add(fact.get_cell(0, "c7", "F")) category = ch.chart_data.categories.add(fact.get_cell(0, "c8", "G")) category.grouping_levels.set_grouping_item(1, "Group4") category = ch.chart_data.categories.add(fact.get_cell(0, "c9", "H")) # Adding Series series = ch.chart_data.series.add(fact.get_cell(0, "D1", "Series 1"), slides.charts.ChartType.CLUSTERED_COLUMN) series.data_points.add_data_point_for_bar_series(fact.get_cell(defaultWorksheetIndex, "D2", 10)) series.data_points.add_data_point_for_bar_series(fact.get_cell(defaultWorksheetIndex, "D3", 20)) series.data_points.add_data_point_for_bar_series(fact.get_cell(defaultWorksheetIndex, "D4", 30)) series.data_points.add_data_point_for_bar_series(fact.get_cell(defaultWorksheetIndex, "D5", 40)) series.data_points.add_data_point_for_bar_series(fact.get_cell(defaultWorksheetIndex, "D6", 50)) series.data_points.add_data_point_for_bar_series(fact.get_cell(defaultWorksheetIndex, "D7", 60)) series.data_points.add_data_point_for_bar_series(fact.get_cell(defaultWorksheetIndex, "D8", 70)) series.data_points.add_data_point_for_bar_series(fact.get_cell(defaultWorksheetIndex, "D9", 80)) # Save presentation with chart pres.save(outDir + "charts_multi_category_chart_out.pptx", slides.export.SaveFormat.PPTX) #ExEnd:MultiCategoryChart
51.470588
106
0.708952
0
0
0
0
0
0
0
0
335
0.127522
5d66c8be8ed85e591a27c5733a7d2e134250bc39
9,393
py
Python
netbox/extras/forms/filtersets.py
cybarox/netbox
ea197eff5f4fe925bb354d1375912decd81752bd
[ "Apache-2.0" ]
null
null
null
netbox/extras/forms/filtersets.py
cybarox/netbox
ea197eff5f4fe925bb354d1375912decd81752bd
[ "Apache-2.0" ]
null
null
null
netbox/extras/forms/filtersets.py
cybarox/netbox
ea197eff5f4fe925bb354d1375912decd81752bd
[ "Apache-2.0" ]
null
null
null
from django import forms from django.contrib.auth.models import User from django.contrib.contenttypes.models import ContentType from django.utils.translation import gettext as _ from dcim.models import DeviceRole, DeviceType, Platform, Region, Site, SiteGroup from extras.choices import * from extras.models import * from extras.utils import FeatureQuery from netbox.forms.base import NetBoxModelFilterSetForm from tenancy.models import Tenant, TenantGroup from utilities.forms import ( add_blank_choice, APISelectMultiple, BOOLEAN_WITH_BLANK_CHOICES, ContentTypeChoiceField, ContentTypeMultipleChoiceField, DateTimePicker, DynamicModelMultipleChoiceField, FilterForm, MultipleChoiceField, StaticSelect, TagFilterField, ) from virtualization.models import Cluster, ClusterGroup, ClusterType __all__ = ( 'ConfigContextFilterForm', 'CustomFieldFilterForm', 'CustomLinkFilterForm', 'ExportTemplateFilterForm', 'JournalEntryFilterForm', 'LocalConfigContextFilterForm', 'ObjectChangeFilterForm', 'TagFilterForm', 'WebhookFilterForm', ) class CustomFieldFilterForm(FilterForm): fieldsets = ( (None, ('q',)), ('Attributes', ('type', 'content_types', 'weight', 'required')), ) content_types = ContentTypeMultipleChoiceField( queryset=ContentType.objects.all(), limit_choices_to=FeatureQuery('custom_fields'), required=False ) type = MultipleChoiceField( choices=CustomFieldTypeChoices, required=False, label=_('Field type') ) weight = forms.IntegerField( required=False ) required = forms.NullBooleanField( required=False, widget=StaticSelect( choices=BOOLEAN_WITH_BLANK_CHOICES ) ) class CustomLinkFilterForm(FilterForm): fieldsets = ( (None, ('q',)), ('Attributes', ('content_type', 'enabled', 'new_window', 'weight')), ) content_type = ContentTypeChoiceField( queryset=ContentType.objects.all(), limit_choices_to=FeatureQuery('custom_links'), required=False ) enabled = forms.NullBooleanField( required=False, widget=StaticSelect( choices=BOOLEAN_WITH_BLANK_CHOICES ) ) new_window = forms.NullBooleanField( required=False, widget=StaticSelect( choices=BOOLEAN_WITH_BLANK_CHOICES ) ) weight = forms.IntegerField( required=False ) class ExportTemplateFilterForm(FilterForm): fieldsets = ( (None, ('q',)), ('Attributes', ('content_type', 'mime_type', 'file_extension', 'as_attachment')), ) content_type = ContentTypeChoiceField( queryset=ContentType.objects.all(), limit_choices_to=FeatureQuery('export_templates'), required=False ) mime_type = forms.CharField( required=False, label=_('MIME type') ) file_extension = forms.CharField( required=False ) as_attachment = forms.NullBooleanField( required=False, widget=StaticSelect( choices=BOOLEAN_WITH_BLANK_CHOICES ) ) class WebhookFilterForm(FilterForm): fieldsets = ( (None, ('q',)), ('Attributes', ('content_types', 'http_method', 'enabled')), ('Events', ('type_create', 'type_update', 'type_delete')), ) content_types = ContentTypeMultipleChoiceField( queryset=ContentType.objects.all(), limit_choices_to=FeatureQuery('webhooks'), required=False ) http_method = MultipleChoiceField( choices=WebhookHttpMethodChoices, required=False, label=_('HTTP method') ) enabled = forms.NullBooleanField( required=False, widget=StaticSelect( choices=BOOLEAN_WITH_BLANK_CHOICES ) ) type_create = forms.NullBooleanField( required=False, widget=StaticSelect( choices=BOOLEAN_WITH_BLANK_CHOICES ) ) type_update = forms.NullBooleanField( required=False, widget=StaticSelect( choices=BOOLEAN_WITH_BLANK_CHOICES ) ) type_delete = forms.NullBooleanField( required=False, widget=StaticSelect( choices=BOOLEAN_WITH_BLANK_CHOICES ) ) 
class TagFilterForm(FilterForm): model = Tag content_type_id = ContentTypeMultipleChoiceField( queryset=ContentType.objects.filter(FeatureQuery('tags').get_query()), required=False, label=_('Tagged object type') ) class ConfigContextFilterForm(FilterForm): fieldsets = ( (None, ('q', 'tag_id')), ('Location', ('region_id', 'site_group_id', 'site_id')), ('Device', ('device_type_id', 'platform_id', 'role_id')), ('Cluster', ('cluster_type_id', 'cluster_group_id', 'cluster_id')), ('Tenant', ('tenant_group_id', 'tenant_id')) ) region_id = DynamicModelMultipleChoiceField( queryset=Region.objects.all(), required=False, label=_('Regions') ) site_group_id = DynamicModelMultipleChoiceField( queryset=SiteGroup.objects.all(), required=False, label=_('Site groups') ) site_id = DynamicModelMultipleChoiceField( queryset=Site.objects.all(), required=False, label=_('Sites') ) device_type_id = DynamicModelMultipleChoiceField( queryset=DeviceType.objects.all(), required=False, label=_('Device types') ) role_id = DynamicModelMultipleChoiceField( queryset=DeviceRole.objects.all(), required=False, label=_('Roles') ) platform_id = DynamicModelMultipleChoiceField( queryset=Platform.objects.all(), required=False, label=_('Platforms') ) cluster_type_id = DynamicModelMultipleChoiceField( queryset=ClusterType.objects.all(), required=False, label=_('Cluster types'), fetch_trigger='open' ) cluster_group_id = DynamicModelMultipleChoiceField( queryset=ClusterGroup.objects.all(), required=False, label=_('Cluster groups') ) cluster_id = DynamicModelMultipleChoiceField( queryset=Cluster.objects.all(), required=False, label=_('Clusters') ) tenant_group_id = DynamicModelMultipleChoiceField( queryset=TenantGroup.objects.all(), required=False, label=_('Tenant groups') ) tenant_id = DynamicModelMultipleChoiceField( queryset=Tenant.objects.all(), required=False, label=_('Tenant') ) tag_id = DynamicModelMultipleChoiceField( queryset=Tag.objects.all(), required=False, label=_('Tags') ) class LocalConfigContextFilterForm(forms.Form): local_context_data = forms.NullBooleanField( required=False, label=_('Has local config context data'), widget=StaticSelect( choices=BOOLEAN_WITH_BLANK_CHOICES ) ) class JournalEntryFilterForm(NetBoxModelFilterSetForm): model = JournalEntry fieldsets = ( (None, ('q', 'tag')), ('Creation', ('created_before', 'created_after', 'created_by_id')), ('Attributes', ('assigned_object_type_id', 'kind')) ) created_after = forms.DateTimeField( required=False, label=_('After'), widget=DateTimePicker() ) created_before = forms.DateTimeField( required=False, label=_('Before'), widget=DateTimePicker() ) created_by_id = DynamicModelMultipleChoiceField( queryset=User.objects.all(), required=False, label=_('User'), widget=APISelectMultiple( api_url='/api/users/users/', ) ) assigned_object_type_id = DynamicModelMultipleChoiceField( queryset=ContentType.objects.all(), required=False, label=_('Object Type'), widget=APISelectMultiple( api_url='/api/extras/content-types/', ) ) kind = forms.ChoiceField( choices=add_blank_choice(JournalEntryKindChoices), required=False, widget=StaticSelect() ) tag = TagFilterField(model) class ObjectChangeFilterForm(FilterForm): model = ObjectChange fieldsets = ( (None, ('q',)), ('Time', ('time_before', 'time_after')), ('Attributes', ('action', 'user_id', 'changed_object_type_id')), ) time_after = forms.DateTimeField( required=False, label=_('After'), widget=DateTimePicker() ) time_before = forms.DateTimeField( required=False, label=_('Before'), widget=DateTimePicker() ) action = forms.ChoiceField( 
choices=add_blank_choice(ObjectChangeActionChoices), required=False, widget=StaticSelect() ) user_id = DynamicModelMultipleChoiceField( queryset=User.objects.all(), required=False, label=_('User'), widget=APISelectMultiple( api_url='/api/users/users/', ) ) changed_object_type_id = DynamicModelMultipleChoiceField( queryset=ContentType.objects.all(), required=False, label=_('Object Type'), widget=APISelectMultiple( api_url='/api/extras/content-types/', ) )
29.261682
117
0.639519
8,285
0.88204
0
0
0
0
0
0
1,327
0.141275
5d66ef032fbd2dcf091b5ffde482a5d596613146
1,940
py
Python
bin/write2cly.py
docdiesel/smartmetertools
3b7449c7a9069696af078631aa5440f53d0f57bc
[ "MIT" ]
1
2019-05-30T08:28:31.000Z
2019-05-30T08:28:31.000Z
bin/write2cly.py
docdiesel/smartmetertools
3b7449c7a9069696af078631aa5440f53d0f57bc
[ "MIT" ]
null
null
null
bin/write2cly.py
docdiesel/smartmetertools
3b7449c7a9069696af078631aa5440f53d0f57bc
[ "MIT" ]
null
null
null
#!/usr/bin/python3

## write2cly.py - reads json (generated by sml_reader.py) from stdin
##              - writes values to Corlysis time series InfluxDB
##
## Writes data from smart meter to time series database (InfluxDB)
## at Corlysis.com [1]. You need to configure your database and token
## in the config section.
##
## [1] https://corlysis.com/

##==== license section ========
## This code is under MIT License: Copyright (C) 2019 Bernd Künnen
## License details see https://choosealicense.com/licenses/mit/

##==== config section ========

# define corlysis settings here - set db and token at least
cly_base_url = 'https://corlysis.com:8086/write'
cly_parameters = {
    "db": "energy",
    "u": "token",
    "p": "placeyourtokenhere",
    "precision": "ms"}

# assign readable field names
config = {
    "1.8.0": "Bezug",
    "2.8.0": "Einspeisung",
    "16.7.0": "Wirkleistung"
}

##==== code section ==== no need to change lines below ====

##-- import libraries
import json, sys, requests
import time

# load json from stdin
try:
    myjson = json.load(sys.stdin)
except:
    sys.stderr.write('!! error loading json')
    exit(1)

# decode json
try:
    line = "meter_data "
    # add each meter value to line
    for obis in myjson['data']:
        key = config[obis]            # set human readable field name
        value = myjson['data'][obis]  # get value from smart meter
        line += key + '=' + str(value) + ','  # add key=value to insert line

    # cut off last comma
    line = line[:-1]
    # add timestamp as unix timestamp in ms
    line += ' ' + str(int(time.time()*1000))  #+ '\n'

    # post data into time series database; http response should be 204
    r = requests.post(cly_base_url, params=cly_parameters, data=line)
    if r.status_code != 204:
        sys.stderr.write(str(r.status_code))
        sys.stderr.write(str(r.content))

# catch if input is no valid json
except:
    sys.stderr.write('!!error: no data block in json')
    exit(2)
25.526316
71
0.652062
0
0
0
0
0
0
0
0
1,263
0.650696
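A small sketch of the InfluxDB line-protocol string that write2cly.py builds, driven by a hand-written reading instead of stdin; the field names follow the script's config section and the numeric values are made up:

import time

config = {"1.8.0": "Bezug", "2.8.0": "Einspeisung", "16.7.0": "Wirkleistung"}
myjson = {"data": {"1.8.0": 1234.5, "2.8.0": 42.0, "16.7.0": 350}}

line = "meter_data "
line += ",".join(f"{config[obis]}={value}" for obis, value in myjson["data"].items())
line += " " + str(int(time.time() * 1000))
print(line)  # meter_data Bezug=1234.5,Einspeisung=42.0,Wirkleistung=350 <timestamp in ms>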
5d672137217c3f190c65f38cc034a58e8ab7815b
1,440
py
Python
dns/rdtypes/ANY/__init__.py
Ashiq5/dnspython
5449af5318d88bada34f661247f3bcb16f58f057
[ "ISC" ]
null
null
null
dns/rdtypes/ANY/__init__.py
Ashiq5/dnspython
5449af5318d88bada34f661247f3bcb16f58f057
[ "ISC" ]
null
null
null
dns/rdtypes/ANY/__init__.py
Ashiq5/dnspython
5449af5318d88bada34f661247f3bcb16f58f057
[ "ISC" ]
null
null
null
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license

# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

"""Class ANY (generic) rdata type classes."""

__all__ = [
    'AFSDB',
    'AMTRELAY',
    'AVC',
    'CAA',
    'CDNSKEY',
    'CDS',
    'CERT',
    'CNAME',
    'CSYNC',
    'DLV',
    'DNAME',
    'DNSKEY',
    'DS',
    'EUI48',
    'EUI64',
    'GPOS',
    'HINFO',
    'HIP',
    'ISDN',
    'LOC',
    'MX',
    'NINFO',
    'NS',
    'NSEC',
    'NSEC3',
    'NSEC3PARAM',
    'OPENPGPKEY',
    'OPT',
    'PTR',
    'RP',
    'RRSIG',
    'RT',
    'SMIMEA',
    'SOA',
    'SPF',
    'SSHFP',
    'TKEY',
    'TLSA',
    'TSIG',
    'TXT',
    'URI',
    'X25',
]
22.5
75
0.620833
0
0
0
0
0
0
0
0
1,155
0.802083
5d67812696614c7eac2050cda2d994e16e9201d7
10,519
py
Python
01_test.py
KhubbatulinMark/DCase2020-Task-2-on-Wigner-Ville-transform
6653d7abbaafe09fb17768d9902bb77db24945d4
[ "MIT" ]
3
2020-09-18T10:33:37.000Z
2020-11-04T12:53:50.000Z
01_test.py
KhubbatulinMark/DCase2020-Task-2-on-Wigner-Ville-transform
6653d7abbaafe09fb17768d9902bb77db24945d4
[ "MIT" ]
4
2020-09-26T01:07:55.000Z
2022-02-10T01:30:27.000Z
01_test.py
KhubbatulinMark/DCase2020-Task-2-on-Wigner-Ville-transform
6653d7abbaafe09fb17768d9902bb77db24945d4
[ "MIT" ]
null
null
null
""" @file 01_test.py @brief Script for test @author Toshiki Nakamura, Yuki Nikaido, and Yohei Kawaguchi (Hitachi Ltd.) Copyright (C) 2020 Hitachi, Ltd. All right reserved. """ ######################################################################## # import default python-library ######################################################################## import os import glob import csv import re import itertools import sys ######################################################################## ######################################################################## # import additional python-library ######################################################################## import numpy # from import from tqdm import tqdm from sklearn import metrics # original lib import common as com import keras_model ######################################################################## ######################################################################## # load parameter.yaml ######################################################################## param = com.yaml_load() ####################################################################### ######################################################################## # def ######################################################################## def save_csv(save_file_path, save_data): with open(save_file_path, "w", newline="") as f: writer = csv.writer(f, lineterminator='\n') writer.writerows(save_data) def get_machine_id_list_for_test(target_dir, dir_name="test", ext="json"): """ target_dir : str base directory path of "dev_data" or "eval_data" test_dir_name : str (default="test") directory containing test data ext : str (default="wav) file extension of audio files return : machine_id_list : list [ str ] list of machine IDs extracted from the names of test files """ # create test files dir_path = os.path.abspath("{dir}/{dir_name}/*.{ext}".format(dir=target_dir, dir_name=dir_name, ext=ext)) file_paths = sorted(glob.glob(dir_path)) # extract id machine_id_list = sorted(list(set(itertools.chain.from_iterable( [re.findall('id_[0-9][0-9]', ext_id) for ext_id in file_paths])))) return machine_id_list def test_file_list_generator(target_dir, id_name, dir_name="test", prefix_normal="normal", prefix_anomaly="anomaly", ext="json"): """ target_dir : str base directory path of the dev_data or eval_data id_name : str id of wav file in <<test_dir_name>> directory dir_name : str (default="test") directory containing test data prefix_normal : str (default="normal") normal directory name prefix_anomaly : str (default="anomaly") anomaly directory name ext : str (default="wav") file extension of audio files return : if the mode is "development": test_files : list [ str ] file list for test test_labels : list [ boolean ] label info. 
list for test * normal/anomaly = 0/1 if the mode is "evaluation": test_files : list [ str ] file list for test """ com.logger.info("target_dir : {}".format(target_dir+"_"+id_name)) # development if mode: normal_files = sorted( glob.glob("{dir}/{dir_name}/{prefix_normal}_{id_name}*.{ext}".format(dir=target_dir, dir_name=dir_name, prefix_normal=prefix_normal, id_name=id_name, ext=ext))) normal_labels = numpy.zeros(len(normal_files)) anomaly_files = sorted( glob.glob("{dir}/{dir_name}/{prefix_anomaly}_{id_name}*.{ext}".format(dir=target_dir, dir_name=dir_name, prefix_anomaly=prefix_anomaly, id_name=id_name, ext=ext))) anomaly_labels = numpy.ones(len(anomaly_files)) files = numpy.concatenate((normal_files, anomaly_files), axis=0) labels = numpy.concatenate((normal_labels, anomaly_labels), axis=0) com.logger.info("test_file num : {num}".format(num=len(files))) if len(files) == 0: com.logger.exception("no_wav_file!!") print("\n========================================") # evaluation else: files = sorted( glob.glob("{dir}/{dir_name}/*{id_name}*.{ext}".format(dir=target_dir, dir_name=dir_name, id_name=id_name, ext=ext))) labels = None com.logger.info("test_file num : {num}".format(num=len(files))) if len(files) == 0: com.logger.exception("no_wav_file!!") print("\n=========================================") return files, labels ######################################################################## ######################################################################## # main 01_test.py ######################################################################## if __name__ == "__main__": # check mode # "development": mode == True # "evaluation": mode == False mode = com.command_line_chk() if mode is None: sys.exit(-1) # make output result directory os.makedirs(param["result_directory"], exist_ok=True) # load base directory dirs = com.select_dirs(param=param, mode=mode) # initialize lines in csv for AUC and pAUC csv_lines = [] # loop of the base directory for idx, target_dir in enumerate(dirs): print("\n===========================") print("[{idx}/{total}] {dirname}".format(dirname=target_dir, idx=idx+1, total=len(dirs))) machine_type = os.path.split(target_dir)[1] print("============== MODEL LOAD ==============") # set model path model_file = "{model}/model_{machine_type}.hdf5".format(model=param["model_directory"], machine_type=machine_type) # load model file if not os.path.exists(model_file): com.logger.error("{} model not found ".format(machine_type)) sys.exit(-1) model = keras_model.load_model(model_file) model.summary() if mode: # results by type csv_lines.append([machine_type]) csv_lines.append(["id", "AUC", "pAUC"]) performance = [] machine_id_list = get_machine_id_list_for_test(target_dir) print(machine_id_list) for id_str in machine_id_list: # load test file test_files, y_true = test_file_list_generator(target_dir, id_str) # setup anomaly score file path anomaly_score_csv = "{result}/anomaly_score_{machine_type}_{id_str}.csv".format( result=param["result_directory"], machine_type=machine_type, id_str=id_str) anomaly_score_list = [] print("\n============== BEGIN TEST FOR A MACHINE ID ==============") y_pred = [0. 
for k in test_files] for file_idx, file_path in tqdm(enumerate(test_files), total=len(test_files)): try: data = com.file_to_vector_array(file_path, n_mels=param["feature"]["n_mels"], frames=param["feature"]["frames"], n_fft=param["feature"]["n_fft"], hop_length=param["feature"]["hop_length"], power=param["feature"]["power"]) errors = numpy.mean(numpy.square(data - model.predict(data)), axis=1) y_pred[file_idx] = numpy.mean(errors) anomaly_score_list.append([os.path.basename(file_path), y_pred[file_idx]]) except: com.logger.error("file broken!!: {}".format(file_path)) # save anomaly score save_csv(save_file_path=anomaly_score_csv, save_data=anomaly_score_list) com.logger.info("anomaly score result -> {}".format(anomaly_score_csv)) if mode: # append AUC and pAUC to lists auc = metrics.roc_auc_score(y_true, y_pred) p_auc = metrics.roc_auc_score(y_true, y_pred, max_fpr=param["max_fpr"]) csv_lines.append([id_str.split("_", 1)[1], auc, p_auc]) performance.append([auc, p_auc]) com.logger.info("AUC : {}".format(auc)) com.logger.info("pAUC : {}".format(p_auc)) print("\n============ END OF TEST FOR A MACHINE ID ============") if mode: # calculate averages for AUCs and pAUCs averaged_performance = numpy.mean(numpy.array(performance, dtype=float), axis=0) csv_lines.append(["Average"] + list(averaged_performance)) csv_lines.append([]) if mode: # output results result_path = "{result}/{file_name}".format(result=param["result_directory"], file_name=param["result_file"]) com.logger.info("AUC and pAUC results -> {}".format(result_path)) save_csv(save_file_path=result_path, save_data=csv_lines)
42.587045
118
0.456412
0
0
0
0
0
0
0
0
4,025
0.382641
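For context, the test script in the record above scores each clip by its mean squared reconstruction error and then evaluates AUC/pAUC with scikit-learn. Below is a minimal, self-contained sketch of that scoring idea only; the stand-in "model", the synthetic feature shapes, and the max_fpr value of 0.1 are assumptions for illustration and are not taken from the record (which reads max_fpr from its param file).

import numpy as np
from sklearn import metrics

rng = np.random.default_rng(0)

def reconstruct(batch):
    # Hypothetical stand-in for model.predict(batch): pretend the autoencoder
    # always reconstructs the "normal" mean (zeros here), so anomalous inputs
    # leave larger residuals.
    return np.zeros_like(batch)

def anomaly_score(batch):
    # Per-frame squared reconstruction error, averaged over frames to give a
    # single score per file -- the same reduction the test script performs.
    errors = np.mean(np.square(batch - reconstruct(batch)), axis=1)
    return float(np.mean(errors))

# Synthetic stand-ins for feature matrices of normal and anomalous clips.
normal = [rng.normal(size=(64, 128)) for _ in range(20)]
anomalous = [rng.normal(loc=3.0, size=(64, 128)) for _ in range(20)]

y_true = np.array([0] * len(normal) + [1] * len(anomalous))
y_pred = np.array([anomaly_score(x) for x in normal + anomalous])

print("AUC :", metrics.roc_auc_score(y_true, y_pred))
print("pAUC:", metrics.roc_auc_score(y_true, y_pred, max_fpr=0.1))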
5d68067044bd41e0c94f3b4e115e6a6243c834c1
1,247
py
Python
src/text_split/split.py
i1123581321/word_split
6401cdc37f58aa8718793dd7cb9bf4d3a4b690a4
[ "MIT" ]
null
null
null
src/text_split/split.py
i1123581321/word_split
6401cdc37f58aa8718793dd7cb9bf4d3a4b690a4
[ "MIT" ]
null
null
null
src/text_split/split.py
i1123581321/word_split
6401cdc37f58aa8718793dd7cb9bf4d3a4b690a4
[ "MIT" ]
null
null
null
import argparse
import os

parser = argparse.ArgumentParser(description="a simple parser")
parser.add_argument("filename", type=str)
parser.add_argument("lineno", nargs="+", type=int)
parser.add_argument("--same_length", action=argparse.BooleanOptionalAction)


def main():
    args = parser.parse_args()
    filename = args.filename
    linenos = args.lineno
    same_length = args.same_length

    linenos = list(map(lambda x: x - 1, linenos))
    linenos.sort()
    results = []
    with open(filename, "r", encoding="utf-8") as f:
        content = f.readlines()
        if not same_length:
            start = 0
            for lineno in linenos:
                results.append("".join(content[start:lineno]))
                start = lineno
            results.append("".join(content[start:]))
        else:
            lineno = linenos[0] + 1 if linenos[0] else 100000
            start = 0
            while start < len(content):
                results.append("".join(content[start: start + lineno]))
                start += lineno
    name, ext = os.path.splitext(filename)
    for i, result in enumerate(results):
        with open(f"{name}-{i + 1:02}{ext}", "w", encoding="utf-8") as f:
            f.write(result)
30.414634
77
0.585405
0
0
0
0
0
0
0
0
104
0.0834
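As a usage illustration of the splitting logic in the record above: the script shifts the user-supplied 1-based line numbers to 0-based indices and slices the file contents at those cut points. The snippet below reproduces only that slicing idea; the sample lines and the cut points 3 and 5 are arbitrary example values, not part of the record.

# Cut a list of lines at 1-based positions 3 and 5 (example values).
lines = [f"line {i}\n" for i in range(1, 9)]
cut_points = sorted(n - 1 for n in (3, 5))   # same 1-based -> 0-based shift as the script

parts, start = [], 0
for cut in cut_points:
    parts.append("".join(lines[start:cut]))
    start = cut
parts.append("".join(lines[start:]))

for i, part in enumerate(parts, start=1):
    print(f"--- part {i:02} ---")
    print(part, end="")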
5d68e4a866db0948470d395c5ba6d5ad5676d177
23,037
py
Python
src/sentry/models/event.py
Ali-Tahir/sentry
aa7b306c5ea671ac002a3524982563679557cb31
[ "BSD-3-Clause" ]
null
null
null
src/sentry/models/event.py
Ali-Tahir/sentry
aa7b306c5ea671ac002a3524982563679557cb31
[ "BSD-3-Clause" ]
null
null
null
src/sentry/models/event.py
Ali-Tahir/sentry
aa7b306c5ea671ac002a3524982563679557cb31
[ "BSD-3-Clause" ]
null
null
null
from __future__ import absolute_import import six import string import warnings import pytz from collections import OrderedDict from dateutil.parser import parse as parse_date from django.db import models from django.utils import timezone from django.utils.translation import ugettext_lazy as _ from hashlib import md5 from semaphore.processing import StoreNormalizer from sentry import eventtypes from sentry.db.models import ( BoundedBigIntegerField, BoundedIntegerField, Model, NodeData, NodeField, sane_repr, ) from sentry.db.models.manager import EventManager from sentry.interfaces.base import get_interfaces from sentry.utils import json from sentry.utils.cache import memoize from sentry.utils.canonical import CanonicalKeyDict, CanonicalKeyView from sentry.utils.safe import get_path from sentry.utils.strings import truncatechars class EventDict(CanonicalKeyDict): """ Creating an instance of this dictionary will send the event through basic (Rust-based) type/schema validation called "re-normalization". This is used as a wrapper type for `Event.data` such that creating an event object (or loading it from the DB) will ensure the data fits the type schema. """ def __init__(self, data, skip_renormalization=False, **kwargs): is_renormalized = isinstance(data, EventDict) or ( isinstance(data, NodeData) and isinstance(data.data, EventDict) ) if not skip_renormalization and not is_renormalized: normalizer = StoreNormalizer(is_renormalize=True, enable_trimming=False) data = normalizer.normalize_event(dict(data)) CanonicalKeyDict.__init__(self, data, **kwargs) class EventCommon(object): """ Methods and properties common to both Event and SnubaEvent. """ @classmethod def generate_node_id(cls, project_id, event_id): """ Returns a deterministic node_id for this event based on the project_id and event_id which together are globally unique. The event body should be saved under this key in nodestore so it can be retrieved using the same generated id when we only have project_id and event_id. """ return md5("{}:{}".format(project_id, event_id)).hexdigest() # TODO (alex) We need a better way to cache these properties. functools32 # doesn't quite do the trick as there is a reference bug with unsaved # models. But the current _group_cache thing is also clunky because these # properties need to be stripped out in __getstate__. @property def group(self): from sentry.models import Group if not self.group_id: return None if not hasattr(self, "_group_cache"): self._group_cache = Group.objects.get(id=self.group_id) return self._group_cache @group.setter def group(self, group): self.group_id = group.id self._group_cache = group @property def project(self): from sentry.models import Project if not hasattr(self, "_project_cache"): self._project_cache = Project.objects.get(id=self.project_id) return self._project_cache @project.setter def project(self, project): if project is None: self.project_id = None else: self.project_id = project.id self._project_cache = project def get_interfaces(self): return CanonicalKeyView(get_interfaces(self.data)) @memoize def interfaces(self): return self.get_interfaces() def get_interface(self, name): return self.interfaces.get(name) def get_legacy_message(self): # TODO(mitsuhiko): remove this code once it's unused. It's still # being used by plugin code and once the message rename is through # plugins should instead swithc to the actual message attribute or # this method could return what currently is real_message. 
return ( get_path(self.data, "logentry", "formatted") or get_path(self.data, "logentry", "message") or self.message ) def get_event_type(self): """ Return the type of this event. See ``sentry.eventtypes``. """ return self.data.get("type", "default") def get_event_metadata(self): """ Return the metadata of this event. See ``sentry.eventtypes``. """ # For some inexplicable reason we have some cases where the data # is completely empty. In that case we want to hobble along # further. return self.data.get("metadata") or {} def get_grouping_config(self): """Returns the event grouping config.""" from sentry.grouping.api import get_grouping_config_dict_for_event_data return get_grouping_config_dict_for_event_data(self.data, self.project) def get_hashes(self, force_config=None): """ Returns the calculated hashes for the event. This uses the stored information if available. Grouping hashes will take into account fingerprinting and checksums. """ # If we have hashes stored in the data we use them, otherwise we # fall back to generating new ones from the data. We can only use # this if we do not force a different config. if force_config is None: hashes = self.data.get("hashes") if hashes is not None: return hashes return filter( None, [x.get_hash() for x in self.get_grouping_variants(force_config).values()] ) def get_grouping_variants(self, force_config=None, normalize_stacktraces=False): """ This is similar to `get_hashes` but will instead return the grouping components for each variant in a dictionary. If `normalize_stacktraces` is set to `True` then the event data will be modified for `in_app` in addition to event variants being created. This means that after calling that function the event data has been modified in place. """ from sentry.grouping.api import get_grouping_variants_for_event, load_grouping_config from sentry.stacktraces.processing import normalize_stacktraces_for_grouping # Forcing configs has two separate modes. One is where just the # config ID is given in which case it's merged with the stored or # default config dictionary if force_config is not None: if isinstance(force_config, six.string_types): stored_config = self.get_grouping_config() config = dict(stored_config) config["id"] = force_config else: config = force_config # Otherwise we just use the same grouping config as stored. if # this is None the `get_grouping_variants_for_event` will fill in # the default. else: config = self.data.get("grouping_config") config = load_grouping_config(config) if normalize_stacktraces: normalize_stacktraces_for_grouping(self.data, config) return get_grouping_variants_for_event(self, config) def get_primary_hash(self): # TODO: This *might* need to be protected from an IndexError? return self.get_hashes()[0] @property def title(self): # also see event_manager.py which inserts this for snuba et = eventtypes.get(self.get_event_type())() return et.get_title(self.get_event_metadata()) @property def culprit(self): # For a while events did not save the culprit if self.group_id: return self.data.get("culprit") or self.group.culprit return self.data.get("culprit") @property def location(self): # also see event_manager.py which inserts this for snuba et = eventtypes.get(self.get_event_type())() return et.get_location(self.get_event_metadata()) @property def real_message(self): # XXX(mitsuhiko): this is a transitional attribute that should be # removed. `message` will be renamed to `search_message` and this # will become `message`. 
return ( get_path(self.data, "logentry", "formatted") or get_path(self.data, "logentry", "message") or "" ) @property def organization(self): return self.project.organization @property def version(self): return self.data.get("version", "5") @property def ip_address(self): ip_address = get_path(self.data, "user", "ip_address") if ip_address: return ip_address remote_addr = get_path(self.data, "request", "env", "REMOTE_ADDR") if remote_addr: return remote_addr return None @property def tags(self): try: rv = sorted( [ (t, v) for t, v in get_path(self.data, "tags", filter=True) or () if t is not None and v is not None ] ) return rv except ValueError: # at one point Sentry allowed invalid tag sets such as (foo, bar) # vs ((tag, foo), (tag, bar)) return [] # For compatibility, still used by plugins. def get_tags(self): return self.tags def get_tag(self, key): for t, v in self.get_tags(): if t == key: return v return None @property def release(self): return self.get_tag("sentry:release") @property def dist(self): return self.get_tag("sentry:dist") def get_raw_data(self): """Returns the internal raw event data dict.""" return dict(self.data.items()) @property def size(self): return len(json.dumps(dict(self.data))) @property def transaction(self): return self.get_tag("transaction") def get_email_subject(self): template = self.project.get_option("mail:subject_template") if template: template = EventSubjectTemplate(template) else: template = DEFAULT_SUBJECT_TEMPLATE return truncatechars(template.safe_substitute(EventSubjectTemplateData(self)), 128).encode( "utf-8" ) def get_environment(self): from sentry.models import Environment if not hasattr(self, "_environment_cache"): self._environment_cache = Environment.objects.get( organization_id=self.project.organization_id, name=Environment.get_name_or_default(self.get_tag("environment")), ) return self._environment_cache def get_minimal_user(self): """ A minimal 'User' interface object that gives us enough information to render a user badge. """ return self.get_interface("user") def as_dict(self): """Returns the data in normalized form for external consumers.""" # We use a OrderedDict to keep elements ordered for a potential JSON serializer data = OrderedDict() data["event_id"] = self.event_id data["project"] = self.project_id data["release"] = self.release data["dist"] = self.dist data["platform"] = self.platform data["message"] = self.real_message data["datetime"] = self.datetime data["time_spent"] = self.time_spent data["tags"] = [(k.split("sentry:", 1)[-1], v) for (k, v) in self.tags] for k, v in sorted(six.iteritems(self.data)): if k in data: continue if k == "sdk": v = {v_k: v_v for v_k, v_v in six.iteritems(v) if v_k != "client_ip"} data[k] = v # for a long time culprit was not persisted. In those cases put # the culprit in from the group. 
if data.get("culprit") is None and self.group_id: data["culprit"] = self.group.culprit # Override title and location with dynamically generated data data["title"] = self.title data["location"] = self.location return data # ============================================ # DEPRECATED # ============================================ @property def level(self): # we might want to move to this: # return LOG_LEVELS_MAP.get(self.get_level_display()) or self.group.level if self.group: return self.group.level else: return None def get_level_display(self): # we might want to move to this: # return self.get_tag('level') or self.group.get_level_display() if self.group: return self.group.get_level_display() else: return None # deprecated accessors @property def logger(self): warnings.warn("Event.logger is deprecated. Use Event.tags instead.", DeprecationWarning) return self.get_tag("logger") @property def site(self): warnings.warn("Event.site is deprecated. Use Event.tags instead.", DeprecationWarning) return self.get_tag("site") @property def server_name(self): warnings.warn( "Event.server_name is deprecated. Use Event.tags instead.", DeprecationWarning ) return self.get_tag("server_name") @property def checksum(self): warnings.warn("Event.checksum is no longer used", DeprecationWarning) return "" def error(self): # TODO why is this not a property? warnings.warn("Event.error is deprecated, use Event.title", DeprecationWarning) return self.title error.short_description = _("error") @property def message_short(self): warnings.warn("Event.message_short is deprecated, use Event.title", DeprecationWarning) return self.title class SnubaEvent(EventCommon): """ An event backed by data stored in snuba. This is a readonly event and does not support event creation or save. The basic event data is fetched from snuba, and the event body is fetched from nodestore and bound to the data property in the same way as a regular Event. """ # The minimal list of columns we need to get from snuba to bootstrap an # event. If the client is planning on loading the entire event body from # nodestore anyway, we may as well only fetch the minimum from snuba to # avoid duplicated work. minimal_columns = ["event_id", "group_id", "project_id", "timestamp"] # A list of all useful columns we can get from snuba. selected_columns = minimal_columns + [ "culprit", "location", "message", "platform", "title", "type", # Required to provide snuba-only tags "tags.key", "tags.value", # Required to provide snuba-only 'user' interface "email", "ip_address", "user_id", "username", ] __repr__ = sane_repr("project_id", "group_id") def __init__(self, snuba_values): """ When initializing a SnubaEvent, think about the attributes you might need to access on it. If you only need a few properties, and they are all available in snuba, then you should use `SnubaEvent.selected_colums` (or a subset depending on your needs) But if you know you are going to need the entire event body anyway (which requires a nodestore lookup) you may as well just initialize the event with `SnubaEvent.minimal_colums` and let the rest of of the attributes come from nodestore. 
""" assert all(k in snuba_values for k in SnubaEvent.minimal_columns) # self.snuba_data is a dict of all the stuff we got from snuba self.snuba_data = snuba_values # self.data is a (lazy) dict of everything we got from nodestore node_id = SnubaEvent.generate_node_id( self.snuba_data["project_id"], self.snuba_data["event_id"] ) self.data = NodeData(None, node_id, data=None, wrapper=EventDict) def __getattr__(self, name): """ Depending on what snuba data this event was initialized with, we may have the data available to return, or we may have to look in the `data` dict (which would force a nodestore load). All unresolved self.foo type accesses will come through here. """ if name in ("_project_cache", "_group_cache", "_environment_cache"): raise AttributeError() if name in self.snuba_data: return self.snuba_data[name] else: return self.data[name] # ============================================ # Snuba-only implementations of properties that # would otherwise require nodestore data. # ============================================ @property def tags(self): """ Override of tags property that uses tags from snuba rather than the nodestore event body. This might be useful for implementing tag deletions without having to rewrite nodestore blobs. """ if "tags.key" in self.snuba_data and "tags.value" in self.snuba_data: keys = getattr(self, "tags.key") values = getattr(self, "tags.value") if keys and values and len(keys) == len(values): return sorted(zip(keys, values)) else: return [] else: return super(SnubaEvent, self).tags def get_minimal_user(self): from sentry.interfaces.user import User return User.to_python( { "id": self.user_id, "email": self.email, "username": self.username, "ip_address": self.ip_address, } ) # If the data for these is available from snuba, we assume # it was already normalized on the way in and we can just return # it, otherwise we defer to EventCommon implementation. def get_event_type(self): if "type" in self.snuba_data: return self.snuba_data["type"] return super(SnubaEvent, self).get_event_type() @property def ip_address(self): if "ip_address" in self.snuba_data: return self.snuba_data["ip_address"] return super(SnubaEvent, self).ip_address @property def title(self): if "title" in self.snuba_data: return self.snuba_data["title"] return super(SnubaEvent, self).title @property def culprit(self): if "culprit" in self.snuba_data: return self.snuba_data["culprit"] return super(SnubaEvent, self).culprit @property def location(self): if "location" in self.snuba_data: return self.snuba_data["location"] return super(SnubaEvent, self).location # ==================================================== # Snuba implementations of the django fields on Event # ==================================================== @property def datetime(self): """ Reconstruct the datetime of this event from the snuba timestamp """ # dateutil seems to use tzlocal() instead of UTC even though the string # ends with '+00:00', so just replace the TZ with UTC because we know # all timestamps from snuba are UTC. return parse_date(self.timestamp).replace(tzinfo=pytz.utc) @property def time_spent(self): return None @property def message(self): if "message" in self.snuba_data: return self.snuba_data["message"] return self.data.get("message") @property def platform(self): if "platform" in self.snuba_data: return self.snuba_data["platform"] return self.data.get("platform") @property def id(self): # Because a snuba event will never have a django row id, just return # the hex event_id here. 
We should be moving to a world where we never # have to reference the row id anyway. return self.event_id def save(self): raise NotImplementedError class Event(EventCommon, Model): """ An event backed by data stored in postgres. """ __core__ = False group_id = BoundedBigIntegerField(blank=True, null=True) event_id = models.CharField(max_length=32, null=True, db_column="message_id") project_id = BoundedBigIntegerField(blank=True, null=True) message = models.TextField() platform = models.CharField(max_length=64, null=True) datetime = models.DateTimeField(default=timezone.now, db_index=True) time_spent = BoundedIntegerField(null=True) data = NodeField( blank=True, null=True, ref_func=lambda x: x.project_id or x.project.id, ref_version=2, wrapper=EventDict, ) objects = EventManager() class Meta: app_label = "sentry" db_table = "sentry_message" verbose_name = _("message") verbose_name_plural = _("messages") unique_together = (("project_id", "event_id"),) index_together = (("group_id", "datetime"),) __repr__ = sane_repr("project_id", "group_id") def __getstate__(self): state = Model.__getstate__(self) # do not pickle cached info. We want to fetch this on demand # again. In particular if we were to pickle interfaces we would # pickle a CanonicalKeyView which old sentry workers do not know # about state.pop("_project_cache", None) state.pop("_environment_cache", None) state.pop("_group_cache", None) state.pop("interfaces", None) return state class EventSubjectTemplate(string.Template): idpattern = r"(tag:)?[_a-z][_a-z0-9]*" class EventSubjectTemplateData(object): tag_aliases = {"release": "sentry:release", "dist": "sentry:dist", "user": "sentry:user"} def __init__(self, event): self.event = event def __getitem__(self, name): if name.startswith("tag:"): name = name[4:] value = self.event.get_tag(self.tag_aliases.get(name, name)) if value is None: raise KeyError return six.text_type(value) elif name == "project": return self.event.project.get_full_name() elif name == "projectID": return self.event.project.slug elif name == "shortID" and self.event.group_id: return self.event.group.qualified_short_id elif name == "orgID": return self.event.organization.slug elif name == "title": return self.event.title raise KeyError DEFAULT_SUBJECT_TEMPLATE = EventSubjectTemplate("$shortID - $title")
33.778592
99
0.623996
22,080
0.958458
0
0
7,016
0.304554
0
0
8,813
0.382558
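For context, the generate_node_id helper in the record above derives a deterministic nodestore key from (project_id, event_id), so the event body can later be fetched with only those two values. A small stand-alone sketch of the same idea follows; it is written for Python 3 (the string is encoded before hashing, which the Python 2-era record does not need), and the example IDs are arbitrary.

from hashlib import md5

def generate_node_id(project_id, event_id):
    # Deterministic key: the same (project_id, event_id) pair always maps to
    # the same hex digest, so the stored event body can be located again.
    return md5("{}:{}".format(project_id, event_id).encode("utf-8")).hexdigest()

key = generate_node_id(42, "a1b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6")
print(key)
# Re-deriving the key from the same inputs yields the identical digest.
assert key == generate_node_id(42, "a1b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6")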
5d6b89b1e8e521a2e81232c6e63ef4c5529270e8
2,920
py
Python
Assignment3/src/data/make_nowcast_dataset.py
shikashyam/BigDataSystemsCoursework
d7f9cabbfb18b0e3303292b65af1ffd530e24ccc
[ "MIT" ]
null
null
null
Assignment3/src/data/make_nowcast_dataset.py
shikashyam/BigDataSystemsCoursework
d7f9cabbfb18b0e3303292b65af1ffd530e24ccc
[ "MIT" ]
null
null
null
Assignment3/src/data/make_nowcast_dataset.py
shikashyam/BigDataSystemsCoursework
d7f9cabbfb18b0e3303292b65af1ffd530e24ccc
[ "MIT" ]
4
2022-02-12T23:59:54.000Z
2022-02-16T22:53:32.000Z
""" Makes training and test dataset for nowcasting model using SEVIR """ # -*- coding: utf-8 -*- import argparse import logging import os import h5py os.environ["HDF5_USE_FILE_LOCKING"]='FALSE' import sys import numpy as np import tensorflow as tf from nowcast_generator import get_nowcast_test_generator # parser = argparse.ArgumentParser(description='Make nowcast training & test datasets using SEVIR') # parser.add_argument('--sevir_data', type=str, help='location of SEVIR dataset',default='../../data/sevir') # parser.add_argument('--sevir_catalog', type=str, help='location of SEVIR dataset',default='../../data/CATALOG.csv') # parser.add_argument('--output_location', type=str, help='location of SEVIR dataset',default='../../data/interim') # parser.add_argument('--n_chunks', type=int, help='Number of chucks to use (increase if memory limited)',default=10) #args = parser.parse_args() def generate_data(sevir_location,sevir_catalog,output_location,n_chunks=10): """ Runs data processing scripts to extract training set from SEVIR """ logger = logging.getLogger(__name__) logger.info('making final data set from raw data') #trn_generator = get_nowcast_train_generator(sevir_catalog=args.sevir_catalog,sevir_location=args.sevir_data) tst_generator = get_nowcast_test_generator(sevir_catalog,sevir_location) #ogger.info('Reading/writing training data to %s' % ('%s/nowcast_training.h5' % args.output_location)) #read_write_chunks('%s/nowcast_training.h5' % args.output_location,trn_generator,args.n_chunks) logger.info('Reading/writing testing data to ' + output_location+'/nowcast_testing.h5') read_write_chunks(output_location+'/nowcast_testing.h5',tst_generator,n_chunks) def read_write_chunks( filename, generator, n_chunks ): logger = logging.getLogger(__name__) chunksize = len(generator)//n_chunks # get first chunk logger.info('Gathering chunk 0/%s:' % n_chunks) X,Y=generator.load_batches(n_batches=chunksize,offset=0,progress_bar=True) # Create datasets with h5py.File(filename, 'w') as hf: hf.create_dataset('IN', data=X[0], maxshape=(None,X[0].shape[1],X[0].shape[2],X[0].shape[3])) hf.create_dataset('OUT', data=Y[0], maxshape=(None,Y[0].shape[1],Y[0].shape[2],Y[0].shape[3])) # Gather other chunks for c in range(1,n_chunks+1): offset = c*chunksize n_batches = min(chunksize,len(generator)-offset) if n_batches<0: # all done break logger.info('Gathering chunk %d/%s:' % (c,n_chunks)) X,Y=generator.load_batches(n_batches=n_batches,offset=offset,progress_bar=True) with h5py.File(filename, 'a') as hf: hf['IN'].resize((hf['IN'].shape[0] + X[0].shape[0]), axis = 0) hf['OUT'].resize((hf['OUT'].shape[0] + Y[0].shape[0]), axis = 0) hf['IN'][-X[0].shape[0]:] = X[0] hf['OUT'][-Y[0].shape[0]:] = Y[0]
42.941176
117
0.7
0
0
0
0
0
0
0
0
1,361
0.466096
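The script in the record above grows its HDF5 datasets chunk by chunk: the first write creates them with an extendable first axis (maxshape starting with None), and each later chunk is appended by resizing along axis 0. A minimal self-contained sketch of that append pattern follows; the file name, dataset name, and array shapes are arbitrary stand-ins, not values from the record.

import h5py
import numpy as np

filename = "demo_append.h5"  # throwaway example file
chunks = [np.random.rand(4, 8, 8, 1).astype("float32") for _ in range(3)]

# First chunk creates the dataset with an unlimited first axis.
with h5py.File(filename, "w") as hf:
    first = chunks[0]
    hf.create_dataset("IN", data=first, maxshape=(None,) + first.shape[1:])

# Remaining chunks are appended by resizing along axis 0 and filling the tail.
for block in chunks[1:]:
    with h5py.File(filename, "a") as hf:
        ds = hf["IN"]
        ds.resize(ds.shape[0] + block.shape[0], axis=0)
        ds[-block.shape[0]:] = block

with h5py.File(filename, "r") as hf:
    print(hf["IN"].shape)   # (12, 8, 8, 1)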
5d6e1f190b9f10fc581499ca4a914cfa2670ffb2
9,576
py
Python
blender-plugin/import_cast.py
rtasan/ApexCastImporter
17f833ab8ff9757e295ca8eadb0cb210bfdd6476
[ "MIT" ]
null
null
null
blender-plugin/import_cast.py
rtasan/ApexCastImporter
17f833ab8ff9757e295ca8eadb0cb210bfdd6476
[ "MIT" ]
3
2021-05-24T12:29:43.000Z
2021-05-28T13:07:39.000Z
blender-plugin/import_cast.py
rtasan/ApexCastImporter
17f833ab8ff9757e295ca8eadb0cb210bfdd6476
[ "MIT" ]
null
null
null
# The Original importer was created by Nick # Copyright (c) 2020 Nick import bpy import bmesh import os import array import math from mathutils import * from bpy_extras.image_utils import load_image from .cast import Cast, Model, Animation, Curve, NotificationTrack, Mesh, Skeleton, Bone, Material, File def utilityBuildPath(root, asset): if os.path.isabs(asset): return asset root = os.path.dirname(root) return os.path.join(root, asset) def utilityAssignBSDFMaterialSlots(material, slots, path): material.node_tree.nodes.remove( material.node_tree.nodes["Principled BSDF"]) shader = material.node_tree.nodes.new("ShaderNodeGroup") output = material.node_tree.nodes['Material Output'] # グループシェーダーを作成 shader.node_tree = bpy.data.node_groups['S/G-Blender'] #テクスチャを定義 switcher = { "albedo": "Diffuse map", "diffuse": "Diffuse map", "specular": "Specular map", "ao": "AO map", "cavity": "Cavity map", "gloss": "Glossiness map", "normal": "Normal map", "emissive": "Emission input" } # Loop and connect the slots for slot in slots: connection = slots[slot] if not connection.__class__ is File: continue if not slot in switcher: continue texture = material.node_tree.nodes.new("ShaderNodeTexImage") #画像ノードを作成 try: texture.image = bpy.data.images.load( utilityBuildPath(path, connection.Path())) #画像を読み込み except RuntimeError: pass if texture.image: material.node_tree.links.new( shader.inputs[switcher[slot]], texture.outputs["Color"]) material.node_tree.links.new(shader.outputs[0], output.inputs[0]) # texture.outputのカラーとinputをつなげる(link) else: material.node_tree.nodes.remove(texture) def importSkeletonNode(name, skeleton): if skeleton is None: return None armature = bpy.data.armatures.new("Joints") armature.display_type = "STICK" skeletonObj = bpy.data.objects.new(name, armature) skeletonObj.show_in_front = True bpy.context.view_layer.active_layer_collection.collection.objects.link( skeletonObj) bpy.context.view_layer.objects.active = skeletonObj bpy.ops.object.mode_set(mode='EDIT') bones = skeleton.Bones() handles = [None] * len(bones) matrices = {} for i, bone in enumerate(bones): newBone = armature.edit_bones.new(bone.Name()) newBone.tail = 0, 0.05, 0 # I am sorry but blender sucks tempQuat = bone.LocalRotation() # Also sucks, WXYZ? 
=> XYZW master race matRotation = Quaternion( (tempQuat[3], tempQuat[0], tempQuat[1], tempQuat[2])).to_matrix().to_4x4() matTranslation = Matrix.Translation(Vector(bone.LocalPosition())) matrices[bone.Name()] = matTranslation @ matRotation handles[i] = newBone for i, bone in enumerate(bones): if bone.ParentIndex() > -1: handles[i].parent = handles[bone.ParentIndex()] bpy.context.view_layer.objects.active = skeletonObj bpy.ops.object.mode_set(mode='POSE') for bone in skeletonObj.pose.bones: bone.matrix_basis.identity() bone.matrix = matrices[bone.name] bpy.ops.pose.armature_apply() return skeletonObj def importMaterialNode(path, material): # If you already created the material, ignore this materialNew = bpy.data.materials.get(material.Name()) if materialNew is not None: return material.Name(), materialNew materialNew = bpy.data.materials.new(name=material.Name()) materialNew.use_nodes = True # Blender really only wants a BSDF shader node # so we're gonna give it one utilityAssignBSDFMaterialSlots(materialNew, material.Slots(), path) return material.Name(), materialNew def importModelNode(model, path): # Extract the name of this model from the path modelName = os.path.splitext(os.path.basename(path))[0] # Import skeleton for binds, materials for meshes skeletonObj = importSkeletonNode(modelName, model.Skeleton()) materialArray = {key: value for (key, value) in ( importMaterialNode(path, x) for x in model.Materials())} meshes = model.Meshes() for mesh in meshes: newMesh = bpy.data.meshes.new("polySurfaceMesh") blendMesh = bmesh.new() vertexColorLayer = blendMesh.loops.layers.color.new("color1") vertexWeightLayer = blendMesh.verts.layers.deform.new() vertexUVLayers = [blendMesh.loops.layers.uv.new( "map%d" % x) for x in range(mesh.UVLayerCount())] vertexPositions = mesh.VertexPositionBuffer() for x in range(0, len(vertexPositions), 3): blendMesh.verts.new( Vector((vertexPositions[x], vertexPositions[x + 1], vertexPositions[x + 2]))) blendMesh.verts.ensure_lookup_table() faceLookupMap = [1, 2, 0] vertexNormalLayer = [] vertexNormals = mesh.VertexNormalBuffer() vertexColors = mesh.VertexColorBuffer() vertexUVs = [mesh.VertexUVLayerBuffer( x) for x in range(mesh.UVLayerCount())] def vertexToFaceVertex(face): for x, loop in enumerate(face.loops): vertexIndex = faces[faceStart + faceLookupMap[x]] if vertexNormals is not None: vertexNormalLayer.append((vertexNormals[vertexIndex * 3], vertexNormals[( vertexIndex * 3) + 1], vertexNormals[(vertexIndex * 3) + 2])) for uvLayer in range(mesh.UVLayerCount()): uv = Vector( (vertexUVs[uvLayer][vertexIndex * 2], vertexUVs[uvLayer][(vertexIndex * 2) + 1])) uv.y = 1.0 - uv.y loop[vertexUVLayers[uvLayer]].uv = uv if vertexColors is not None: loop[vertexColorLayer] = [ (vertexColors[vertexIndex] >> i & 0xff) / 255.0 for i in (24, 16, 8, 0)] faces = mesh.FaceBuffer() for faceStart in range(0, len(faces), 3): indices = [blendMesh.verts[faces[faceStart + faceLookupMap[0]]], blendMesh.verts[faces[faceStart + faceLookupMap[1]]], blendMesh.verts[faces[faceStart + faceLookupMap[2]]]] try: newLoop = blendMesh.faces.new(indices) except ValueError: continue else: vertexToFaceVertex(newLoop) maximumInfluence = mesh.MaximumWeightInfluence() if maximumInfluence > 0: weightBoneBuffer = mesh.VertexWeightBoneBuffer() weightValueBuffer = mesh.VertexWeightValueBuffer() for x, vert in enumerate(blendMesh.verts): if (weightValueBuffer[x * maximumInfluence] > 0.0): vert[vertexWeightLayer][weightBoneBuffer[x * maximumInfluence] ] = weightValueBuffer[x * maximumInfluence] 
blendMesh.to_mesh(newMesh) newMesh.create_normals_split() if len(vertexNormalLayer) > 0: for x, _loop in enumerate(newMesh.loops): newMesh.loops[x].normal = vertexNormalLayer[x] newMesh.validate(clean_customdata=False) clnors = array.array('f', [0.0] * (len(newMesh.loops) * 3)) newMesh.loops.foreach_get("normal", clnors) newMesh.polygons.foreach_set( "use_smooth", [True] * len(newMesh.polygons)) newMesh.normals_split_custom_set(tuple(zip(*(iter(clnors),) * 3))) newMesh.use_auto_smooth = True meshObj = bpy.data.objects.new("CastMesh", newMesh) bpy.context.view_layer.active_layer_collection.collection.objects.link( meshObj) bpy.context.view_layer.objects.active = meshObj meshMaterial = mesh.Material() if meshMaterial is not None: meshObj.data.materials.append(materialArray[meshMaterial.Name()]) for bone in skeletonObj.pose.bones: meshObj.vertex_groups.new(name=bone.name) meshObj.parent = skeletonObj modifier = meshObj.modifiers.new('Armature Rig', 'ARMATURE') modifier.object = skeletonObj modifier.use_bone_envelopes = False modifier.use_vertex_groups = True def importRootNode(node, path): for child in node.ChildrenOfType(Model): importModelNode(child, path) # for child in node.ChildrenOfType(Animation): # importAnimationNode(child, path) def importCast(path): cast = Cast() cast.load(path) for root in cast.Roots(): importRootNode(root, path) def load(self, context, filepath=""): # シェーダーをアペンド shader_path = bpy.context.preferences.addons[__package__].preferences.apex_sgshader_path try: file_path = shader_path inner_path = 'NodeTree' object_name = 'S/G-Blender' bpy.ops.wm.append( filepath=os.path.join(file_path, inner_path, object_name), directory=os.path.join(file_path, inner_path), filename=object_name ) except: self.report({'ERROR'}, 'Set the Shader path in AddonPreferences first.') return False # Parse and load cast nodes importCast(filepath) # Update the scene, reset view mode before returning. bpy.context.view_layer.update() bpy.ops.object.mode_set(mode="OBJECT") return True
33.957447
130
0.631788
0
0
0
0
0
0
0
0
1,210
0.124923
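One detail worth isolating from the importer in the record above: vertex colors arrive as one packed 32-bit integer per vertex and are unpacked into four floats by shifting 24, 16, 8, and 0 bits. The snippet below illustrates only that unpacking; the packed value is an arbitrary example, and the channel order is simply the order in which the importer assigns the components to the loop color.

def unpack_color(packed):
    # Same bit layout as the importer: highest byte first, each byte scaled
    # from 0-255 down to 0.0-1.0.
    return [(packed >> shift & 0xFF) / 255.0 for shift in (24, 16, 8, 0)]

packed_color = 0xFF8040C0   # hypothetical packed vertex color
print(unpack_color(packed_color))   # [1.0, ~0.502, ~0.251, ~0.753]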
5d6e47beb4576bf2e083ccdcb792c2e2830c83c4
50,279
py
Python
user_program/usb4vc_ui.py
dekuNukem/USB4VC
66c4f0b4a4acd7cec6654ea0dd4da026edf5d24c
[ "MIT" ]
78
2022-02-07T16:48:11.000Z
2022-03-31T12:25:35.000Z
user_program/usb4vc_ui.py
dekuNukem/USB4VC
66c4f0b4a4acd7cec6654ea0dd4da026edf5d24c
[ "MIT" ]
1
2022-02-26T20:16:08.000Z
2022-02-26T20:24:04.000Z
user_program/usb4vc_ui.py
dekuNukem/USB4VC
66c4f0b4a4acd7cec6654ea0dd4da026edf5d24c
[ "MIT" ]
1
2022-02-24T03:34:15.000Z
2022-02-24T03:34:15.000Z
# https://luma-oled.readthedocs.io/en/latest/software.html import os import sys import time import threading import usb4vc_oled from luma.core.render import canvas import RPi.GPIO as GPIO import usb4vc_usb_scan import usb4vc_shared import usb4vc_show_ev import usb4vc_check_update import json import subprocess from subprocess import Popen, PIPE from usb4vc_shared import this_app_dir_path from usb4vc_shared import config_dir_path from usb4vc_shared import firmware_dir_path from usb4vc_shared import temp_dir_path from usb4vc_shared import ensure_dir from usb4vc_shared import i2c_bootloader_pbid from usb4vc_shared import usb_bootloader_pbid config_file_path = os.path.join(config_dir_path, 'config.json') ensure_dir(this_app_dir_path) ensure_dir(config_dir_path) ensure_dir(firmware_dir_path) ensure_dir(temp_dir_path) PLUS_BUTTON_PIN = 27 MINUS_BUTTON_PIN = 19 ENTER_BUTTON_PIN = 22 SHUTDOWN_BUTTON_PIN = 21 PBOARD_RESET_PIN = 25 PBOARD_BOOT0_PIN = 12 SLEEP_LED_PIN = 26 GPIO.setmode(GPIO.BCM) GPIO.setup(PBOARD_RESET_PIN, GPIO.IN) GPIO.setup(PBOARD_BOOT0_PIN, GPIO.IN) GPIO.setup(SLEEP_LED_PIN, GPIO.OUT) GPIO.output(SLEEP_LED_PIN, GPIO.LOW) SPI_MOSI_MAGIC = 0xde SPI_MOSI_MSG_TYPE_SET_PROTOCOL = 2 set_protocl_spi_msg_template = [SPI_MOSI_MAGIC, 0, SPI_MOSI_MSG_TYPE_SET_PROTOCOL] + [0]*29 class my_button(object): def __init__(self, bcm_pin): super(my_button, self).__init__() self.pin_number = bcm_pin GPIO.setup(self.pin_number, GPIO.IN, pull_up_down=GPIO.PUD_UP) self.prev_state = GPIO.input(self.pin_number) def is_pressed(self): result = False current_state = GPIO.input(self.pin_number) if self.prev_state == 1 and current_state == 0: result = True self.prev_state = current_state return result PBOARD_ID_UNKNOWN = 0 PBOARD_ID_IBMPC = 1 PBOARD_ID_ADB = 2 pboard_info_spi_msg = [0] * 32 this_pboard_id = PBOARD_ID_UNKNOWN USBGP_BTN_SOUTH = 0x130 USBGP_BTN_EAST = 0x131 USBGP_BTN_C = 0x132 USBGP_BTN_NORTH = 0x133 USBGP_BTN_WEST = 0x134 USBGP_BTN_Z = 0x135 USBGP_BTN_TL = 0x136 USBGP_BTN_TR = 0x137 USBGP_BTN_TL2 = 0x138 USBGP_BTN_TR2 = 0x139 USBGP_BTN_SELECT = 0x13a USBGP_BTN_START = 0x13b USBGP_BTN_MODE = 0x13c USBGP_BTN_THUMBL = 0x13d USBGP_BTN_THUMBR = 0x13e USBGP_BTN_A = USBGP_BTN_SOUTH USBGP_BTN_B = USBGP_BTN_EAST USBGP_BTN_X = USBGP_BTN_NORTH USBGP_BTN_Y = USBGP_BTN_WEST USBGP_ABS_X = 0x00 # left stick X USBGP_ABS_Y = 0x01 # left stick Y USBGP_ABS_Z = 0x02 # left analog trigger USBGP_ABS_RX = 0x03 # right stick X USBGP_ABS_RY = 0x04 # right stick Y USBGP_ABS_RZ = 0x05 # right analog trigger USBGP_ABS_HAT0X = 0x10 # D-pad X USBGP_ABS_HAT0Y = 0x11 # D-pad Y GENERIC_USB_GAMEPAD_TO_MOUSE_KB_DEAULT_MAPPING = { "MAPPING_TYPE": "DEFAULT_MOUSE_KB", 'BTN_TL': {'code': 'BTN_LEFT'}, 'BTN_TR': {'code': 'BTN_RIGHT'}, 'BTN_TL2': {'code': 'BTN_LEFT'}, 'BTN_TR2': {'code': 'BTN_RIGHT'}, 'ABS_X': {'code': 'REL_X'}, 'ABS_Y': {'code': 'REL_Y'}, 'ABS_HAT0X': {'code': 'KEY_RIGHT', 'code_neg': 'KEY_LEFT'}, 'ABS_HAT0Y': {'code': 'KEY_DOWN', 'code_neg': 'KEY_UP'} } IBM_GENERIC_USB_GAMEPAD_TO_15PIN_GAMEPORT_GAMEPAD_DEAULT_MAPPING = { "MAPPING_TYPE": "DEFAULT_15PIN", # buttons to buttons 'BTN_SOUTH': {'code':'IBM_GGP_BTN_1'}, 'BTN_NORTH': {'code':'IBM_GGP_BTN_2'}, 'BTN_EAST': {'code':'IBM_GGP_BTN_3'}, 'BTN_WEST': {'code':'IBM_GGP_BTN_4'}, 'BTN_TL': {'code':'IBM_GGP_BTN_1'}, 'BTN_TR': {'code':'IBM_GGP_BTN_2'}, 'BTN_Z': {'code':'IBM_GGP_BTN_3'}, 'BTN_C': {'code':'IBM_GGP_BTN_4'}, 'BTN_TL2': {'code':'IBM_GGP_BTN_1'}, 'BTN_TR2': {'code':'IBM_GGP_BTN_2'}, # analog axes to analog axes 'ABS_X': {'code':'IBM_GGP_JS1_X'}, 'ABS_Y': 
{'code':'IBM_GGP_JS1_Y'}, 'ABS_HAT0X': {'code':'IBM_GGP_JS1_X'}, 'ABS_HAT0Y': {'code':'IBM_GGP_JS1_Y'}, 'ABS_RX': {'code':'IBM_GGP_JS2_X'}, 'ABS_RY': {'code':'IBM_GGP_JS2_Y'}, } PROTOCOL_OFF = {'pid':0, 'display_name':"OFF"} PROTOCOL_AT_PS2_KB = {'pid':1, 'display_name':"AT/PS2"} PROTOCOL_XT_KB = {'pid':2, 'display_name':"PC XT"} PROTOCOL_ADB_KB = {'pid':3, 'display_name':"ADB"} PROTOCOL_PS2_MOUSE_NORMAL = {'pid':4, 'display_name':"PS/2"} PROTOCOL_MICROSOFT_SERIAL_MOUSE = {'pid':5, 'display_name':"Microsft Serial"} PROTOCOL_ADB_MOUSE = {'pid':6, 'display_name':"ADB"} PROTOCOL_15PIN_GAMEPORT_GAMEPAD = {'pid':7, 'display_name':"Generic 15-Pin", 'mapping':IBM_GENERIC_USB_GAMEPAD_TO_15PIN_GAMEPORT_GAMEPAD_DEAULT_MAPPING} PROTOCOL_MOUSESYSTEMS_SERIAL_MOUSE = {'pid':8, 'display_name':"MouseSys Serial"} PROTOCOL_USB_GP_TO_MOUSE_KB = {'pid':0, 'display_name':'Mouse & KB', 'mapping':GENERIC_USB_GAMEPAD_TO_MOUSE_KB_DEAULT_MAPPING} PROTOCOL_RAW_KEYBOARD = {'pid':125, 'display_name':"Raw data"} PROTOCOL_RAW_MOUSE = {'pid':126, 'display_name':"Raw data"} PROTOCOL_RAW_GAMEPAD = {'pid':127, 'display_name':"Raw data"} custom_profile_list = [] try: onlyfiles = [f for f in os.listdir(config_dir_path) if os.path.isfile(os.path.join(config_dir_path, f))] json_map_files = [os.path.join(config_dir_path, x) for x in onlyfiles if x.lower().startswith('usb4vc_map') and x.lower().endswith(".json")] for item in json_map_files: print('loading json file:', item) with open(item) as json_file: custom_profile_list.append(json.load(json_file)) except Exception as e: print('exception json load:', e) def get_list_of_usb_drive(): usb_drive_set = set() try: usb_drive_path = subprocess.getoutput(f"timeout 2 df -h | grep -i usb").replace('\r', '').split('\n') for item in [x for x in usb_drive_path if len(x) > 2]: usb_drive_set.add(os.path.join(item.split(' ')[-1], 'usb4vc')) except Exception as e: print("exception get_list_of_usb_drive:", e) return usb_drive_set def copy_debug_log(): usb_drive_set = get_list_of_usb_drive() if len(usb_drive_set) == 0: return False for this_path in usb_drive_set: if os.path.isdir(this_path): print('copying debug log to', this_path) os.system(f'sudo cp -v /home/pi/usb4vc/usb4vc_debug_log.txt {this_path}') return True def check_usb_drive(): usb_drive_set = get_list_of_usb_drive() if len(usb_drive_set) == 0: return False, 'USB Drive Not Found' for this_path in usb_drive_set: usb_config_path = os.path.join(this_path, 'config') if not os.path.isdir(usb_config_path): usb_config_path = None if usb_config_path is not None: return True, usb_config_path return False, 'No Update Data Found' def get_pbid_and_version(dfu_file_name): pbid = None try: pbid = int(dfu_file_name.split('PBID')[-1].split('_')[0]) except Exception as e: print("exception fw pbid parse:", e) fw_ver_tuple = None try: fw_ver = dfu_file_name.lower().split('_v')[-1].split('.')[0].split('_') fw_ver_tuple = (int(fw_ver[0]), int(fw_ver[1]), int(fw_ver[2])) except Exception as e: print('exception fw ver parse:', e) return pbid, fw_ver_tuple def reset_pboard(): print("resetting protocol board...") GPIO.setup(PBOARD_BOOT0_PIN, GPIO.IN) GPIO.setup(PBOARD_RESET_PIN, GPIO.OUT) GPIO.output(PBOARD_RESET_PIN, GPIO.LOW) time.sleep(0.05) GPIO.setup(PBOARD_RESET_PIN, GPIO.IN) time.sleep(0.05) print("done") def enter_dfu(): # RESET LOW: Enter reset GPIO.setup(PBOARD_RESET_PIN, GPIO.OUT) GPIO.output(PBOARD_RESET_PIN, GPIO.LOW) time.sleep(0.05) # BOOT0 HIGH: Boot into DFU mode GPIO.setup(PBOARD_BOOT0_PIN, GPIO.OUT) GPIO.output(PBOARD_BOOT0_PIN, GPIO.HIGH) 
time.sleep(0.05) # Release RESET, BOOT0 still HIGH, STM32 now in DFU mode GPIO.setup(PBOARD_RESET_PIN, GPIO.IN) time.sleep(1.5) def exit_dfu(): # Release BOOT0 GPIO.setup(PBOARD_BOOT0_PIN, GPIO.IN) # Activate RESET GPIO.setup(PBOARD_RESET_PIN, GPIO.OUT) GPIO.output(PBOARD_RESET_PIN, GPIO.LOW) time.sleep(0.05) # Release RESET, BOOT0 is LOW, STM32 boots in normal mode GPIO.setup(PBOARD_RESET_PIN, GPIO.IN) time.sleep(1.5) def fw_update(fw_path, pbid): is_updated = False if pbid in i2c_bootloader_pbid and fw_path.lower().endswith('.hex'): enter_dfu() os.system(f'sudo stm32flash -w {fw_path} -a 0x3b /dev/i2c-1') is_updated = True elif pbid in usb_bootloader_pbid and fw_path.lower().endswith('.dfu'): enter_dfu() lsusb_str = subprocess.getoutput("lsusb") if 'in DFU'.lower() not in lsusb_str.lower(): with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Connect a USB cable", usb4vc_oled.font_regular, 0, draw) usb4vc_oled.oled_print_centered("from P-Card to RPi", usb4vc_oled.font_regular, 10, draw) usb4vc_oled.oled_print_centered("and try again", usb4vc_oled.font_regular, 20, draw) time.sleep(4) else: os.system(f'sudo dfu-util --device ,0483:df11 -a 0 -D {fw_path}') is_updated = True exit_dfu() return is_updated def update_pboard_firmware(this_pid): onlyfiles = [f for f in os.listdir(firmware_dir_path) if os.path.isfile(os.path.join(firmware_dir_path, f))] firmware_files = [x for x in onlyfiles if x.startswith("PBFW_") and (x.lower().endswith(".dfu") or x.lower().endswith(".hex")) and "PBID" in x] this_pboard_version_tuple = (pboard_info_spi_msg[5], pboard_info_spi_msg[6], pboard_info_spi_msg[7]) for item in firmware_files: pbid, fw_ver_tuple = get_pbid_and_version(item) if pbid is None or fw_ver_tuple is None: continue print('update_pboard_firmware:', this_pid, this_pboard_version_tuple, fw_ver_tuple) if pbid == this_pid and fw_ver_tuple > this_pboard_version_tuple: print("DOING IT NOW") with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Loading Firmware:", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered(item.strip("PBFW_").strip(".dfu").strip(".hex"), usb4vc_oled.font_regular, 16, draw) if fw_update(os.path.join(firmware_dir_path, item), this_pid): return True return False def update_from_usb(usb_config_path): if usb_config_path is not None: os.system(f'cp -v /home/pi/usb4vc/config/config.json {usb_config_path}') os.system('mv -v /home/pi/usb4vc/config/config.json /home/pi/usb4vc/config.json') os.system('rm -rfv /home/pi/usb4vc/config/*') os.system(f"cp -v {os.path.join(usb_config_path, '*')} /home/pi/usb4vc/config") os.system("mv -v /home/pi/usb4vc/config.json /home/pi/usb4vc/config/config.json") ibmpc_keyboard_protocols = [PROTOCOL_OFF, PROTOCOL_AT_PS2_KB, PROTOCOL_XT_KB] ibmpc_mouse_protocols = [PROTOCOL_OFF, PROTOCOL_PS2_MOUSE_NORMAL, PROTOCOL_MICROSOFT_SERIAL_MOUSE, PROTOCOL_MOUSESYSTEMS_SERIAL_MOUSE] ibmpc_gamepad_protocols = [PROTOCOL_OFF, PROTOCOL_15PIN_GAMEPORT_GAMEPAD, PROTOCOL_USB_GP_TO_MOUSE_KB] adb_keyboard_protocols = [PROTOCOL_OFF, PROTOCOL_ADB_KB] adb_mouse_protocols = [PROTOCOL_OFF, PROTOCOL_ADB_MOUSE] adb_gamepad_protocols = [PROTOCOL_OFF, PROTOCOL_USB_GP_TO_MOUSE_KB] raw_keyboard_protocols = [PROTOCOL_OFF, PROTOCOL_RAW_KEYBOARD] raw_mouse_protocols = [PROTOCOL_OFF, PROTOCOL_RAW_MOUSE] raw_gamepad_protocols = [PROTOCOL_OFF, PROTOCOL_RAW_GAMEPAD] mouse_sensitivity_list = [1, 1.25, 1.5, 1.75, 0.25, 0.5, 0.75] """ key is protocol card ID conf_dict[pbid]: hw revision current keyboard protocol current mouse 
protocol current gamepad procotol mouse sensitivity """ configuration_dict = {} LINUX_EXIT_CODE_TIMEOUT = 124 def bt_setup(): rfkill_str = subprocess.getoutput("/usr/sbin/rfkill -n") if 'bluetooth' not in rfkill_str: return 1, "no BT receiver found" os.system('/usr/sbin/rfkill unblock bluetooth') time.sleep(0.1) exit_code = os.system('timeout 1 bluetoothctl agent NoInputNoOutput') >> 8 if exit_code == LINUX_EXIT_CODE_TIMEOUT: return 2, 'bluetoothctl stuck' return 0, '' def scan_bt_devices(timeout_sec = 5): exit_code = os.system(f"timeout {timeout_sec} bluetoothctl --agent NoInputNoOutput scan on") >> 8 if exit_code != LINUX_EXIT_CODE_TIMEOUT: return None, 'scan error' device_str = subprocess.getoutput("bluetoothctl --agent NoInputNoOutput devices") dev_list = [] for line in device_str.replace('\r', '').split('\n'): if 'device' not in line.lower(): continue line_split = line.split(' ', maxsplit=2) # skip if device has no name if len(line_split) < 3 or line_split[2].count('-') == 5: continue dev_list.append((line_split[1], line_split[2])) return dev_list, '' def pair_device(mac_addr): is_ready = False is_sent = False fail_phrases = ['fail', 'error', 'not available', 'excep'] with Popen(["bluetoothctl", "--agent", "NoInputNoOutput"], stdout=PIPE, stdin=PIPE, bufsize=1, universal_newlines=True, shell=True) as p: for line in p.stdout: print(line, end='') line_lo = line.lower() if 'registered' in line_lo: is_ready = True if is_ready is False: continue if '#' in line_lo and is_sent == False: p.stdin.write(f'pair {mac_addr}\n') is_sent = True if 'PIN code:' in line: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Enter PIN code:", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered(line.split('PIN code:')[-1], usb4vc_oled.font_medium, 15, draw) if '(yes/no)' in line: p.stdin.write('yes\n') if 'number in 0-999999' in line: return False, "Error: Passkey needed" if 'successful' in line_lo: p.stdin.write('exit\n') return True, 'Success!' for item in fail_phrases: if item in line_lo: p.stdin.write('exit\n') return False, line return False, "wtf" def get_paired_devices(): dev_set = set() try: device_str = subprocess.getoutput(f"timeout 5 bluetoothctl --agent NoInputNoOutput paired-devices") for line in device_str.replace('\r', '').split('\n'): if 'device' not in line.lower(): continue line_split = line.split(' ', maxsplit=2) # skip if device has no name if len(line_split) < 3 or line_split[2].count('-') == 5: continue dev_set.add((line_split[1], line_split[2])) except Exception as e: print('exception get_paired_devices:', e) return dev_set def load_config(): global configuration_dict try: with open(config_file_path) as json_file: temp_dict = json.load(json_file) # json dump all keys as strings, need to convert them back to ints for key in temp_dict: if key.isdigit(): configuration_dict[int(key)] = temp_dict[key] else: configuration_dict[key] = temp_dict[key] except Exception as e: print("exception config load failed!", e) def get_ip_address(): ip_str = subprocess.getoutput("timeout 1 hostname -I") ip_list = [x for x in ip_str.split(' ') if '.' 
in x] if len(ip_list) == 0: return "Offline" return f'{ip_list[0]}' def save_config(): try: with open(config_file_path, 'w', encoding='utf-8') as save_file: save_file.write(json.dumps(configuration_dict)) except Exception as e: print("exception config save failed!", e) curve_vertial_axis_x_pos = 80 curve_horizontal_axis_width = 32 curve_linear = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20, 21: 21, 22: 22, 23: 23, 24: 24, 25: 25, 26: 26, 27: 27, 28: 28, 29: 29, 30: 30, 31: 31, 32: 32, 33: 33, 34: 34, 35: 35, 36: 36, 37: 37, 38: 38, 39: 39, 40: 40, 41: 41, 42: 42, 43: 43, 44: 44, 45: 45, 46: 46, 47: 47, 48: 48, 49: 49, 50: 50, 51: 51, 52: 52, 53: 53, 54: 54, 55: 55, 56: 56, 57: 57, 58: 58, 59: 59, 60: 60, 61: 61, 62: 62, 63: 63, 64: 64, 65: 65, 66: 66, 67: 67, 68: 68, 69: 69, 70: 70, 71: 71, 72: 72, 73: 73, 74: 74, 75: 75, 76: 76, 77: 77, 78: 78, 79: 79, 80: 80, 81: 81, 82: 82, 83: 83, 84: 84, 85: 85, 86: 86, 87: 87, 88: 88, 89: 89, 90: 90, 91: 91, 92: 92, 93: 93, 94: 94, 95: 95, 96: 96, 97: 97, 98: 98, 99: 99, 100: 100, 101: 101, 102: 102, 103: 103, 104: 104, 105: 105, 106: 106, 107: 107, 108: 108, 109: 109, 110: 110, 111: 111, 112: 112, 113: 113, 114: 114, 115: 115, 116: 116, 117: 117, 118: 118, 119: 119, 120: 120, 121: 121, 122: 122, 123: 123, 124: 124, 125: 125, 126: 126, 127: 127} curve1 = {0: 1, 1: 1, 2: 2, 3: 2, 4: 3, 5: 4, 6: 4, 7: 5, 8: 5, 9: 6, 10: 6, 11: 7, 12: 7, 13: 8, 14: 8, 15: 9, 16: 9, 17: 10, 18: 11, 19: 11, 20: 12, 21: 12, 22: 13, 23: 13, 24: 14, 25: 15, 26: 15, 27: 16, 28: 16, 29: 17, 30: 18, 31: 18, 32: 19, 33: 19, 34: 20, 35: 21, 36: 21, 37: 22, 38: 22, 39: 23, 40: 24, 41: 24, 42: 25, 43: 26, 44: 26, 45: 27, 46: 28, 47: 28, 48: 29, 49: 30, 50: 30, 51: 31, 52: 32, 53: 33, 54: 33, 55: 34, 56: 35, 57: 36, 58: 36, 59: 37, 60: 38, 61: 39, 62: 39, 63: 40, 64: 41, 65: 42, 66: 43, 67: 44, 68: 45, 69: 46, 70: 46, 71: 47, 72: 48, 73: 49, 74: 50, 75: 51, 76: 52, 77: 53, 78: 55, 79: 56, 80: 57, 81: 58, 82: 59, 83: 60, 84: 61, 85: 62, 86: 63, 87: 65, 88: 66, 89: 67, 90: 68, 91: 70, 92: 71, 93: 72, 94: 73, 95: 75, 96: 76, 97: 77, 98: 79, 99: 80, 100: 81, 101: 83, 102: 84, 103: 86, 104: 87, 105: 89, 106: 90, 107: 92, 108: 93, 109: 95, 110: 96, 111: 98, 112: 100, 113: 101, 114: 103, 115: 105, 116: 106, 117: 108, 118: 110, 119: 112, 120: 113, 121: 115, 122: 117, 123: 119, 124: 121, 125: 123, 126: 125, 127: 127} curve2 = {0: 1, 1: 1, 2: 1, 3: 1, 4: 2, 5: 2, 6: 2, 7: 2, 8: 2, 9: 3, 10: 3, 11: 3, 12: 3, 13: 4, 14: 4, 15: 4, 16: 4, 17: 5, 18: 5, 19: 5, 20: 5, 21: 6, 22: 6, 23: 6, 24: 7, 25: 7, 26: 7, 27: 8, 28: 8, 29: 8, 30: 8, 31: 9, 32: 9, 33: 9, 34: 10, 35: 10, 36: 10, 37: 11, 38: 11, 39: 12, 40: 12, 41: 12, 42: 13, 43: 13, 44: 13, 45: 14, 46: 14, 47: 15, 48: 15, 49: 15, 50: 16, 51: 16, 52: 17, 53: 17, 54: 18, 55: 18, 56: 19, 57: 19, 58: 20, 59: 20, 60: 21, 61: 21, 62: 22, 63: 22, 64: 23, 65: 23, 66: 24, 67: 24, 68: 25, 69: 26, 70: 26, 71: 27, 72: 28, 73: 28, 74: 29, 75: 30, 76: 30, 77: 31, 78: 32, 79: 33, 80: 34, 81: 35, 82: 36, 83: 37, 84: 38, 85: 39, 86: 40, 87: 41, 88: 42, 89: 43, 90: 44, 91: 45, 92: 47, 93: 48, 94: 49, 95: 51, 96: 52, 97: 53, 98: 55, 99: 56, 100: 58, 101: 59, 102: 61, 103: 63, 104: 64, 105: 66, 106: 68, 107: 70, 108: 71, 109: 73, 110: 75, 111: 78, 112: 80, 113: 82, 114: 84, 115: 86, 116: 89, 117: 92, 118: 94, 119: 96, 120: 100, 121: 102, 122: 106, 123: 110, 124: 112, 125: 116, 126: 120, 127: 125} curve3 = {0: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1, 7: 1, 8: 1, 9: 
1, 10: 1, 11: 1, 12: 1, 13: 1, 14: 1, 15: 1, 16: 1, 17: 1, 18: 1, 19: 1, 20: 1, 21: 2, 22: 2, 23: 2, 24: 2, 25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 3, 32: 3, 33: 3, 34: 3, 35: 3, 36: 3, 37: 3, 38: 4, 39: 4, 40: 4, 41: 4, 42: 4, 43: 4, 44: 5, 45: 5, 46: 5, 47: 5, 48: 5, 49: 6, 50: 6, 51: 6, 52: 6, 53: 7, 54: 7, 55: 7, 56: 7, 57: 8, 58: 8, 59: 8, 60: 8, 61: 9, 62: 9, 63: 9, 64: 10, 65: 10, 66: 10, 67: 11, 68: 11, 69: 11, 70: 12, 71: 12, 72: 12, 73: 13, 74: 13, 75: 14, 76: 14, 77: 15, 78: 15, 79: 16, 80: 16, 81: 17, 82: 17, 83: 18, 84: 19, 85: 19, 86: 20, 87: 21, 88: 21, 89: 22, 90: 23, 91: 24, 92: 25, 93: 26, 94: 27, 95: 28, 96: 29, 97: 30, 98: 32, 99: 33, 100: 34, 101: 35, 102: 37, 103: 38, 104: 40, 105: 41, 106: 43, 107: 45, 108: 46, 109: 48, 110: 50, 111: 52, 112: 54, 113: 56, 114: 59, 115: 61, 116: 64, 117: 66, 118: 69, 119: 72, 120: 76, 121: 79, 122: 83, 123: 87, 124: 92, 125: 99, 126: 104, 127: 118} joystick_curve_list = [curve_linear, curve1, curve2, curve3] class usb4vc_menu(object): def cap_index(self, index, list_size): if index >= list_size: return 0 return index def __init__(self, pboard, conf_dict): super(usb4vc_menu, self).__init__() self.current_level = 0 self.current_page = 0 self.level_size = 6 self.page_size = [7, 6, 4, 1, 1, 5] self.kb_protocol_list = list(pboard['protocol_list_keyboard']) self.mouse_protocol_list = list(pboard['protocol_list_mouse']) self.gamepad_protocol_list = list(pboard['protocol_list_gamepad']) self.pb_info = dict(pboard) self.current_keyboard_protocol_index = self.cap_index(conf_dict.get('keyboard_protocol_index', 0), len(self.kb_protocol_list)) self.current_mouse_protocol_index = self.cap_index(conf_dict.get("mouse_protocol_index", 0), len(self.mouse_protocol_list)) self.current_mouse_sensitivity_offset_index = self.cap_index(conf_dict.get("mouse_sensitivity_index", 0), len(mouse_sensitivity_list)) self.current_gamepad_protocol_index = self.cap_index(conf_dict.get("gamepad_protocol_index", 0), len(self.gamepad_protocol_list)) self.current_keyboard_protocol = self.kb_protocol_list[self.current_keyboard_protocol_index] self.current_mouse_protocol = self.mouse_protocol_list[self.current_mouse_protocol_index] self.current_gamepad_protocol = self.gamepad_protocol_list[self.current_gamepad_protocol_index] self.current_joystick_curve_index = self.cap_index(conf_dict.get("joystick_curve_index", 0), len(joystick_curve_list)) self.last_spi_message = [] self.bluetooth_device_list = None self.error_message = '' self.pairing_result = '' self.bt_scan_timeout_sec = 10 self.paired_devices_list = [] self.send_protocol_set_spi_msg() def switch_page(self, amount): self.current_page = (self.current_page + amount) % self.page_size[self.current_level] def goto_page(self, new_page): if new_page < self.page_size[self.current_level]: self.current_page = new_page def goto_level(self, new_level): if new_level < self.level_size: self.current_level = new_level self.current_page = 0 def draw_joystick_curve(self): this_curve = joystick_curve_list[self.current_joystick_curve_index % len(joystick_curve_list)] with canvas(usb4vc_oled.oled_device) as draw: draw.text((0, 0), "Joystick", font=usb4vc_oled.font_medium, fill="white") draw.text((0, 15), "Curve", font=usb4vc_oled.font_medium, fill="white") draw.line((curve_vertial_axis_x_pos, 0, curve_vertial_axis_x_pos, curve_vertial_axis_x_pos), fill="white") draw.line((curve_vertial_axis_x_pos, 31, curve_vertial_axis_x_pos+curve_horizontal_axis_width, 31), fill="white") for xxx in range(curve_horizontal_axis_width): dict_key = 
xxx*4 this_point_x = xxx + curve_vertial_axis_x_pos this_point_y = usb4vc_oled.OLED_HEIGHT - this_curve[dict_key]//4 - 1 draw.line((this_point_x,this_point_y,this_point_x,this_point_y), fill="white") def display_page(self, level, page): if level == 0: if page == 0: with canvas(usb4vc_oled.oled_device) as draw: mouse_count, kb_count, gp_count = usb4vc_usb_scan.get_device_count() draw.text((0, 0), f"KBD {kb_count} {self.current_keyboard_protocol['display_name']}", font=usb4vc_oled.font_regular, fill="white") draw.text((0, 10), f"MOS {mouse_count} {self.current_mouse_protocol['display_name']}", font=usb4vc_oled.font_regular, fill="white") draw.text((0, 20), f"GPD {gp_count} {self.current_gamepad_protocol['display_name']}", font=usb4vc_oled.font_regular, fill="white") if page == 1: with canvas(usb4vc_oled.oled_device) as draw: if 'Unknown' in self.pb_info['full_name']: draw.text((0, 0), f"{self.pb_info['full_name']} PID {this_pboard_id}", font=usb4vc_oled.font_regular, fill="white") else: draw.text((0, 0), f"{self.pb_info['full_name']}", font=usb4vc_oled.font_regular, fill="white") draw.text((0, 10), f"PB {self.pb_info['fw_ver'][0]}.{self.pb_info['fw_ver'][1]}.{self.pb_info['fw_ver'][2]} RPi {usb4vc_shared.RPI_APP_VERSION_TUPLE[0]}.{usb4vc_shared.RPI_APP_VERSION_TUPLE[1]}.{usb4vc_shared.RPI_APP_VERSION_TUPLE[2]}", font=usb4vc_oled.font_regular, fill="white") draw.text((0, 20), f"IP: {get_ip_address()}", font=usb4vc_oled.font_regular, fill="white") if page == 2: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Load Custom", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered("Config from USB", usb4vc_oled.font_medium, 16, draw) if page == 3: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Internet Update", usb4vc_oled.font_medium, 10, draw) if page == 4: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Show Event Codes", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered("(experimental)", usb4vc_oled.font_regular, 20, draw) if page == 5: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Remove BT Device", usb4vc_oled.font_medium, 10, draw) if page == 6: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Pair Bluetooth", usb4vc_oled.font_medium, 10, draw) if level == 1: if page == 0: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Keyboard Protocol", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered(self.kb_protocol_list[self.current_keyboard_protocol_index]['display_name'], usb4vc_oled.font_medium, 15, draw) if page == 1: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Mouse Protocol", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered(self.mouse_protocol_list[self.current_mouse_protocol_index]['display_name'], usb4vc_oled.font_medium, 15, draw) if page == 2: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Gamepad Protocol", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered(self.gamepad_protocol_list[self.current_gamepad_protocol_index]['display_name'], usb4vc_oled.font_medium, 15, draw) if page == 3: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Mouse Sensitivity", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered(f"{mouse_sensitivity_list[self.current_mouse_sensitivity_offset_index]}", usb4vc_oled.font_medium, 15, draw) if page == 4: 
self.draw_joystick_curve() if page == 5: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Save & Quit", usb4vc_oled.font_medium, 10, draw) if level == 2: if page == 0: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Put your device in", usb4vc_oled.font_regular, 0, draw) usb4vc_oled.oled_print_centered("pairing mode now.", usb4vc_oled.font_regular, 10, draw) usb4vc_oled.oled_print_centered("Press enter to start", usb4vc_oled.font_regular, 20, draw) if page == 1: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Scanning...", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered("Please wait", usb4vc_oled.font_medium, 15, draw) result, self.error_message = bt_setup() if result != 0: self.goto_page(3) self.display_curent_page() return paired_devices_set = get_paired_devices() self.bluetooth_device_list, self.error_message = scan_bt_devices(self.bt_scan_timeout_sec) self.bluetooth_device_list = list(set(self.bluetooth_device_list) - paired_devices_set) if len(self.bluetooth_device_list) == 0: self.error_message = "Nothing was found" self.goto_page(3) self.display_curent_page() return print("BT LIST:", self.bluetooth_device_list) # set up level 3 menu structure self.page_size[3] = len(self.bluetooth_device_list) + 1 self.goto_level(3) self.display_curent_page() if page == 2: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Pairing result:", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered(self.pairing_result, usb4vc_oled.font_regular, 20, draw) if page == 3: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Bluetooth Error!", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered(self.error_message, usb4vc_oled.font_regular, 20, draw) if level == 3: if page == self.page_size[3] - 1: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Exit", usb4vc_oled.font_medium, 10, draw) else: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered(f"Found {len(self.bluetooth_device_list)}. 
Pair this?", usb4vc_oled.font_regular, 0, draw) usb4vc_oled.oled_print_centered(f"{self.bluetooth_device_list[page][1]}", usb4vc_oled.font_regular, 10, draw) usb4vc_oled.oled_print_centered(f"{self.bluetooth_device_list[page][0]}", usb4vc_oled.font_regular, 20, draw) if level == 4: if page == self.page_size[4] - 1: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Exit", usb4vc_oled.font_medium, 10, draw) else: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered(f"Remove this?", usb4vc_oled.font_regular, 0, draw) usb4vc_oled.oled_print_centered(f"{self.paired_devices_list[page][1]}", usb4vc_oled.font_regular, 10, draw) usb4vc_oled.oled_print_centered(f"{self.paired_devices_list[page][0]}", usb4vc_oled.font_regular, 20, draw) if level == 5: if page == 0: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Power Down", usb4vc_oled.font_medium, 10, draw) if page == 1: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Relaunch", usb4vc_oled.font_medium, 10, draw) if page == 2: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Reboot", usb4vc_oled.font_medium, 10, draw) if page == 3: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Exit to Linux", usb4vc_oled.font_medium, 10, draw) if page == 4: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Cancel", usb4vc_oled.font_medium, 10, draw) def send_protocol_set_spi_msg(self): status_dict = {} for index, item in enumerate(self.kb_protocol_list): if item['pid'] & 0x7f in status_dict and status_dict[item['pid'] & 0x7f] == 1: continue status_dict[item['pid'] & 0x7f] = 0 if index == self.current_keyboard_protocol_index: status_dict[item['pid'] & 0x7f] = 1 for index, item in enumerate(self.mouse_protocol_list): if item['pid'] & 0x7f in status_dict and status_dict[item['pid'] & 0x7f] == 1: continue status_dict[item['pid'] & 0x7f] = 0 if index == self.current_mouse_protocol_index: status_dict[item['pid'] & 0x7f] = 1 for index, item in enumerate(self.gamepad_protocol_list): if item['pid'] & 0x7f in status_dict and status_dict[item['pid'] & 0x7f] == 1: continue status_dict[item['pid'] & 0x7f] = 0 if index == self.current_gamepad_protocol_index: status_dict[item['pid'] & 0x7f] = 1 protocol_bytes = [] for key in status_dict: if key == PROTOCOL_OFF['pid']: continue if status_dict[key]: protocol_bytes.append(key | 0x80) else: protocol_bytes.append(key) this_msg = list(set_protocl_spi_msg_template) this_msg[3:3+len(protocol_bytes)] = protocol_bytes self.current_keyboard_protocol = self.kb_protocol_list[self.current_keyboard_protocol_index] self.current_mouse_protocol = self.mouse_protocol_list[self.current_mouse_protocol_index] self.current_gamepad_protocol = self.gamepad_protocol_list[self.current_gamepad_protocol_index] if this_msg == self.last_spi_message: print("SPI: no need to send") return print("set_protocol:", [hex(x) for x in this_msg]) usb4vc_usb_scan.set_protocol(this_msg) print('new status:', [hex(x) for x in usb4vc_usb_scan.get_pboard_info()]) self.last_spi_message = list(this_msg) def action(self, level, page): if level == 0: if page == 2: usb_present, config_path = check_usb_drive() if usb_present is False: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Error:", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered(str(config_path), usb4vc_oled.font_regular, 16, draw) time.sleep(3) self.goto_level(0) else: 
with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Copying", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered("Debug Log...", usb4vc_oled.font_medium, 16, draw) copy_debug_log() time.sleep(2) with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Copying custom", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered("mapping...", usb4vc_oled.font_medium, 16, draw) time.sleep(2) update_from_usb(config_path) with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Update complete!", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered("Relaunching...", usb4vc_oled.font_medium, 16, draw) time.sleep(3) usb4vc_oled.oled_device.clear() os._exit(0) self.goto_level(0) elif page == 3: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Updating...", usb4vc_oled.font_medium, 10, draw) fffff = usb4vc_check_update.download_latest_firmware(this_pboard_id) if fffff != 0: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Unable to download", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered(f"firmware: {fffff}", usb4vc_oled.font_medium, 16, draw) elif update_pboard_firmware(this_pboard_id): with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Firmware updated!", usb4vc_oled.font_medium, 10, draw) else: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("FW update ERR or", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered("already newest", usb4vc_oled.font_medium, 15, draw) time.sleep(3) with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Updating code...", usb4vc_oled.font_medium, 10, draw) time.sleep(1) update_result = usb4vc_check_update.update(temp_dir_path) if update_result[0] == 0: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Update complete!", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered("Relaunching...", usb4vc_oled.font_medium, 16, draw) else: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Update failed:", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered(f"{update_result[-1]} {update_result[0]}", usb4vc_oled.font_regular, 16, draw) time.sleep(4) usb4vc_oled.oled_device.clear() os._exit(0) elif page == 4: try: usb4vc_show_ev.ev_loop([plus_button, minus_button, enter_button]) except Exception as e: print('exception ev_loop:', e) self.goto_level(0) elif page == 5: self.paired_devices_list = list(get_paired_devices()) self.page_size[4] = len(self.paired_devices_list) + 1 self.goto_level(4) elif page == 6: self.goto_level(2) else: self.goto_level(1) if level == 1: if page == 0: self.current_keyboard_protocol_index = (self.current_keyboard_protocol_index + 1) % len(self.kb_protocol_list) if page == 1: self.current_mouse_protocol_index = (self.current_mouse_protocol_index + 1) % len(self.mouse_protocol_list) if page == 2: self.current_gamepad_protocol_index = (self.current_gamepad_protocol_index + 1) % len(self.gamepad_protocol_list) if page == 3: self.current_mouse_sensitivity_offset_index = (self.current_mouse_sensitivity_offset_index + 1) % len(mouse_sensitivity_list) if page == 4: self.current_joystick_curve_index = (self.current_joystick_curve_index + 1) % len(joystick_curve_list) self.draw_joystick_curve() if page == 5: configuration_dict[this_pboard_id]["keyboard_protocol_index"] = 
self.current_keyboard_protocol_index configuration_dict[this_pboard_id]["mouse_protocol_index"] = self.current_mouse_protocol_index configuration_dict[this_pboard_id]["mouse_sensitivity_index"] = self.current_mouse_sensitivity_offset_index configuration_dict[this_pboard_id]["gamepad_protocol_index"] = self.current_gamepad_protocol_index configuration_dict[this_pboard_id]["joystick_curve_index"] = self.current_joystick_curve_index save_config() self.send_protocol_set_spi_msg() self.goto_level(0) if level == 2: if page == 0: self.switch_page(1) if page == 2: self.goto_level(0) if page == 3: self.goto_level(0) if level == 3: if page == self.page_size[3] - 1: self.goto_level(0) else: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Pairing...", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered("Please wait", usb4vc_oled.font_medium, 15, draw) print("pairing", self.bluetooth_device_list[page]) bt_mac_addr = self.bluetooth_device_list[page][0] is_successful, result_message = pair_device(bt_mac_addr) self.pairing_result = result_message.split('.')[-1].strip()[-22:] if is_successful: os.system(f'timeout {self.bt_scan_timeout_sec} bluetoothctl --agent NoInputNoOutput trust {bt_mac_addr}') os.system(f'timeout {self.bt_scan_timeout_sec} bluetoothctl --agent NoInputNoOutput connect {bt_mac_addr}') self.goto_level(2) self.goto_page(2) if level == 4: if page == self.page_size[4] - 1: self.goto_level(0) else: os.system(f'timeout 5 bluetoothctl --agent NoInputNoOutput untrust {self.paired_devices_list[page][0]}') os.system(f'timeout 5 bluetoothctl --agent NoInputNoOutput remove {self.paired_devices_list[page][0]}') self.goto_level(0) if level == 5: if page == 0: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Wait Until Green", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered("LED Stops Blinking", usb4vc_oled.font_medium, 15, draw) time.sleep(2) os.system("sudo halt") while 1: time.sleep(1) if page == 1: usb4vc_oled.oled_device.clear() os._exit(0) if page == 2: with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Rebooting...", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered("Unplug if stuck >10s", usb4vc_oled.font_regular, 16, draw) os.system("sudo reboot") while 1: time.sleep(1) if page == 3: usb4vc_oled.oled_device.clear() os._exit(169) if page == 4: self.goto_level(0) self.display_curent_page() def action_current_page(self): self.action(self.current_level, self.current_page) def display_curent_page(self): self.display_page(self.current_level, self.current_page) def update_usb_status(self): if self.current_level == 0 and self.current_page == 0: self.display_page(0, 0) def update_board_status(self): if self.current_level == 0 and self.current_page == 1: self.display_page(0, 1) pboard_database = { PBOARD_ID_UNKNOWN:{'author':'Unknown', 'fw_ver':(0,0,0), 'full_name':'Unknown', 'hw_rev':0, 'protocol_list_keyboard':raw_keyboard_protocols, 'protocol_list_mouse':raw_mouse_protocols, 'protocol_list_gamepad':raw_gamepad_protocols}, PBOARD_ID_IBMPC:{'author':'dekuNukem', 'fw_ver':(0,0,0), 'full_name':'IBM PC Compatible', 'hw_rev':0, 'protocol_list_keyboard':ibmpc_keyboard_protocols, 'protocol_list_mouse':ibmpc_mouse_protocols, 'protocol_list_gamepad':ibmpc_gamepad_protocols}, PBOARD_ID_ADB:{'author':'dekuNukem', 'fw_ver':(0,0,0), 'full_name':'Apple Desktop Bus', 'hw_rev':0, 'protocol_list_keyboard':adb_keyboard_protocols, 'protocol_list_mouse':adb_mouse_protocols, 
'protocol_list_gamepad':adb_gamepad_protocols}, } def get_pboard_dict(pid): if pid not in pboard_database: pid = 0 return pboard_database[pid] def get_mouse_sensitivity(): return mouse_sensitivity_list[configuration_dict[this_pboard_id]["mouse_sensitivity_index"]] def ui_init(): global pboard_info_spi_msg global this_pboard_id load_config() pboard_info_spi_msg = usb4vc_usb_scan.get_pboard_info() print("PB INFO:", pboard_info_spi_msg) this_pboard_id = pboard_info_spi_msg[3] if this_pboard_id in pboard_database: # load custom profile mapping into protocol list for item in custom_profile_list: this_mapping_bid = usb4vc_shared.board_id_lookup.get(item['protocol_board'], 0) if this_mapping_bid == this_pboard_id and item['device_type'] in pboard_database[this_pboard_id]: this_mapping_pid = usb4vc_shared.protocol_id_lookup.get(item['protocol_name']) item['pid'] = this_mapping_pid pboard_database[this_pboard_id][item['device_type']].append(item) pboard_database[this_pboard_id]['hw_rev'] = pboard_info_spi_msg[4] pboard_database[this_pboard_id]['fw_ver'] = (pboard_info_spi_msg[5], pboard_info_spi_msg[6], pboard_info_spi_msg[7]) if 'rpi_app_ver' not in configuration_dict: configuration_dict['rpi_app_ver'] = usb4vc_shared.RPI_APP_VERSION_TUPLE if this_pboard_id not in configuration_dict: configuration_dict[this_pboard_id] = {"keyboard_protocol_index":1, "mouse_protocol_index":1, "mouse_sensitivity_index":0, "gamepad_protocol_index":1} plus_button = my_button(PLUS_BUTTON_PIN) minus_button = my_button(MINUS_BUTTON_PIN) enter_button = my_button(ENTER_BUTTON_PIN) shutdown_button = my_button(SHUTDOWN_BUTTON_PIN) class oled_sleep_control(object): def __init__(self): super(oled_sleep_control, self).__init__() self.is_sleeping = False self.last_input_event = time.time() self.ui_loop_count = 0 def sleep(self): if self.is_sleeping is False: print("sleeping!") usb4vc_oled.oled_device.clear() self.is_sleeping = True # GPIO.output(SLEEP_LED_PIN, GPIO.HIGH) def wakeup(self): if self.is_sleeping: print("waking up!") my_menu.display_curent_page() self.last_input_event = time.time() self.is_sleeping = False # GPIO.output(SLEEP_LED_PIN, GPIO.LOW) def check_sleep(self): # time.time() might jump ahead a lot when RPi gets its time from network # this ensures OLED won't go to sleep too early if self.ui_loop_count <= 1500: return if time.time() - self.last_input_event > 180: self.sleep() else: self.wakeup() def kick(self): self.last_input_event = time.time() my_oled = oled_sleep_control() my_menu = None def ui_worker(): global my_menu print(configuration_dict) print("ui_worker started") my_menu = usb4vc_menu(get_pboard_dict(this_pboard_id), configuration_dict[this_pboard_id]) my_menu.display_page(0, 0) for x in range(2): GPIO.output(SLEEP_LED_PIN, GPIO.HIGH) time.sleep(0.2) GPIO.output(SLEEP_LED_PIN, GPIO.LOW) time.sleep(0.2) while 1: time.sleep(0.1) my_oled.ui_loop_count += 1 if my_oled.is_sleeping is False and my_oled.ui_loop_count % 5 == 0: my_menu.update_usb_status() my_menu.update_board_status() if plus_button.is_pressed(): my_oled.kick() if my_oled.is_sleeping: my_oled.wakeup() elif my_menu.current_level != 2: my_menu.switch_page(1) my_menu.display_curent_page() if minus_button.is_pressed(): my_oled.kick() if my_oled.is_sleeping: my_oled.wakeup() elif my_menu.current_level != 2: my_menu.switch_page(-1) my_menu.display_curent_page() if enter_button.is_pressed(): my_oled.kick() if my_oled.is_sleeping: my_oled.wakeup() else: my_menu.action_current_page() if shutdown_button.is_pressed(): my_oled.kick() if 
my_oled.is_sleeping: my_oled.wakeup() else: my_menu.goto_level(5) my_menu.display_curent_page() my_oled.check_sleep() def get_gamepad_protocol(): return my_menu.current_gamepad_protocol def get_joystick_curve(): return joystick_curve_list[my_menu.current_joystick_curve_index] def oled_print_model_changed(): with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("RPi Model Changed!", usb4vc_oled.font_regular, 0, draw) usb4vc_oled.oled_print_centered("Recompiling BT Driver", usb4vc_oled.font_regular, 10, draw) usb4vc_oled.oled_print_centered("Might take a while...", usb4vc_oled.font_regular, 20, draw) def oled_print_oneline(msg): with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered(msg, usb4vc_oled.font_medium, 10, draw) def oled_print_reboot(): with canvas(usb4vc_oled.oled_device) as draw: usb4vc_oled.oled_print_centered("Done! Rebooting..", usb4vc_oled.font_medium, 0, draw) usb4vc_oled.oled_print_centered("Unplug if stuck >10s", usb4vc_oled.font_regular, 16, draw) ui_thread = threading.Thread(target=ui_worker, daemon=True)
51.462641
1075
0.63269
25339
0.503968
0
0
0
0
0
0
7369
0.146562
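The file in the record above drives its OLED menu from a single 100 ms polling loop running on a daemon thread (ui_thread = threading.Thread(target=ui_worker, daemon=True)). A minimal, self-contained sketch of that daemon-thread polling pattern follows; poll_buttons and STATUS_REFRESH_TICKS are illustrative placeholder names, not identifiers from the original source.

# Sketch of the daemon-thread polling pattern used by the UI code above (assumed names).
import threading
import time

STATUS_REFRESH_TICKS = 5  # hypothetical: redraw status pages every 5 loop iterations

def poll_buttons():
    # Hypothetical stand-in for the real button/OLED polling body.
    tick = 0
    while True:
        time.sleep(0.1)  # same 100 ms poll interval as the original loop
        tick += 1
        if tick % STATUS_REFRESH_TICKS == 0:
            pass  # e.g. refresh USB/board status here

ui_thread = threading.Thread(target=poll_buttons, daemon=True)
ui_thread.start()  # daemon=True lets the process exit without joining the thread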
5d6f6e6ed3bbf01cb5af3d5c038344399c98f74f
384
py
Python
study/migrations/0003_auto_20200224_2316.py
hpathipati/Quick-Tutor
17476d79b87f51b12a6c8fc435d1a6506bff1e04
[ "PostgreSQL", "Unlicense", "MIT" ]
null
null
null
study/migrations/0003_auto_20200224_2316.py
hpathipati/Quick-Tutor
17476d79b87f51b12a6c8fc435d1a6506bff1e04
[ "PostgreSQL", "Unlicense", "MIT" ]
null
null
null
study/migrations/0003_auto_20200224_2316.py
hpathipati/Quick-Tutor
17476d79b87f51b12a6c8fc435d1a6506bff1e04
[ "PostgreSQL", "Unlicense", "MIT" ]
null
null
null
# Generated by Django 3.0.2 on 2020-02-24 23:16

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('study', '0002_student'),
    ]

    operations = [
        migrations.AlterField(
            model_name='student',
            name='bio',
            field=models.CharField(blank=True, max_length=200),
        ),
    ]
20.210526
63
0.588542
291
0.757813
0
0
0
0
0
0
82
0.213542
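The migration in the record above alters Student.bio to a blank-allowed CharField with max_length=200. A hedged sketch of the model declaration this implies is shown below; the actual study/models.py is not part of this record, so any other fields of Student are unknown and omitted here.

# Sketch of what study/models.py likely declares after migration 0003 (assumption,
# reconstructed only from the AlterField operation above; belongs inside the 'study' app).
from django.db import models

class Student(models.Model):
    bio = models.CharField(blank=True, max_length=200)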
5d6fd80c1e9176894348ae0d83e6981dbb3ecb3a
103,544
py
Python
tests/unit/resources/test_resource.py
gzecchi/oneview-python
949bc67ca3eaed324a6dc058620145d9e067e25b
[ "Apache-2.0" ]
null
null
null
tests/unit/resources/test_resource.py
gzecchi/oneview-python
949bc67ca3eaed324a6dc058620145d9e067e25b
[ "Apache-2.0" ]
null
null
null
tests/unit/resources/test_resource.py
gzecchi/oneview-python
949bc67ca3eaed324a6dc058620145d9e067e25b
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- ### # (C) Copyright [2019] Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ### import io import unittest import mock from mock import call from tests.test_utils import mock_builtin from hpOneView.connection import connection from hpOneView import exceptions from hpOneView.resources.resource import (ResourceClient, ResourceHelper, ResourceFileHandlerMixin, ResourceZeroBodyMixin, ResourcePatchMixin, ResourceUtilizationMixin, ResourceSchemaMixin, Resource, RESOURCE_CLIENT_INVALID_ID, UNRECOGNIZED_URI, TaskMonitor, RESOURCE_CLIENT_TASK_EXPECTED, RESOURCE_ID_OR_URI_REQUIRED, transform_list_to_dict, extract_id_from_uri, merge_resources, merge_default_values, unavailable_method) class StubResourceFileHandler(ResourceFileHandlerMixin, Resource): """Stub class to test resource file operations""" class StubResourceZeroBody(ResourceZeroBodyMixin, Resource): """Stub class to test resoruce zero body methods""" class StubResourcePatch(ResourcePatchMixin, Resource): """Stub class to test resource patch operations""" class StubResourceUtilization(ResourceUtilizationMixin, Resource): """Stub class to test resource utilization methods""" class StubResourceSchema(ResourceSchemaMixin, Resource): """Stub class to test resource schema methods""" class StubResource(Resource): """Stub class to test resource common methods""" URI = "/rest/testuri" class BaseTest(unittest.TestCase): URI = "/rest/testuri" TYPE_V200 = "typeV200" TYPE_V300 = "typeV300" DEFAULT_VALUES = { "200": {"type": TYPE_V200}, "300": {"type": TYPE_V300} } def setUp(self, resource_client=None): self.resource_client = resource_client self.resource_client.URI = self.URI self.resource_client.DEFAULT_VALUES = self.DEFAULT_VALUES self.resource_client.data = {"uri": "/rest/testuri"} self.resource_client._merge_default_values() self.task = {"task": "task", "taskState": "Finished"} self.response_body = {"body": "body"} self.custom_headers = {"Accept-Language": "en_US"} class ResourceFileHandlerMixinTest(BaseTest): def setUp(self): self.connection = connection('127.0.0.1', 300) self.resource_client = StubResourceFileHandler(self.connection) super(ResourceFileHandlerMixinTest, self).setUp(self.resource_client) @mock.patch.object(connection, "post_multipart_with_response_handling") def test_upload_should_call_post_multipart(self, mock_post_multipart): uri = "/rest/testuri/" filepath = "test/SPPgen9snap6.2015_0405.81.iso" mock_post_multipart.return_value = None, mock.Mock() self.resource_client.upload(filepath, uri) mock_post_multipart.assert_called_once_with(uri, filepath, "SPPgen9snap6.2015_0405.81.iso") @mock.patch.object(connection, "post_multipart_with_response_handling") def test_upload_should_call_post_multipart_with_resource_uri_when_not_uri_provided(self, mock_post_multipart): filepath = "test/SPPgen9snap6.2015_0405.81.iso" mock_post_multipart.return_value = None, mock.Mock() self.resource_client.upload(filepath) mock_post_multipart.assert_called_once_with("/rest/testuri", mock.ANY, mock.ANY) 
@mock.patch.object(connection, "post_multipart_with_response_handling") @mock.patch.object(TaskMonitor, "wait_for_task") @mock.patch.object(connection, "get") def test_upload_should_wait_for_task_when_response_is_task(self, mock_get, mock_wait4task, mock_post_multipart): uri = "/rest/testuri/" filepath = "test/SPPgen9snap6.2015_0405.81.iso" mock_post_multipart.return_value = self.task, mock.Mock() self.resource_client.upload(filepath, uri) mock_wait4task.assert_called_once_with(self.task, -1) @mock.patch.object(connection, "post_multipart_with_response_handling") @mock.patch.object(TaskMonitor, "wait_for_task") def test_upload_should_not_wait_for_task_when_response_is_not_task(self, mock_wait4task, mock_post_multipart): uri = "/rest/testuri/" filepath = "test/SPPgen9snap6.2015_0405.81.iso" mock_post_multipart.return_value = None, mock.Mock() self.resource_client.upload(filepath, uri) mock_wait4task.not_been_called() @mock.patch.object(connection, "post_multipart_with_response_handling") @mock.patch.object(TaskMonitor, "wait_for_task") @mock.patch.object(connection, "get") def test_upload_should_return_associated_resource_when_response_is_task(self, mock_get, mock_wait4task, mock_post_multipart): fake_associated_resurce = mock.Mock() uri = "/rest/testuri/" filepath = "test/SPPgen9snap6.2015_0405.81.iso" mock_post_multipart.return_value = self.task, mock.Mock() mock_wait4task.return_value = fake_associated_resurce result = self.resource_client.upload(filepath, uri) self.assertEqual(result, fake_associated_resurce) @mock.patch.object(connection, "post_multipart_with_response_handling") @mock.patch.object(TaskMonitor, "wait_for_task") def test_upload_should_return_resource_when_response_is_not_task(self, mock_wait4task, mock_post_multipart): fake_response_body = mock.Mock() uri = "/rest/testuri/" filepath = "test/SPPgen9snap6.2015_0405.81.iso" mock_post_multipart.return_value = None, fake_response_body result = self.resource_client.upload(filepath, uri) self.assertEqual(result, fake_response_body) @mock.patch.object(connection, "download_to_stream") @mock.patch(mock_builtin("open")) def test_download_should_call_download_to_stream_with_given_uri(self, mock_open, mock_download_to_stream): file_path = "~/archive.log" uri = "/rest/testuri/3ec91dd2-0ebb-4484-8b2d-90d065114315" mock_open.return_value = io.StringIO() self.resource_client.download(uri, file_path) mock_download_to_stream.assert_called_once_with(mock.ANY, uri) @mock.patch.object(connection, "download_to_stream") @mock.patch(mock_builtin("open")) def test_download_should_call_download_to_stream_with_open_file(self, mock_open, mock_download_to_stream): file_path = "~/archive.log" uri = "/rest/testuri/3ec91dd2-0ebb-4484-8b2d-90d065114315" fake_file = io.StringIO() mock_open.return_value = fake_file self.resource_client.download(uri, file_path) mock_open.assert_called_once_with(file_path, 'wb') mock_download_to_stream.assert_called_once_with(fake_file, mock.ANY) @mock.patch.object(connection, "download_to_stream") @mock.patch(mock_builtin("open")) def test_download_should_return_true_when_success(self, mock_open, mock_download_to_stream): file_path = "~/archive.log" uri = "/rest/testuri/3ec91dd2-0ebb-4484-8b2d-90d065114315" mock_download_to_stream.return_value = True mock_open.return_value = io.StringIO() result = self.resource_client.download(uri, file_path) self.assertTrue(result) @mock.patch.object(connection, "download_to_stream") @mock.patch(mock_builtin("open")) def test_download_should_return_false_when_error(self, mock_open, 
mock_download_to_stream): file_path = "~/archive.log" uri = "/rest/testuri/3ec91dd2-0ebb-4484-8b2d-90d065114315" mock_download_to_stream.return_value = False mock_open.return_value = io.StringIO() result = self.resource_client.download(uri, file_path) self.assertFalse(result) class ResourceZeroBodyMixinTest(BaseTest): def setUp(self): self.connection = connection('127.0.0.1', 300) self.resource_client = StubResourceZeroBody(self.connection) super(ResourceZeroBodyMixinTest, self).setUp(self.resource_client) @mock.patch.object(connection, "post") @mock.patch.object(TaskMonitor, "wait_for_task") def test_create_with_zero_body_called_once(self, mock_wait4task, mock_post): mock_post.return_value = self.task, self.task mock_wait4task.return_value = self.task self.resource_client.create_with_zero_body() mock_post.assert_called_once_with( "/rest/testuri", {}, custom_headers=None) @mock.patch.object(connection, "post") @mock.patch.object(TaskMonitor, "wait_for_task") def test_create_with_zero_body_called_once_without_uri(self, mock_wait4task, mock_post): mock_post.return_value = self.task, self.task mock_wait4task.return_value = self.task self.resource_client.create_with_zero_body(timeout=-1) mock_post.assert_called_once_with( "/rest/testuri", {}, custom_headers=None) @mock.patch.object(connection, "post") @mock.patch.object(TaskMonitor, "wait_for_task") def test_create_with_zero_body_and_custom_headers(self, mock_wait4task, mock_post): mock_post.return_value = self.task, self.task mock_wait4task.return_value = self.task self.resource_client.create_with_zero_body(custom_headers=self.custom_headers) mock_post.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={"Accept-Language": "en_US"}) @mock.patch.object(connection, "post") @mock.patch.object(TaskMonitor, "wait_for_task") def test_create_with_zero_body_return_entity(self, mock_wait4task, mock_post): response_body = {"resource_name": "name"} mock_post.return_value = self.task, self.task mock_wait4task.return_value = response_body new_resource = self.resource_client.create_with_zero_body(timeout=-1) self.assertNotEqual(new_resource, self.resource_client) @mock.patch.object(connection, "post") def test_create_with_zero_body_without_task(self, mock_post): mock_post.return_value = None, self.response_body new_resource = self.resource_client.create_with_zero_body() self.assertNotEqual(new_resource, self.resource_client) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "put") @mock.patch.object(TaskMonitor, "wait_for_task") def test_update_with_zero_body_called_once(self, mock_wait4task, mock_update, mock_ensure_resource): mock_update.return_value = self.task, self.task mock_wait4task.return_value = self.task self.resource_client.URI = "/rest/enclosures" self.resource_client.update_with_zero_body("/rest/enclosures/09USE133E5H4/configuration", timeout=-1) mock_update.assert_called_once_with( "/rest/enclosures/09USE133E5H4/configuration", None, custom_headers=None) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "put") @mock.patch.object(TaskMonitor, "wait_for_task") def test_update_with_zero_body_and_custom_headers(self, mock_wait4task, mock_update, mock_ensure_resource): mock_update.return_value = self.task, self.task mock_wait4task.return_value = self.task self.resource_client.update_with_zero_body(uri="/rest/testuri", custom_headers=self.custom_headers) mock_update.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={"Accept-Language": "en_US"}) 
@mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "put") @mock.patch.object(TaskMonitor, "wait_for_task") def test_update_with_zero_body_return_entity(self, mock_wait4task, mock_put, mock_ensure_resource): response_body = {"resource_name": "name"} self.resource_client.URI = "/rest/enclosures" mock_put.return_value = self.task, self.task mock_wait4task.return_value = response_body result = self.resource_client.update_with_zero_body( "/rest/enclosures/09USE133E5H4/configuration", timeout=-1) self.assertEqual(result, response_body) @mock.patch.object(connection, "put") def test_update_with_zero_body_without_task(self, mock_put): mock_put.return_value = None, self.response_body self.resource_client.URI = "/rest/enclosures" result = self.resource_client.update_with_zero_body( "/rest/enclosures/09USE133E5H4/configuration", timeout=-1) self.assertEqual(result, self.response_body) class ResourcePatchMixinTest(BaseTest): def setUp(self): self.connection = connection('127.0.0.1', 300) self.resource_client = StubResourcePatch(self.connection) super(ResourcePatchMixinTest, self).setUp(self.resource_client) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "patch") def test_patch_request_when_id_is_provided_v200(self, mock_patch, mock_ensure_resource): uri = "/rest/testuri" request_body = [{ "op": "replace", "path": "/name", "value": "new_name", }] mock_patch.return_value = {}, {} self.connection._apiVersion = 200 self.resource_client.patch("replace", "/name", "new_name") mock_patch.assert_called_once_with(uri, request_body, custom_headers={}) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "patch") def test_patch_request_when_id_is_provided_v300(self, mock_patch, mock_ensure_resource): request_body = [{ "op": "replace", "path": "/name", "value": "new_name", }] mock_patch.return_value = {}, {} self.resource_client.patch("replace", "/name", "new_name") mock_patch.assert_called_once_with( "/rest/testuri", request_body, custom_headers={"Content-Type": "application/json-patch+json"}) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "patch") def test_patch_request_when_uri_is_provided(self, mock_patch, mock_ensure_resource): request_body = [{ "op": "replace", "path": "/name", "value": "new_name", }] mock_patch.return_value = {}, {} self.resource_client.patch("replace", "/name", "new_name") mock_patch.assert_called_once_with( "/rest/testuri", request_body, custom_headers={"Content-Type": "application/json-patch+json"}) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "patch") def test_patch_with_custom_headers_v200(self, mock_patch, mock_ensure_resource): mock_patch.return_value = {}, {} self.connection._apiVersion = 200 self.resource_client.patch("operation", "/field", "value", custom_headers=self.custom_headers) mock_patch.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={"Accept-Language": "en_US"}) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "patch") def test_patch_with_custom_headers_v300(self, mock_patch, mock_ensure_resource): mock_patch.return_value = {}, {} self.resource_client.patch("operation", "/field", "value", custom_headers=self.custom_headers) mock_patch.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={"Accept-Language": "en_US", "Content-Type": "application/json-patch+json"}) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, 
"patch") @mock.patch.object(TaskMonitor, "wait_for_task") def test_patch_return_entity(self, mock_wait4task, mock_patch, mock_ensure_resource): entity = {"resource_id": "123a53cz"} mock_patch.return_value = self.task, self.task mock_wait4task.return_value = entity self.resource_client.patch("replace", "/name", "new_name") self.assertEqual(self.resource_client.data, entity) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "patch") @mock.patch.object(TaskMonitor, "get_completed_task") def test_patch_request_custom_headers_with_content_type(self, mock_task, mock_patch, mock_ensure_resource): uri = "/rest/testuri" dict_info = {"resource_name": "a name"} mock_patch.return_value = {}, {} headers = {"Content-Type": "application/json", "Extra": "extra"} self.connection._apiVersion = 300 self.resource_client.patch_request(uri, body=dict_info, custom_headers=headers) mock_patch.assert_called_once_with(uri, dict_info, custom_headers=headers) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "patch") @mock.patch.object(TaskMonitor, "get_completed_task") def test_patch_request_custom_headers(self, mock_task, mock_patch, mock_ensure_resource): uri = "/rest/testuri" dict_info = {"resource_name": "a name"} mock_patch.return_value = {}, {} headers = {"Extra": "extra"} self.connection._apiVersion = 300 self.resource_client.patch_request(uri, body=dict_info, custom_headers=headers) mock_patch.assert_called_once_with( uri, dict_info, custom_headers={"Extra": "extra", "Content-Type": "application/json-patch+json"}) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "patch") @mock.patch.object(TaskMonitor, "wait_for_task") def test_wait_for_activity_on_patch(self, mock_wait4task, mock_patch, mock_ensure_resource): entity = {"resource_id": "123a53cz"} mock_patch.return_value = self.task, self.task mock_wait4task.return_value = entity self.resource_client.patch("replace", "/name", "new_name") mock_wait4task.assert_called_once_with(self.task, mock.ANY) class ResourceUtilizationMixinTest(BaseTest): def setUp(self): self.connection = connection('127.0.0.1', 300) self.resource_client = StubResourceUtilization(self.connection) super(ResourceUtilizationMixinTest, self).setUp(self.resource_client) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "get") def test_get_utilization_with_args(self, mock_get, mock_ensure_resource): self.resource_client.get_utilization(fields="AmbientTemperature,AveragePower,PeakPower", filter="startDate=2016-05-30T03:29:42.361Z", refresh=True, view="day") expected_uri = "/rest/testuri/utilization" \ "?filter=startDate%3D2016-05-30T03%3A29%3A42.361Z" \ "&fields=AmbientTemperature%2CAveragePower%2CPeakPower" \ "&refresh=true" \ "&view=day" mock_get.assert_called_once_with(expected_uri) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "get") def test_get_utilization_with_multiple_filters(self, mock_get, mock_ensure_resource): self.resource_client.get_utilization( fields="AmbientTemperature,AveragePower,PeakPower", filter=["startDate=2016-05-30T03:29:42.361Z", "endDate=2016-05-31T03:29:42.361Z"], refresh=True, view="day") expected_uri = "/rest/testuri/utilization" \ "?filter=startDate%3D2016-05-30T03%3A29%3A42.361Z" \ "&filter=endDate%3D2016-05-31T03%3A29%3A42.361Z" \ "&fields=AmbientTemperature%2CAveragePower%2CPeakPower" \ "&refresh=true" \ "&view=day" mock_get.assert_called_once_with(expected_uri) @mock.patch.object(Resource, 
"ensure_resource_data") @mock.patch.object(connection, "get") def test_get_utilization_by_id_with_defaults(self, mock_get, mock_ensure_resource): self.resource_client.get_utilization() expected_uri = "/rest/testuri/utilization" mock_get.assert_called_once_with(expected_uri) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "get") def test_get_utilization_by_uri_with_defaults(self, mock_get, mock_ensure_resource): self.resource_client.get_utilization() expected_uri = "/rest/testuri/utilization" mock_get.assert_called_once_with(expected_uri) class ResourceSchemaMixinTest(BaseTest): def setUp(self): self.connection = connection('127.0.0.1', 300) self.resource_client = StubResourceSchema(self.connection) super(ResourceSchemaMixinTest, self).setUp(self.resource_client) @mock.patch.object(connection, "get") def test_get_schema_uri(self, mock_get): self.resource_client.get_schema() mock_get.assert_called_once_with(self.URI + "/schema") class ResourceTest(BaseTest): def setUp(self): self.connection = connection('127.0.0.1', 300) self.resource_client = StubResource(self.connection) super(ResourceTest, self).setUp(self.resource_client) self.resource_helper = ResourceHelper(self.URI, self.connection, None) @mock.patch.object(ResourceHelper, "do_put") @mock.patch.object(Resource, "ensure_resource_data") def test_ensure_resource_should_call_once(self, mock_do_put, mock_ensure_resource): self.resource_client.data = {"uri": "/rest/test"} self.resource_client.update(data={"name": "test"}) mock_do_put.assert_called_once() mock_ensure_resource.assert_called_once() def test_ensure_resource_raise_unique_identifier_exception(self): self.resource_client.data = [] self.assertRaises(exceptions.HPOneViewMissingUniqueIdentifiers, self.resource_client.ensure_resource_data) @mock.patch.object(ResourceHelper, "do_get") def test_ensure_resource_raise_resource_not_found_exception_with_uri(self, mock_do_get): self.resource_client.data = {"uri": "/uri/test"} mock_do_get.return_value = [] with self.assertRaises(exceptions.HPOneViewResourceNotFound): self.resource_client.ensure_resource_data(update_data=True) @mock.patch.object(Resource, "get_by") def test_ensure_resource_raise_resource_not_found_exception_without_uri(self, mock_get_by): self.resource_client.data = {"name": "testname"} mock_get_by.return_value = [] with self.assertRaises(exceptions.HPOneViewResourceNotFound): self.resource_client.ensure_resource_data(update_data=True) @mock.patch.object(ResourceHelper, "do_get") @mock.patch.object(Resource, "get_by") def test_ensure_resource_should_update_resource_data(self, mock_do_get, mock_get_by): get_by_return_value = [{"name": "testname", "uri": "/rest/testuri"}] self.resource_client.data = {"name": "testname"} mock_do_get.return_value = get_by_return_value self.resource_client.ensure_resource_data(update_data=True) self.assertEqual(self.resource_client.data, get_by_return_value[0]) @mock.patch.object(Resource, "get_by") def test_ensure_resource_without_data_update(self, mock_get_by): mock_get_by.return_value = [] actual_result = self.resource_client.ensure_resource_data(update_data=False) expected_result = None self.assertEqual(actual_result, expected_result) @mock.patch.object(connection, "get") def test_get_all_called_once(self, mock_get): filter = "'name'='OneViewSDK \"Test FC Network'" sort = "name:ascending" query = "name NE 'WrongName'" mock_get.return_value = {"members": [{"member": "member"}]} result = self.resource_helper.get_all( 1, 500, filter, query, sort) uri = 
"{resource_uri}?start=1" \ "&count=500" \ "&filter=%27name%27%3D%27OneViewSDK%20%22Test%20FC%20Network%27" \ "&query=name%20NE%20%27WrongName%27" \ "&sort=name%3Aascending".format(resource_uri=self.URI) self.assertEqual([{"member": "member"}], result) mock_get.assert_called_once_with(uri) @mock.patch.object(connection, "get") def test_get_all_with_defaults(self, mock_get): self.resource_client.get_all() uri = "{resource_uri}?start=0&count=-1".format(resource_uri=self.URI) mock_get.assert_called_once_with(uri) @mock.patch.object(connection, "get") def test_get_all_with_custom_uri(self, mock_get): self.resource_helper.get_all(uri="/rest/testuri/12467836/subresources") uri = "/rest/testuri/12467836/subresources?start=0&count=-1" mock_get.assert_called_once_with(uri) @mock.patch.object(connection, "get") def test_get_all_with_custom_uri_and_query_string(self, mock_get): self.resource_helper.get_all(uri="/rest/testuri/12467836/subresources?param=value") uri = "/rest/testuri/12467836/subresources?param=value&start=0&count=-1" mock_get.assert_called_once_with(uri) @mock.patch.object(connection, "get") def test_get_all_with_different_resource_uri_should_fail(self, mock_get): try: self.resource_helper.get_all(uri="/rest/other/resource/12467836/subresources") except exceptions.HPOneViewUnknownType as e: self.assertEqual(UNRECOGNIZED_URI, e.args[0]) else: self.fail("Expected Exception was not raised") @mock.patch.object(connection, "get") def test_get_all_should_do_multi_requests_when_response_paginated(self, mock_get): uri_list = ["/rest/testuri?start=0&count=-1", "/rest/testuri?start=3&count=3", "/rest/testuri?start=6&count=3"] results = [{"nextPageUri": uri_list[1], "members": [{"id": "1"}, {"id": "2"}, {"id": "3"}]}, {"nextPageUri": uri_list[2], "members": [{"id": "4"}, {"id": "5"}, {"id": "6"}]}, {"nextPageUri": None, "members": [{"id": "7"}, {"id": "8"}]}] mock_get.side_effect = results self.resource_client.get_all() expected_calls = [call(uri_list[0]), call(uri_list[1]), call(uri_list[2])] self.assertEqual(mock_get.call_args_list, expected_calls) @mock.patch.object(connection, "get") def test_get_all_with_count_should_do_multi_requests_when_response_paginated(self, mock_get): uri_list = ["/rest/testuri?start=0&count=15", "/rest/testuri?start=3&count=3", "/rest/testuri?start=6&count=3"] results = [{"nextPageUri": uri_list[1], "members": [{"id": "1"}, {"id": "2"}, {"id": "3"}]}, {"nextPageUri": uri_list[2], "members": [{"id": "4"}, {"id": "5"}, {"id": "6"}]}, {'nextPageUri': None, "members": [{"id": "7"}, {"id": "8"}]}] mock_get.side_effect = results self.resource_client.get_all(count=15) expected_calls = [call(uri_list[0]), call(uri_list[1]), call(uri_list[2])] self.assertEqual(mock_get.call_args_list, expected_calls) @mock.patch.object(connection, "get") def test_get_all_should_return_all_items_when_response_paginated(self, mock_get): uri_list = ["/rest/testuri?start=0&count=-1", "/rest/testuri?start=3&count=3", "/rest/testuri?start=6&count=1"] results = [{"nextPageUri": uri_list[1], "members": [{"id": "1"}, {"id": "2"}, {"id": "3"}]}, {"nextPageUri": uri_list[2], "members": [{"id": "4"}, {"id": "5"}, {"id": "6"}]}, {"nextPageUri": None, "members": [{"id": "7"}]}] mock_get.side_effect = results result = self.resource_client.get_all() expected_items = [{'id': '1'}, {'id': '2'}, {'id': '3'}, {'id': '4'}, {'id': '5'}, {'id': '6'}, {'id': '7'}] self.assertSequenceEqual(result, expected_items) @mock.patch.object(connection, 'get') def 
test_get_all_should_limit_results_to_requested_count_when_response_is_paginated(self, mock_get): uri_list = ['/rest/testuri?start=0&count=15', '/rest/testuri?start=3&count=3', '/rest/testuri?start=6&count=1'] results = [{"nextPageUri": uri_list[1], "members": [{"id": "1"}, {"id": "2"}, {"id": "3"}]}, {"nextPageUri": uri_list[2], "members": [{"id": "4"}, {"id": "5"}, {"id": "6"}]}, {"nextPageUri": None, "members": [{"id": "7"}]}] mock_get.side_effect = results result = self.resource_client.get_all(count=15) expected_items = [{"id": "1"}, {"id": "2"}, {"id": "3"}, {"id": "4"}, {"id": "5"}, {"id": "6"}, {"id": "7"}] self.assertSequenceEqual(result, expected_items) @mock.patch.object(connection, "get") def test_get_all_should_stop_requests_when_requested_count_reached(self, mock_get): """ In this case, the user provides a maximum number of results to be returned but for pagination purposes, a nextPageUri is returned by OneView. """ uri_list = ["/rest/testuri?start=0&count=3", "/rest/testuri?start=3&count=3", "/rest/testuri?start=6&count=3"] results = [{"nextPageUri": uri_list[1], "members": [{"id": "1"}, {"id": "2"}, {"id": "3"}]}, {"nextPageUri": uri_list[2], "members": [{"id": "4"}, {"id": "5"}, {"id": "6"}]}, {"nextPageUri": None, "members": [{"id": "7"}, {"id": "8"}]}] mock_get.side_effect = results self.resource_client.get_all(count=3) mock_get.assert_called_once_with(uri_list[0]) @mock.patch.object(connection, "get") def test_get_all_should_stop_requests_when_next_page_is_equal_to_current_page(self, mock_get): uri = "/rest/testuri?start=0&count=-1" members = [{"id": "1"}, {"id": "2"}, {"id": "3"}] mock_get.return_value = { "nextPageUri": uri, "members": members, "uri": uri } result = self.resource_client.get_all() self.assertSequenceEqual(result, members) mock_get.assert_called_once_with(uri) @mock.patch.object(connection, "get") def test_get_all_should_return_empty_list_when_response_has_no_items(self, mock_get): mock_get.return_value = {"nextPageUri": None, "members": []} result = self.resource_client.get_all() self.assertEqual(result, []) @mock.patch.object(connection, "get") def test_get_all_should_return_empty_list_when_no_members(self, mock_get): mock_get.return_value = {"nextPageUri": None, "members": None} result = self.resource_client.get_all() self.assertEqual(result, []) @mock.patch.object(ResourceHelper, "do_get") def test_refresh(self, mock_do_get): updated_data = {"resource_name": "updated name"} mock_do_get.return_value = updated_data self.resource_client.refresh() self.assertEqual(self.resource_client.data, updated_data) @mock.patch.object(connection, "post") def test_create_uri(self, mock_post): dict_to_create = {"resource_name": "a name"} mock_post.return_value = {}, {} expected_dict = {"resource_name": "a name", "type": self.TYPE_V300} self.resource_client.create(dict_to_create, timeout=-1) mock_post.assert_called_once_with(self.URI, expected_dict, custom_headers=None) @mock.patch.object(connection, "post") def test_create_with_api_version_200(self, mock_post): dict_to_create = {"resource_name": "a name"} mock_post.return_value = {}, {} self.connection._apiVersion = 200 self.resource_client._merge_default_values() expected_dict = {"resource_name": "a name", "type": self.TYPE_V200} self.resource_client.create(dict_to_create, timeout=-1) mock_post.assert_called_once_with(self.URI, expected_dict, custom_headers=None) @mock.patch.object(connection, "post") def test_create_with_default_api_version_300(self, mock_post): dict_to_create = {"resource_name": "a name"} 
mock_post.return_value = {}, {} expected_dict = {"resource_name": "a name", "type": self.TYPE_V300} self.resource_client.create(dict_to_create, timeout=-1) mock_post.assert_called_once_with(self.URI, expected_dict, custom_headers=None) @mock.patch.object(connection, "post") def test_create_should_not_override_resource_properties(self, mock_post): dict_to_create = {"resource_name": "a name", "type": "anotherType"} mock_post.return_value = {}, {} expected = {"resource_name": "a name", "type": "anotherType"} self.resource_client.create(dict_to_create) mock_post.assert_called_once_with(self.URI, expected, custom_headers=None) @mock.patch.object(connection, "post") def test_create_without_default_values(self, mock_post): dict_to_create = {"resource_name": "a name"} mock_post.return_value = {}, {} resource_client = ResourceClient(self.connection, self.URI) resource_client.create(dict_to_create, timeout=-1) mock_post.assert_called_once_with(self.URI, dict_to_create, custom_headers=None) @mock.patch.object(connection, "post") def test_create_with_custom_headers(self, mock_post): dict_to_create = {"resource_name": "a name"} mock_post.return_value = {}, {} self.resource_client.create(dict_to_create, custom_headers=self.custom_headers) mock_post.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={"Accept-Language": "en_US"}) @mock.patch.object(connection, "post") def test_create_should_return_new_resource_instance(self, mock_post): mock_post.return_value = {}, {} new_instance = self.resource_client.create({}) self.assertNotEqual(self.resource_client, new_instance) @mock.patch.object(connection, "post") @mock.patch.object(TaskMonitor, "wait_for_task") def test_wait_for_activity_on_create(self, mock_wait4task, mock_post): mock_post.return_value = self.task, {} mock_wait4task.return_value = self.task self.resource_client.create({"test": "test"}, timeout=60) mock_wait4task.assert_called_once_with(self.task, 60) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "delete") def test_delete_should_return_true(self, mock_delete, mock_ensure_resource): mock_delete.return_value = None, self.response_body self.resource_client.data = {"uri": "/rest/testuri"} result = self.resource_client.delete() self.assertTrue(result) @mock.patch.object(connection, 'delete') def test_helper_delete_all_should_return_true(self, mock_delete): mock_delete.return_value = None, self.response_body filter = "name='Exchange Server'" result = self.resource_helper.delete_all(filter=filter, force=True, timeout=-1) self.assertTrue(result) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "delete") @mock.patch.object(TaskMonitor, "wait_for_task") def test_delete_with_force(self, mock_ensure_resource, mock_delete, mock_wait4task): mock_delete.return_value = self.task, self.response_body mock_wait4task.return_value = self.task self.resource_client.data = {"uri": "/rest/testuri"} self.resource_client.delete(force=True) mock_delete.assert_called_once_with("/rest/testuri?force=True", custom_headers=None) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "delete") @mock.patch.object(TaskMonitor, "wait_for_task") def test_delete_with_custom_headers(self, mock_ensure_resource, mock_delete, mock_wait4task): mock_delete.return_value = self.task, self.response_body mock_wait4task.return_value = self.task self.resource_client.data = {"uri": "/rest/testuri"} self.resource_client.delete(custom_headers=self.custom_headers) 
mock_delete.assert_called_once_with(mock.ANY, custom_headers={"Accept-Language": "en_US"}) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "put") def test_update_with_uri_called_once(self, mock_put, mock_ensure_resource): uri = "/rest/testuri" dict_to_update = {"name": "test", "type": "typeV300"} self.resource_client.data = {'uri': uri} expected = {"name": "test", "type": "typeV300", "uri": uri} mock_put.return_value = None, self.response_body self.resource_client.update(dict_to_update) self.assertEqual(self.response_body, self.resource_client.data) mock_put.assert_called_once_with(uri, expected, custom_headers=None) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "put") def test_update_with_custom_headers(self, mock_put, mock_ensure_resource): dict_to_update = {"name": "test"} mock_put.return_value = None, self.response_body self.resource_client.update(dict_to_update, custom_headers=self.custom_headers) mock_put.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={"Accept-Language": "en_US"}) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "put") def test_update_with_force(self, mock_put, mock_laod_resource): dict_to_update = {"name": "test"} uri = "/rest/testuri" expected = {"name": "test", "uri": uri, "type": "typeV300"} mock_put.return_value = None, self.response_body self.resource_client.update(dict_to_update) expected_uri = "/rest/testuri" mock_put.assert_called_once_with(expected_uri, expected, custom_headers=None) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "put") def test_update_with_default_api_version_300(self, mock_put, mock_ensure_resource): dict_to_update = {"name": "test"} uri = "/rest/testuri" mock_put.return_value = None, self.response_body expected_dict = {"name": "test", "type": self.TYPE_V300, "uri": uri} self.resource_client._merge_default_values() self.resource_client.update(dict_to_update) mock_put.assert_called_once_with(uri, expected_dict, custom_headers=None) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "put") def test_update_should_not_override_resource_properties(self, mock_put, mock_ensure_resource): dict_to_update = {"name": "test", "type": "anotherType"} uri = "/rest/testuri" mock_put.return_value = None, self.response_body expected = {"name": "test", "type": "anotherType", "uri": uri} self.resource_client.update(dict_to_update) mock_put.assert_called_once_with(uri, expected, custom_headers=None) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "put") def test_update_without_default_values(self, mock_put, mock_ensure_resource): uri = "/rest/testuri" dict_to_update = {"name": "test"} expected = {"name": "test", "uri": uri, "type": "typeV300"} mock_put.return_value = None, self.response_body self.resource_client.update(dict_to_update) mock_put.assert_called_once_with(uri, expected, custom_headers=None) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "put") @mock.patch.object(TaskMonitor, "wait_for_task") def test_update_uri(self, mock_wait4task, mock_update, mock_ensure_resource): uri = "/rest/testuri" dict_to_update = {"resource_data": "resource_data", "uri": uri} expected = {"resource_data": "resource_data", "uri": uri, "type": "typeV300"} mock_update.return_value = self.task, self.response_body mock_wait4task.return_value = self.task self.resource_client.update(dict_to_update, False) 
self.assertEqual(self.task, self.resource_client.data) mock_update.assert_called_once_with(uri, expected, custom_headers=None) @mock.patch.object(Resource, "ensure_resource_data") @mock.patch.object(connection, "put") @mock.patch.object(TaskMonitor, "wait_for_task") def test_update_return_entity(self, mock_wait4task, mock_put, mock_ensure_resource): uri = "/rest/testuri" dict_to_update = {"resource_name": "a name", "uri": uri} mock_put.return_value = self.task, {} mock_wait4task.return_value = dict_to_update self.resource_client.update(dict_to_update, timeout=-1) self.assertEqual(self.resource_client.data, dict_to_update) @mock.patch.object(Resource, "get_by") def test_get_by_name_with_result(self, mock_get_by): self.resource_client.get_by_name("Resource Name,") mock_get_by.assert_called_once_with("name", "Resource Name,") @mock.patch.object(Resource, "get_by") def test_get_by_name_without_result(self, mock_get_by): mock_get_by.return_value = [] response = self.resource_client.get_by_name("Resource Name,") self.assertIsNone(response) mock_get_by.assert_called_once_with("name", "Resource Name,") @mock.patch.object(connection, "get") def test_get_by_uri(self, mock_get): self.resource_client.get_by_uri("/rest/testuri") mock_get.assert_called_once_with('/rest/testuri') @mock.patch.object(connection, "get") def test_get_by_id_with_result(self, mock_get): self.resource_client.get_by_id("123") mock_get.assert_called_once_with("/rest/testuri/123") @mock.patch.object(connection, "get") def test_get_by_id_without_result(self, mock_get): mock_get.return_value = [] response = self.resource_client.get_by_id("123") self.assertIsNone(response) mock_get.assert_called_once_with("/rest/testuri/123") @mock.patch.object(connection, "get") def test_get_collection_uri(self, mock_get): mock_get.return_value = {"members": [{"key": "value"}, {"key": "value"}]} self.resource_helper.get_collection() mock_get.assert_called_once_with(self.URI) @mock.patch.object(connection, "get") def test_get_collection_with_filter(self, mock_get): mock_get.return_value = {} self.resource_helper.get_collection(filter="name=name") mock_get.assert_called_once_with(self.URI + "?filter=name%3Dname") @mock.patch.object(connection, "get") def test_get_collection_with_path(self, mock_get): mock_get.return_value = {} self.resource_helper.get_collection(path="/test") mock_get.assert_called_once_with(self.URI + "/test") @mock.patch.object(connection, "get") def test_get_collection_with_multiple_filters(self, mock_get): mock_get.return_value = {} self.resource_helper.get_collection(filter=["name1=one", "name2=two", "name=three"]) mock_get.assert_called_once_with(self.URI + "?filter=name1%3Done&filter=name2%3Dtwo&filter=name%3Dthree") @mock.patch.object(connection, "get") def test_get_collection_should_return_list(self, mock_get): mock_get.return_value = {"members": [{"key": "value"}, {"key": "value"}]} collection = self.resource_helper.get_collection() self.assertEqual(len(collection), 2) def test_build_uri_with_id_should_work(self): input = "09USE7335NW35" expected_output = "/rest/testuri/09USE7335NW35" result = self.resource_client._helper.build_uri(input) self.assertEqual(expected_output, result) def test_build_uri_with_uri_should_work(self): input = "/rest/testuri/09USE7335NW3" expected_output = "/rest/testuri/09USE7335NW3" result = self.resource_client._helper.build_uri(input) self.assertEqual(expected_output, result) def test_build_uri_with_none_should_raise_exception(self): try: self.resource_client._helper.build_uri(None) except 
ValueError as exception: self.assertEqual(RESOURCE_CLIENT_INVALID_ID, exception.args[0]) else: self.fail("Expected Exception was not raised") def test_build_uri_with_empty_str_should_raise_exception(self): try: self.resource_client._helper.build_uri('') except ValueError as exception: self.assertEqual(RESOURCE_CLIENT_INVALID_ID, exception.args[0]) else: self.fail("Expected Exception was not raised") def test_build_uri_with_different_resource_uri_should_raise_exception(self): try: self.resource_client._helper.build_uri( "/rest/test/another/resource/uri/09USE7335NW3") except exceptions.HPOneViewUnknownType as exception: self.assertEqual(UNRECOGNIZED_URI, exception.args[0]) else: self.fail("Expected Exception was not raised") def test_build_uri_with_incomplete_uri_should_raise_exception(self): try: self.resource_client._helper.build_uri("/rest/") except exceptions.HPOneViewUnknownType as exception: self.assertEqual(UNRECOGNIZED_URI, exception.args[0]) else: self.fail("Expected Exception was not raised") def test_build_subresource_uri(self): options = [ dict( resource="1", subresource="2", path="sub", uri="/rest/testuri/1/sub/2"), dict( resource="/rest/testuri/3", subresource="4", path="sub", uri="/rest/testuri/3/sub/4"), dict( resource="5", subresource="/rest/testuri/5/sub/6", path="sub", uri="/rest/testuri/5/sub/6"), dict( resource="/rest/testuri/7", subresource="/rest/testuri/7/sub/8", path="sub", uri="/rest/testuri/7/sub/8"), dict( resource=None, subresource="/rest/testuri/9/sub/10", path="sub", uri="/rest/testuri/9/sub/10"), dict( resource="/rest/testuri/11", subresource="12", path="/sub/", uri="/rest/testuri/11/sub/12"), dict( resource="/rest/testuri/13", subresource=None, path="/sub/", uri="/rest/testuri/13/sub"), ] for option in options: uri = self.resource_client._helper.build_subresource_uri(option["resource"], option["subresource"], option["path"]) self.assertEqual(uri, option["uri"]) def test_build_subresource_uri_with_subresourceid_and_without_resource_should_fail(self): try: self.resource_client._helper.build_subresource_uri(None, "123456", "sub-path") except exceptions.HPOneViewValueError as exception: self.assertEqual(RESOURCE_ID_OR_URI_REQUIRED, exception.args[0]) else: self.fail("Expected Exception was not raised") def test_merge_resources(self): resource1 = {"name": "resource1", "type": "resource"} resource2 = {"name": "resource2", "port": "1"} expected_resource = {"name": "resource2", "type": "resource", "port": "1"} merged_resource = merge_resources(resource1, resource2) self.assertEqual(merged_resource, expected_resource) def test_merge_default_values(self): default_type = {"type": "type1"} resource1 = {"name": "resource1"} resource2 = {"name": "resource2"} result_list = merge_default_values([resource1, resource2], default_type) expected_list = [ {"name": "resource1", "type": "type1"}, {"name": "resource2", "type": "type1"} ] self.assertEqual(result_list, expected_list) def test_raise_unavailable_method_exception(self): self.assertRaises(exceptions.HPOneViewUnavailableMethod, unavailable_method) class FakeResource(object): def __init__(self, con): self._connection = con self._client = ResourceClient(con, "/rest/fake/resource") def get_fake(self, uri): return self._client.get(uri) class ResourceClientTest(unittest.TestCase): URI = "/rest/testuri" TYPE_V200 = 'typeV200' TYPE_V300 = 'typeV300' DEFAULT_VALUES = { '200': {'type': TYPE_V200}, '300': {'type': TYPE_V300} } def setUp(self): super(ResourceClientTest, self).setUp() self.host = '127.0.0.1' self.connection = 
connection(self.host, 300) self.resource_client = ResourceClient(self.connection, self.URI) self.task = {"task": "task", "taskState": "Finished"} self.response_body = {"body": "body"} self.custom_headers = {'Accept-Language': 'en_US'} @mock.patch.object(connection, 'get') def test_get_all_called_once(self, mock_get): filter = "'name'='OneViewSDK \"Test FC Network'" sort = 'name:ascending' query = "name NE 'WrongName'" view = '"{view-name}"' scope_uris = '/rest/scopes/cd237b60-09e2-45c4-829e-082e318a6d2a' mock_get.return_value = {"members": [{"member": "member"}]} result = self.resource_client.get_all( 1, 500, filter, query, sort, view, 'name,owner,modified', scope_uris=scope_uris) uri = '{resource_uri}?start=1' \ '&count=500' \ '&filter=%27name%27%3D%27OneViewSDK%20%22Test%20FC%20Network%27' \ '&query=name%20NE%20%27WrongName%27' \ '&sort=name%3Aascending' \ '&view=%22%7Bview-name%7D%22' \ '&fields=name%2Cowner%2Cmodified' \ '&scopeUris=/rest/scopes/cd237b60-09e2-45c4-829e-082e318a6d2a'.format(resource_uri=self.URI) self.assertEqual([{'member': 'member'}], result) mock_get.assert_called_once_with(uri) @mock.patch.object(connection, 'get') def test_get_all_with_defaults(self, mock_get): self.resource_client.get_all() uri = "{resource_uri}?start=0&count=-1".format(resource_uri=self.URI) mock_get.assert_called_once_with(uri) @mock.patch.object(connection, 'get') def test_get_all_with_custom_uri(self, mock_get): self.resource_client.get_all(uri='/rest/testuri/12467836/subresources') uri = "/rest/testuri/12467836/subresources?start=0&count=-1" mock_get.assert_called_once_with(uri) @mock.patch.object(connection, 'get') def test_get_all_with_custom_uri_and_query_string(self, mock_get): self.resource_client.get_all(uri='/rest/testuri/12467836/subresources?param=value') uri = "/rest/testuri/12467836/subresources?param=value&start=0&count=-1" mock_get.assert_called_once_with(uri) @mock.patch.object(connection, 'get') def test_get_all_with_different_resource_uri_should_fail(self, mock_get): try: self.resource_client.get_all(uri='/rest/other/resource/12467836/subresources') except exceptions.HPOneViewUnknownType as e: self.assertEqual(UNRECOGNIZED_URI, e.args[0]) else: self.fail('Expected Exception was not raised') @mock.patch.object(connection, 'get') def test_get_all_should_do_multi_requests_when_response_paginated(self, mock_get): uri_list = ['/rest/testuri?start=0&count=-1', '/rest/testuri?start=3&count=3', '/rest/testuri?start=6&count=3'] results = [{'nextPageUri': uri_list[1], 'members': [{'id': '1'}, {'id': '2'}, {'id': '3'}]}, {'nextPageUri': uri_list[2], 'members': [{'id': '4'}, {'id': '5'}, {'id': '6'}]}, {'nextPageUri': None, 'members': [{'id': '7'}, {'id': '8'}]}] mock_get.side_effect = results self.resource_client.get_all() expected_calls = [call(uri_list[0]), call(uri_list[1]), call(uri_list[2])] self.assertEqual(mock_get.call_args_list, expected_calls) @mock.patch.object(connection, 'get') def test_get_all_with_count_should_do_multi_requests_when_response_paginated(self, mock_get): uri_list = ['/rest/testuri?start=0&count=15', '/rest/testuri?start=3&count=3', '/rest/testuri?start=6&count=3'] results = [{'nextPageUri': uri_list[1], 'members': [{'id': '1'}, {'id': '2'}, {'id': '3'}]}, {'nextPageUri': uri_list[2], 'members': [{'id': '4'}, {'id': '5'}, {'id': '6'}]}, {'nextPageUri': None, 'members': [{'id': '7'}, {'id': '8'}]}] mock_get.side_effect = results self.resource_client.get_all(count=15) expected_calls = [call(uri_list[0]), call(uri_list[1]), call(uri_list[2])] 
self.assertEqual(mock_get.call_args_list, expected_calls) @mock.patch.object(connection, 'get') def test_get_all_should_return_all_items_when_response_paginated(self, mock_get): uri_list = ['/rest/testuri?start=0&count=-1', '/rest/testuri?start=3&count=3', '/rest/testuri?start=6&count=1'] results = [{'nextPageUri': uri_list[1], 'members': [{'id': '1'}, {'id': '2'}, {'id': '3'}]}, {'nextPageUri': uri_list[2], 'members': [{'id': '4'}, {'id': '5'}, {'id': '6'}]}, {'nextPageUri': None, 'members': [{'id': '7'}]}] mock_get.side_effect = results result = self.resource_client.get_all() expected_items = [{'id': '1'}, {'id': '2'}, {'id': '3'}, {'id': '4'}, {'id': '5'}, {'id': '6'}, {'id': '7'}] self.assertSequenceEqual(result, expected_items) @mock.patch.object(connection, 'get') def test_get_all_should_limit_results_to_requested_count_when_response_is_paginated(self, mock_get): uri_list = ['/rest/testuri?start=0&count=15', '/rest/testuri?start=3&count=3', '/rest/testuri?start=6&count=1'] results = [{'nextPageUri': uri_list[1], 'members': [{'id': '1'}, {'id': '2'}, {'id': '3'}]}, {'nextPageUri': uri_list[2], 'members': [{'id': '4'}, {'id': '5'}, {'id': '6'}]}, {'nextPageUri': None, 'members': [{'id': '7'}]}] mock_get.side_effect = results result = self.resource_client.get_all(count=15) expected_items = [{'id': '1'}, {'id': '2'}, {'id': '3'}, {'id': '4'}, {'id': '5'}, {'id': '6'}, {'id': '7'}] self.assertSequenceEqual(result, expected_items) @mock.patch.object(connection, 'get') def test_get_all_should_stop_requests_when_requested_count_reached(self, mock_get): """ In this case, the user provides a maximum number of results to be returned but for pagination purposes, a nextPageUri is returned by OneView. """ uri_list = ['/rest/testuri?start=0&count=3', '/rest/testuri?start=3&count=3', '/rest/testuri?start=6&count=3'] results = [{'nextPageUri': uri_list[1], 'members': [{'id': '1'}, {'id': '2'}, {'id': '3'}]}, {'nextPageUri': uri_list[2], 'members': [{'id': '4'}, {'id': '5'}, {'id': '6'}]}, {'nextPageUri': None, 'members': [{'id': '7'}, {'id': '8'}]}] mock_get.side_effect = results self.resource_client.get_all(count=3) mock_get.assert_called_once_with(uri_list[0]) @mock.patch.object(connection, 'get') def test_get_all_should_stop_requests_when_next_page_is_equal_to_current_page(self, mock_get): uri = '/rest/testuri?start=0&count=-1' members = [{'id': '1'}, {'id': '2'}, {'id': '3'}] mock_get.return_value = { 'nextPageUri': uri, 'members': members, 'uri': uri } result = self.resource_client.get_all() self.assertSequenceEqual(result, members) mock_get.assert_called_once_with(uri) @mock.patch.object(connection, 'get') def test_get_all_should_return_empty_list_when_response_has_no_items(self, mock_get): mock_get.return_value = {'nextPageUri': None, 'members': []} result = self.resource_client.get_all() self.assertEqual(result, []) @mock.patch.object(connection, 'get') def test_get_all_should_return_empty_list_when_no_members(self, mock_get): mock_get.return_value = {'nextPageUri': None, 'members': None} result = self.resource_client.get_all() self.assertEqual(result, []) @mock.patch.object(connection, 'delete') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_delete_all_called_once(self, mock_wait4task, mock_delete): mock_delete.return_value = self.task, self.response_body mock_wait4task.return_value = self.task filter = "name='Exchange Server'" uri = "/rest/testuri?filter=name%3D%27Exchange%20Server%27&force=True" self.resource_client.delete_all(filter=filter, force=True, timeout=-1) 
mock_delete.assert_called_once_with(uri) @mock.patch.object(connection, 'delete') def test_delete_all_should_return_true(self, mock_delete): mock_delete.return_value = None, self.response_body filter = "name='Exchange Server'" result = self.resource_client.delete_all(filter=filter, force=True, timeout=-1) self.assertTrue(result) @mock.patch.object(connection, 'delete') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_delete_all_should_wait_for_task(self, mock_wait4task, mock_delete): mock_delete.return_value = self.task, self.response_body mock_wait4task.return_value = self.task filter = "name='Exchange Server'" delete_task = self.resource_client.delete_all(filter=filter, force=True, timeout=-1) mock_wait4task.assert_called_with(self.task, timeout=-1) self.assertEqual(self.task, delete_task) @mock.patch.object(connection, 'delete') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_delete_by_id_called_once(self, mock_wait4task, mock_delete): mock_delete.return_value = self.task, self.response_body mock_wait4task.return_value = self.task delete_task = self.resource_client.delete('1', force=True, timeout=-1) self.assertEqual(self.task, delete_task) mock_delete.assert_called_once_with(self.URI + "/1?force=True", custom_headers=None) @mock.patch.object(connection, 'delete') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_delete_with_custom_headers(self, mock_wait4task, mock_delete): mock_delete.return_value = self.task, self.response_body mock_wait4task.return_value = self.task self.resource_client.delete('1', custom_headers=self.custom_headers) mock_delete.assert_called_once_with(mock.ANY, custom_headers={'Accept-Language': 'en_US'}) def test_delete_dict_invalid_uri(self): dict_to_delete = {"task": "task", "uri": ""} try: self.resource_client.delete(dict_to_delete, False, -1) except exceptions.HPOneViewUnknownType as e: self.assertEqual("Unknown object type", e.args[0]) else: self.fail() @mock.patch.object(connection, 'get') def test_get_schema_uri(self, mock_get): self.resource_client.get_schema() mock_get.assert_called_once_with(self.URI + "/schema") @mock.patch.object(connection, 'get') def test_get_by_id_uri(self, mock_get): self.resource_client.get('12345') mock_get.assert_called_once_with(self.URI + "/12345") @mock.patch.object(ResourceClient, 'get_by') def test_get_by_name_with_result(self, mock_get_by): mock_get_by.return_value = [{"name": "value"}] response = self.resource_client.get_by_name('Resource Name,') self.assertEqual(response, {"name": "value"}) mock_get_by.assert_called_once_with("name", 'Resource Name,') @mock.patch.object(ResourceClient, 'get_by') def test_get_by_name_without_result(self, mock_get_by): mock_get_by.return_value = [] response = self.resource_client.get_by_name('Resource Name,') self.assertIsNone(response) mock_get_by.assert_called_once_with("name", 'Resource Name,') @mock.patch.object(connection, 'get') def test_get_collection_uri(self, mock_get): mock_get.return_value = {"members": [{"key": "value"}, {"key": "value"}]} self.resource_client.get_collection('12345') mock_get.assert_called_once_with(self.URI + "/12345") @mock.patch.object(connection, 'get') def test_get_collection_with_filter(self, mock_get): mock_get.return_value = {} self.resource_client.get_collection('12345', 'name=name') mock_get.assert_called_once_with(self.URI + "/12345?filter=name%3Dname") @mock.patch.object(connection, 'get') def test_get_collection_with_multiple_filters(self, mock_get): mock_get.return_value = {} self.resource_client.get_collection('12345', 
['name1=one', 'name2=two', 'name=three']) mock_get.assert_called_once_with(self.URI + "/12345?filter=name1%3Done&filter=name2%3Dtwo&filter=name%3Dthree") @mock.patch.object(connection, 'get') def test_get_collection_should_return_list(self, mock_get): mock_get.return_value = {"members": [{"key": "value"}, {"key": "value"}]} collection = self.resource_client.get_collection('12345') self.assertEqual(len(collection), 2) @mock.patch.object(ResourceClient, 'get_all') def test_get_by_property(self, mock_get_all): self.resource_client.get_by('name', 'MyFibreNetwork') mock_get_all.assert_called_once_with(filter="\"name='MyFibreNetwork'\"", uri='/rest/testuri') @mock.patch.object(ResourceClient, 'get_all') def test_get_by_with_incorrect_result_autofix(self, mock_get_all): mock_get_all.return_value = [{"name": "EXpected"}, {"name": "not expected"}] response = self.resource_client.get_by('name', 'exPEcted') self.assertEqual(response, [{"name": "EXpected"}]) mock_get_all.assert_called_once_with(filter="\"name='exPEcted'\"", uri='/rest/testuri') @mock.patch.object(ResourceClient, 'get_all') def test_get_by_with_incorrect_result_skip_autofix(self, mock_get_all): mock_get_all.return_value = [{"name": "expected"}, {"name": "not expected"}] response = self.resource_client.get_by('connection.name', 'expected') self.assertEqual(response, [{'name': 'expected'}, {'name': 'not expected'}]) mock_get_all.assert_called_once_with(filter="\"connection.name='expected'\"", uri='/rest/testuri') @mock.patch.object(ResourceClient, 'get_all') def test_get_by_property_with_uri(self, mock_get_all): self.resource_client.get_by('name', 'MyFibreNetwork', uri='/rest/testuri/5435534/sub') mock_get_all.assert_called_once_with(filter="\"name='MyFibreNetwork'\"", uri='/rest/testuri/5435534/sub') @mock.patch.object(ResourceClient, 'get_all') def test_get_by_property_with__invalid_uri(self, mock_get_all): try: self.resource_client.get_by('name', 'MyFibreNetwork', uri='/rest/other/5435534/sub') except exceptions.HPOneViewUnknownType as e: self.assertEqual('Unrecognized URI for this resource', e.args[0]) else: self.fail() @mock.patch.object(connection, 'put') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_update_with_zero_body_called_once(self, mock_wait4task, mock_update): mock_update.return_value = self.task, self.task mock_wait4task.return_value = self.task self.resource_client.update_with_zero_body('/rest/enclosures/09USE133E5H4/configuration', timeout=-1) mock_update.assert_called_once_with( "/rest/enclosures/09USE133E5H4/configuration", None, custom_headers=None) @mock.patch.object(connection, 'put') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_update_with_zero_body_and_custom_headers(self, mock_wait4task, mock_update): mock_update.return_value = self.task, self.task mock_wait4task.return_value = self.task self.resource_client.update_with_zero_body('1', custom_headers=self.custom_headers) mock_update.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={'Accept-Language': 'en_US'}) @mock.patch.object(connection, 'put') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_update_with_zero_body_return_entity(self, mock_wait4task, mock_put): response_body = {"resource_name": "name"} mock_put.return_value = self.task, self.task mock_wait4task.return_value = response_body result = self.resource_client.update_with_zero_body( '/rest/enclosures/09USE133E5H4/configuration', timeout=-1) self.assertEqual(result, response_body) @mock.patch.object(connection, 'put') def 
test_update_with_zero_body_without_task(self, mock_put): mock_put.return_value = None, self.response_body result = self.resource_client.update_with_zero_body( '/rest/enclosures/09USE133E5H4/configuration', timeout=-1) self.assertEqual(result, self.response_body) @mock.patch.object(connection, 'put') def test_update_with_uri_called_once(self, mock_put): dict_to_update = {"name": "test"} uri = "/rest/resource/test" mock_put.return_value = None, self.response_body response = self.resource_client.update(dict_to_update, uri=uri) self.assertEqual(self.response_body, response) mock_put.assert_called_once_with(uri, dict_to_update, custom_headers=None) @mock.patch.object(connection, 'put') def test_update_with_custom_headers(self, mock_put): dict_to_update = {"name": "test"} mock_put.return_value = None, self.response_body self.resource_client.update(dict_to_update, uri="/path", custom_headers=self.custom_headers) mock_put.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={'Accept-Language': 'en_US'}) @mock.patch.object(connection, 'put') def test_update_with_force(self, mock_put): dict_to_update = {"name": "test"} uri = "/rest/resource/test" mock_put.return_value = None, self.response_body self.resource_client.update(dict_to_update, uri=uri, force=True) expected_uri = "/rest/resource/test?force=True" mock_put.assert_called_once_with(expected_uri, dict_to_update, custom_headers=None) @mock.patch.object(connection, 'put') def test_update_with_api_version_200(self, mock_put): dict_to_update = {"name": "test"} uri = "/rest/resource/test" mock_put.return_value = None, self.response_body self.connection._apiVersion = 200 expected_dict = {"name": "test", "type": self.TYPE_V200} self.resource_client.update(dict_to_update, uri=uri, default_values=self.DEFAULT_VALUES) mock_put.assert_called_once_with(uri, expected_dict, custom_headers=None) @mock.patch.object(connection, 'put') def test_update_with_default_api_version_300(self, mock_put): dict_to_update = {"name": "test"} uri = "/rest/resource/test" mock_put.return_value = None, self.response_body expected_dict = {"name": "test", "type": self.TYPE_V300} self.resource_client.update(dict_to_update, uri=uri, default_values=self.DEFAULT_VALUES) mock_put.assert_called_once_with(uri, expected_dict, custom_headers=None) @mock.patch.object(connection, 'put') def test_update_should_not_override_resource_properties(self, mock_put): dict_to_update = {"name": "test", "type": "anotherType"} uri = "/rest/resource/test" mock_put.return_value = None, self.response_body self.resource_client.update(dict_to_update, uri=uri, default_values=self.DEFAULT_VALUES) mock_put.assert_called_once_with(uri, dict_to_update, custom_headers=None) @mock.patch.object(connection, 'put') def test_update_without_default_values(self, mock_put): dict_to_update = {"name": "test"} uri = "/rest/resource/test" mock_put.return_value = None, self.response_body resource_client = ResourceClient(self.connection, self.URI) resource_client.update(dict_to_update, uri=uri) mock_put.assert_called_once_with(uri, dict_to_update, custom_headers=None) @mock.patch.object(connection, 'put') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_update_uri(self, mock_wait4task, mock_update): dict_to_update = {"resource_data": "resource_data", "uri": "a_uri"} mock_update.return_value = self.task, self.response_body mock_wait4task.return_value = self.task update_task = self.resource_client.update(dict_to_update, False) self.assertEqual(self.task, update_task) mock_update.assert_called_once_with("a_uri", 
dict_to_update, custom_headers=None) @mock.patch.object(connection, 'put') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_update_return_entity(self, mock_wait4task, mock_put): dict_to_update = { "resource_name": "a name", "uri": "a_uri", } mock_put.return_value = self.task, {} mock_wait4task.return_value = dict_to_update result = self.resource_client.update(dict_to_update, timeout=-1) self.assertEqual(result, dict_to_update) @mock.patch.object(connection, 'post') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_create_with_zero_body_called_once(self, mock_wait4task, mock_post): mock_post.return_value = self.task, self.task mock_wait4task.return_value = self.task self.resource_client.create_with_zero_body('/rest/enclosures/09USE133E5H4/configuration', timeout=-1) mock_post.assert_called_once_with( "/rest/enclosures/09USE133E5H4/configuration", {}, custom_headers=None) @mock.patch.object(connection, 'post') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_create_with_zero_body_called_once_without_uri(self, mock_wait4task, mock_post): mock_post.return_value = self.task, self.task mock_wait4task.return_value = self.task self.resource_client.create_with_zero_body(timeout=-1) mock_post.assert_called_once_with( '/rest/testuri', {}, custom_headers=None) @mock.patch.object(connection, 'post') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_create_with_zero_body_and_custom_headers(self, mock_wait4task, mock_post): mock_post.return_value = self.task, self.task mock_wait4task.return_value = self.task self.resource_client.create_with_zero_body('1', custom_headers=self.custom_headers) mock_post.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={'Accept-Language': 'en_US'}) @mock.patch.object(connection, 'post') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_create_with_zero_body_return_entity(self, mock_wait4task, mock_post): response_body = {"resource_name": "name"} mock_post.return_value = self.task, self.task mock_wait4task.return_value = response_body result = self.resource_client.create_with_zero_body( '/rest/enclosures/09USE133E5H4/configuration', timeout=-1) self.assertEqual(result, response_body) @mock.patch.object(connection, 'post') def test_create_with_zero_body_without_task(self, mock_post): mock_post.return_value = None, self.response_body result = self.resource_client.create_with_zero_body( '/rest/enclosures/09USE133E5H4/configuration', timeout=-1) self.assertEqual(result, self.response_body) @mock.patch.object(connection, 'post') def test_create_uri(self, mock_post): dict_to_create = {"resource_name": "a name"} mock_post.return_value = {}, {} self.resource_client.create(dict_to_create, timeout=-1) mock_post.assert_called_once_with(self.URI, dict_to_create, custom_headers=None) @mock.patch.object(connection, 'post') def test_create_uri_with_force(self, mock_post): dict_to_create = {"resource_name": "a name", "force": "yes"} mock_post.return_value = {}, {} self.resource_client.create(dict_to_create, timeout=-1) expected_uri = "/rest/testuri" mock_post.assert_called_once_with(expected_uri, dict_to_create, custom_headers=None) @mock.patch.object(connection, 'post') def test_create_with_api_version_200(self, mock_post): dict_to_create = {"resource_name": "a name"} mock_post.return_value = {}, {} self.connection._apiVersion = 200 expected_dict = {"resource_name": "a name", "type": self.TYPE_V200} self.resource_client.create(dict_to_create, timeout=-1, default_values=self.DEFAULT_VALUES) mock_post.assert_called_once_with(self.URI, 
expected_dict, custom_headers=None) @mock.patch.object(connection, 'post') def test_create_with_default_api_version_300(self, mock_post): dict_to_create = {"resource_name": "a name"} mock_post.return_value = {}, {} expected_dict = {"resource_name": "a name", "type": self.TYPE_V300} self.resource_client.create(dict_to_create, timeout=-1, default_values=self.DEFAULT_VALUES) mock_post.assert_called_once_with(self.URI, expected_dict, custom_headers=None) @mock.patch.object(connection, 'post') def test_create_should_not_override_resource_properties(self, mock_post): dict_to_create = {"resource_name": "a name", "type": "anotherType"} mock_post.return_value = {}, {} self.resource_client.create(dict_to_create, default_values=self.DEFAULT_VALUES) mock_post.assert_called_once_with(self.URI, dict_to_create, custom_headers=None) @mock.patch.object(connection, 'post') def test_create_without_default_values(self, mock_post): dict_to_create = {"resource_name": "a name"} mock_post.return_value = {}, {} resource_client = ResourceClient(self.connection, self.URI) resource_client.create(dict_to_create, timeout=-1) mock_post.assert_called_once_with(self.URI, dict_to_create, custom_headers=None) @mock.patch.object(connection, 'post') def test_create_with_custom_headers(self, mock_post): dict_to_create = {"resource_name": "a name"} mock_post.return_value = {}, {} self.resource_client.create(dict_to_create, custom_headers=self.custom_headers) mock_post.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={'Accept-Language': 'en_US'}) @mock.patch.object(connection, 'post') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_create_return_entity(self, mock_wait4task, mock_post): dict_to_create = { "resource_name": "a name", } created_resource = { "resource_id": "123", "resource_name": "a name", } mock_post.return_value = self.task, {} mock_wait4task.return_value = created_resource result = self.resource_client.create(dict_to_create, -1) self.assertEqual(result, created_resource) @mock.patch.object(connection, 'post') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_wait_for_activity_on_create(self, mock_wait4task, mock_post): mock_post.return_value = self.task, {} mock_wait4task.return_value = self.task self.resource_client.create({"test": "test"}, timeout=60) mock_wait4task.assert_called_once_with(self.task, 60) @mock.patch.object(connection, 'patch') def test_patch_request_when_id_is_provided_v200(self, mock_patch): request_body = [{ 'op': 'replace', 'path': '/name', 'value': 'new_name', }] mock_patch.return_value = {}, {} self.connection._apiVersion = 200 self.resource_client.patch( '123a53cz', 'replace', '/name', 'new_name', 70) mock_patch.assert_called_once_with( '/rest/testuri/123a53cz', request_body, custom_headers={}) @mock.patch.object(connection, 'patch') def test_patch_request_when_id_is_provided_v300(self, mock_patch): request_body = [{ 'op': 'replace', 'path': '/name', 'value': 'new_name', }] mock_patch.return_value = {}, {} resource_client = ResourceClient(self.connection, self.URI) resource_client.patch( '123a53cz', 'replace', '/name', 'new_name', 70) mock_patch.assert_called_once_with( '/rest/testuri/123a53cz', request_body, custom_headers={'Content-Type': 'application/json-patch+json'}) @mock.patch.object(connection, 'patch') def test_patch_request_when_uri_is_provided(self, mock_patch): request_body = [{ 'op': 'replace', 'path': '/name', 'value': 'new_name', }] mock_patch.return_value = {}, {} self.resource_client.patch( '/rest/testuri/123a53cz', 'replace', '/name', 
'new_name', 60) mock_patch.assert_called_once_with( '/rest/testuri/123a53cz', request_body, custom_headers={'Content-Type': 'application/json-patch+json'}) @mock.patch.object(connection, 'patch') def test_patch_with_custom_headers_v200(self, mock_patch): mock_patch.return_value = {}, {} self.connection._apiVersion = 200 self.resource_client.patch('/rest/testuri/123', 'operation', '/field', 'value', custom_headers=self.custom_headers) mock_patch.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={'Accept-Language': 'en_US'}) @mock.patch.object(connection, 'patch') def test_patch_with_custom_headers_v300(self, mock_patch): mock_patch.return_value = {}, {} resource_client = ResourceClient(self.connection, self.URI) resource_client.patch('/rest/testuri/123', 'operation', '/field', 'value', custom_headers=self.custom_headers) mock_patch.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={'Accept-Language': 'en_US', 'Content-Type': 'application/json-patch+json'}) @mock.patch.object(connection, 'patch') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_patch_return_entity(self, mock_wait4task, mock_patch): entity = {"resource_id": "123a53cz"} mock_patch.return_value = self.task, self.task mock_wait4task.return_value = entity result = self.resource_client.patch( '123a53cz', 'replace', '/name', 'new_name', -1) self.assertEqual(result, entity) @mock.patch.object(connection, 'patch') @mock.patch.object(TaskMonitor, 'get_completed_task') def test_patch_request_custom_headers_with_content_type(self, mock_task, mock_patch): dict_info = {"resource_name": "a name"} mock_patch.return_value = {}, {} headers = {'Content-Type': 'application/json', 'Extra': 'extra'} self.connection._apiVersion = 300 resource_client = ResourceClient(self.connection, self.URI) resource_client.patch_request('/rest/testuri/id', body=dict_info, custom_headers=headers) mock_patch.assert_called_once_with('/rest/testuri/id', dict_info, custom_headers=headers) @mock.patch.object(connection, 'patch') @mock.patch.object(TaskMonitor, 'get_completed_task') def test_patch_request_custom_headers(self, mock_task, mock_patch): dict_info = {"resource_name": "a name"} mock_patch.return_value = {}, {} headers = {'Extra': 'extra'} self.connection._apiVersion = 300 resource_client = ResourceClient(self.connection, self.URI) resource_client.patch_request('/rest/testuri/id', body=dict_info, custom_headers=headers) mock_patch.assert_called_once_with( '/rest/testuri/id', dict_info, custom_headers={'Extra': 'extra', 'Content-Type': 'application/json-patch+json'}) @mock.patch.object(connection, 'patch') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_wait_for_activity_on_patch(self, mock_wait4task, mock_patch): entity = {"resource_id": "123a53cz"} mock_patch.return_value = self.task, self.task mock_wait4task.return_value = entity self.resource_client.patch( '123a53cz', 'replace', '/name', 'new_name', -1) mock_wait4task.assert_called_once_with(self.task, mock.ANY) def test_delete_with_none(self): try: self.resource_client.delete(None) except ValueError as e: self.assertTrue("Resource" in e.args[0]) else: self.fail() @mock.patch.object(connection, 'delete') def test_delete_with_dict_uri(self, mock_delete): resource = {"uri": "uri"} mock_delete.return_value = {}, {} delete_result = self.resource_client.delete(resource) self.assertTrue(delete_result) mock_delete.assert_called_once_with("uri", custom_headers=None) def test_delete_with_empty_dict(self): try: self.resource_client.delete({}) except ValueError as e: 
self.assertTrue("Resource" in e.args[0]) else: self.fail() def test_get_with_none(self): try: self.resource_client.get(None) except ValueError as e: self.assertTrue("id" in e.args[0]) else: self.fail() def test_get_collection_with_none(self): try: self.resource_client.get_collection(None) except ValueError as e: self.assertTrue("id" in e.args[0]) else: self.fail() def test_create_with_none(self): try: self.resource_client.create(None) except ValueError as e: self.assertTrue("Resource" in e.args[0]) else: self.fail() def test_create_with_empty_dict(self): try: self.resource_client.create({}) except ValueError as e: self.assertTrue("Resource" in e.args[0]) else: self.fail() def test_update_with_none(self): try: self.resource_client.update(None) except ValueError as e: self.assertTrue("Resource" in e.args[0]) else: self.fail() def test_update_with_empty_dict(self): try: self.resource_client.update({}) except ValueError as e: self.assertTrue("Resource" in e.args[0]) else: self.fail() def test_get_by_with_name_none(self): try: self.resource_client.get_by(None, None) except ValueError as e: self.assertTrue("field" in e.args[0]) else: self.fail() @mock.patch.object(connection, 'get') def test_get_with_uri_should_work(self, mock_get): mock_get.return_value = {} uri = self.URI + "/ad28cf21-8b15-4f92-bdcf-51cb2042db32" self.resource_client.get(uri) mock_get.assert_called_once_with(uri) def test_get_with_uri_with_incompatible_url_shoud_fail(self): message = "Unrecognized URI for this resource" uri = "/rest/interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32" try: self.resource_client.get(uri) except exceptions.HPOneViewUnknownType as exception: self.assertEqual(message, exception.args[0]) else: self.fail("Expected Exception was not raised") def test_get_with_uri_from_another_resource_with_incompatible_url_shoud_fail(self): message = "Unrecognized URI for this resource" uri = "/rest/interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32" fake_resource = FakeResource(None) try: fake_resource.get_fake(uri) except exceptions.HPOneViewUnknownType as exception: self.assertEqual(message, exception.args[0]) else: self.fail("Expected Exception was not raised") @mock.patch.object(connection, 'get') def test_get_utilization_with_args(self, mock_get): self.resource_client.get_utilization('09USE7335NW3', fields='AmbientTemperature,AveragePower,PeakPower', filter='startDate=2016-05-30T03:29:42.361Z', refresh=True, view='day') expected_uri = '/rest/testuri/09USE7335NW3/utilization' \ '?filter=startDate%3D2016-05-30T03%3A29%3A42.361Z' \ '&fields=AmbientTemperature%2CAveragePower%2CPeakPower' \ '&refresh=true' \ '&view=day' mock_get.assert_called_once_with(expected_uri) @mock.patch.object(connection, 'get') def test_get_utilization_with_multiple_filters(self, mock_get): self.resource_client.get_utilization( '09USE7335NW3', fields='AmbientTemperature,AveragePower,PeakPower', filter=['startDate=2016-05-30T03:29:42.361Z', 'endDate=2016-05-31T03:29:42.361Z'], refresh=True, view='day') expected_uri = '/rest/testuri/09USE7335NW3/utilization' \ '?filter=startDate%3D2016-05-30T03%3A29%3A42.361Z' \ '&filter=endDate%3D2016-05-31T03%3A29%3A42.361Z' \ '&fields=AmbientTemperature%2CAveragePower%2CPeakPower' \ '&refresh=true' \ '&view=day' mock_get.assert_called_once_with(expected_uri) @mock.patch.object(connection, 'get') def test_get_utilization_by_id_with_defaults(self, mock_get): self.resource_client.get_utilization('09USE7335NW3') expected_uri = '/rest/testuri/09USE7335NW3/utilization' 
mock_get.assert_called_once_with(expected_uri) @mock.patch.object(connection, 'get') def test_get_utilization_by_uri_with_defaults(self, mock_get): self.resource_client.get_utilization('/rest/testuri/09USE7335NW3') expected_uri = '/rest/testuri/09USE7335NW3/utilization' mock_get.assert_called_once_with(expected_uri) def test_get_utilization_with_empty(self): try: self.resource_client.get_utilization('') except ValueError as exception: self.assertEqual(RESOURCE_CLIENT_INVALID_ID, exception.args[0]) else: self.fail("Expected Exception was not raised") def test_build_uri_with_id_should_work(self): input = '09USE7335NW35' expected_output = '/rest/testuri/09USE7335NW35' result = self.resource_client.build_uri(input) self.assertEqual(expected_output, result) def test_build_uri_with_uri_should_work(self): input = '/rest/testuri/09USE7335NW3' expected_output = '/rest/testuri/09USE7335NW3' result = self.resource_client.build_uri(input) self.assertEqual(expected_output, result) def test_build_uri_with_none_should_raise_exception(self): try: self.resource_client.build_uri(None) except ValueError as exception: self.assertEqual(RESOURCE_CLIENT_INVALID_ID, exception.args[0]) else: self.fail("Expected Exception was not raised") def test_build_uri_with_empty_str_should_raise_exception(self): try: self.resource_client.build_uri('') except ValueError as exception: self.assertEqual(RESOURCE_CLIENT_INVALID_ID, exception.args[0]) else: self.fail("Expected Exception was not raised") def test_build_uri_with_different_resource_uri_should_raise_exception(self): try: self.resource_client.build_uri( '/rest/test/another/resource/uri/09USE7335NW3') except exceptions.HPOneViewUnknownType as exception: self.assertEqual(UNRECOGNIZED_URI, exception.args[0]) else: self.fail("Expected Exception was not raised") def test_build_uri_with_incomplete_uri_should_raise_exception(self): try: self.resource_client.build_uri('/rest/') except exceptions.HPOneViewUnknownType as exception: self.assertEqual(UNRECOGNIZED_URI, exception.args[0]) else: self.fail("Expected Exception was not raised") def test_build_subresource_uri(self): options = [ dict( resource='1', subresource='2', path='sub', uri='/rest/testuri/1/sub/2'), dict( resource='/rest/testuri/3', subresource='4', path='sub', uri='/rest/testuri/3/sub/4'), dict( resource='5', subresource='/rest/testuri/5/sub/6', path='sub', uri='/rest/testuri/5/sub/6'), dict( resource='/rest/testuri/7', subresource='/rest/testuri/7/sub/8', path='sub', uri='/rest/testuri/7/sub/8'), dict( resource=None, subresource='/rest/testuri/9/sub/10', path='sub', uri='/rest/testuri/9/sub/10'), dict( resource='/rest/testuri/11', subresource='12', path='/sub/', uri='/rest/testuri/11/sub/12'), dict( resource='/rest/testuri/13', subresource=None, path='/sub/', uri='/rest/testuri/13/sub'), ] for option in options: uri = self.resource_client.build_subresource_uri(option['resource'], option['subresource'], option['path']) self.assertEqual(uri, option['uri']) def test_build_subresource_uri_with_subresourceid_and_without_resource_should_fail(self): try: self.resource_client.build_subresource_uri(None, "123456", 'sub-path') except exceptions.HPOneViewValueError as exception: self.assertEqual(RESOURCE_ID_OR_URI_REQUIRED, exception.args[0]) else: self.fail("Expected Exception was not raised") @mock.patch.object(connection, 'post') @mock.patch.object(TaskMonitor, 'get_completed_task') def test_create_report_should_do_post_request(self, mock_get_completed_task, mock_post): task_with_output = self.task.copy() 
task_with_output['taskOutput'] = [] mock_post.return_value = self.task, {} mock_get_completed_task.return_value = task_with_output self.resource_client.create_report("/rest/path/create-report") mock_post.assert_called_once_with("/rest/path/create-report", {}) @mock.patch.object(connection, 'post') @mock.patch.object(TaskMonitor, 'get_completed_task') def test_create_report_should_wait_task_completion(self, mock_get_completed_task, mock_post): task_with_output = self.task.copy() task_with_output['taskOutput'] = [] mock_post.return_value = self.task, {} mock_get_completed_task.return_value = task_with_output self.resource_client.create_report("/rest/path/create-report", timeout=60) mock_get_completed_task.assert_called_once_with(self.task, 60) @mock.patch.object(connection, 'post') @mock.patch.object(TaskMonitor, 'get_completed_task') def test_create_report_should_return_output_list_when_results(self, mock_get_completed_task, mock_post): task_output = [ {"type": "FCIssueResponseV2", "created": "2015-03-24T15: 32: 50.889Z"}, {"type": "FCIssueResponseV2", "created": "2015-03-13T14: 10: 50.322Z"} ] task_with_output = self.task.copy() task_with_output['taskOutput'] = task_output mock_post.return_value = self.task, {} mock_get_completed_task.return_value = task_with_output result = self.resource_client.create_report("/rest/path/create-report") self.assertEqual(result, task_output) @mock.patch.object(connection, 'post') @mock.patch.object(TaskMonitor, 'get_completed_task') def test_create_report_should_return_empty_list_when_output_is_empty(self, mock_get_completed_task, mock_post): task_with_output = self.task.copy() task_with_output['taskOutput'] = [] mock_post.return_value = self.task, {} mock_get_completed_task.return_value = task_with_output result = self.resource_client.create_report("/rest/path/create-report") self.assertEqual(result, []) @mock.patch.object(connection, 'post') def test_create_report_should_raise_exception_when_not_task(self, mock_post): task_with_output = self.task.copy() task_with_output['taskOutput'] = [] mock_post.return_value = None, {} try: self.resource_client.create_report("/rest/path/create-report") except exceptions.HPOneViewException as exception: self.assertEqual(RESOURCE_CLIENT_TASK_EXPECTED, exception.args[0]) else: self.fail("Expected Exception was not raised") @mock.patch.object(connection, 'post') def test_create_when_the_resource_is_a_list(self, mock_post): dict_to_create = [{"resource_name": "a name"}] mock_post.return_value = {}, {} resource_client = ResourceClient(self.connection, self.URI) resource_client.create(dict_to_create, timeout=-1) mock_post.assert_called_once_with(self.URI, dict_to_create, custom_headers=None) def test_merge_api_default_values(self): resource = {'name': 'resource1'} default_values = { '200': {"type": "EnclosureGroupV200"}, '300': {"type": "EnclosureGroupV300"} } expected = {'name': 'resource1', "type": "EnclosureGroupV300"} resource_client = ResourceClient(self.connection, self.URI) result = resource_client.merge_default_values(resource, default_values) self.assertEqual(result, expected) def test_should_not_merge_when_default_values_not_defined(self): resource = {'name': 'resource1'} default_values = {} expected = {'name': 'resource1'} resource_client = ResourceClient(self.connection, self.URI) result = resource_client.merge_default_values(resource, default_values) self.assertEqual(result, expected) @mock.patch.object(connection, 'post_multipart_with_response_handling') def test_upload_should_call_post_multipart(self, 
mock_post_multipart): uri = '/rest/testuri/' filepath = "test/SPPgen9snap6.2015_0405.81.iso" mock_post_multipart.return_value = None, mock.Mock() self.resource_client.upload(filepath, uri) mock_post_multipart.assert_called_once_with(uri, filepath, 'SPPgen9snap6.2015_0405.81.iso') @mock.patch.object(connection, 'post_multipart_with_response_handling') def test_upload_should_call_post_multipart_with_resource_uri_when_not_uri_provided(self, mock_post_multipart): filepath = "test/SPPgen9snap6.2015_0405.81.iso" mock_post_multipart.return_value = None, mock.Mock() self.resource_client.upload(filepath) mock_post_multipart.assert_called_once_with('/rest/testuri', mock.ANY, mock.ANY) @mock.patch.object(connection, 'post_multipart_with_response_handling') @mock.patch.object(TaskMonitor, 'wait_for_task') @mock.patch.object(connection, 'get') def test_upload_should_wait_for_task_when_response_is_task(self, mock_get, mock_wait4task, mock_post_multipart): uri = '/rest/testuri/' filepath = "test/SPPgen9snap6.2015_0405.81.iso" mock_post_multipart.return_value = self.task, mock.Mock() self.resource_client.upload(filepath, uri) mock_wait4task.assert_called_once_with(self.task, -1) @mock.patch.object(connection, 'post_multipart_with_response_handling') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_upload_should_not_wait_for_task_when_response_is_not_task(self, mock_wait4task, mock_post_multipart): uri = '/rest/testuri/' filepath = "test/SPPgen9snap6.2015_0405.81.iso" mock_post_multipart.return_value = None, mock.Mock() self.resource_client.upload(filepath, uri) mock_wait4task.not_been_called() @mock.patch.object(connection, 'post_multipart_with_response_handling') @mock.patch.object(TaskMonitor, 'wait_for_task') @mock.patch.object(connection, 'get') def test_upload_should_return_associated_resource_when_response_is_task(self, mock_get, mock_wait4task, mock_post_multipart): fake_associated_resurce = mock.Mock() uri = '/rest/testuri/' filepath = "test/SPPgen9snap6.2015_0405.81.iso" mock_post_multipart.return_value = self.task, mock.Mock() mock_wait4task.return_value = fake_associated_resurce result = self.resource_client.upload(filepath, uri) self.assertEqual(result, fake_associated_resurce) @mock.patch.object(connection, 'post_multipart_with_response_handling') @mock.patch.object(TaskMonitor, 'wait_for_task') def test_upload_should_return_resource_when_response_is_not_task(self, mock_wait4task, mock_post_multipart): fake_response_body = mock.Mock() uri = '/rest/testuri/' filepath = "test/SPPgen9snap6.2015_0405.81.iso" mock_post_multipart.return_value = None, fake_response_body result = self.resource_client.upload(filepath, uri) self.assertEqual(result, fake_response_body) @mock.patch.object(connection, 'download_to_stream') @mock.patch(mock_builtin('open')) def test_download_should_call_download_to_stream_with_given_uri(self, mock_open, mock_download_to_stream): file_path = "~/archive.log" uri = '/rest/testuri/3ec91dd2-0ebb-4484-8b2d-90d065114315' mock_open.return_value = io.StringIO() self.resource_client.download(uri, file_path) mock_download_to_stream.assert_called_once_with(mock.ANY, uri, custom_headers=mock.ANY) @mock.patch.object(connection, 'download_to_stream') @mock.patch(mock_builtin('open')) def test_download_should_call_download_to_stream_with_open_file(self, mock_open, mock_download_to_stream): file_path = "~/archive.log" uri = '/rest/testuri/3ec91dd2-0ebb-4484-8b2d-90d065114315' fake_file = io.StringIO() mock_open.return_value = fake_file self.resource_client.download(uri, file_path) 
mock_open.assert_called_once_with(file_path, 'wb') mock_download_to_stream.assert_called_once_with(fake_file, uri, custom_headers=mock.ANY) @mock.patch.object(connection, 'download_to_stream') @mock.patch(mock_builtin('open')) def test_download_should_return_true_when_success(self, mock_open, mock_download_to_stream): file_path = "~/archive.log" uri = '/rest/testuri/3ec91dd2-0ebb-4484-8b2d-90d065114315' mock_download_to_stream.return_value = True mock_open.return_value = io.StringIO() result = self.resource_client.download(uri, file_path) self.assertTrue(result) @mock.patch.object(connection, 'download_to_stream') @mock.patch(mock_builtin('open')) def test_download_should_return_false_when_error(self, mock_open, mock_download_to_stream): file_path = "~/archive.log" uri = '/rest/testuri/3ec91dd2-0ebb-4484-8b2d-90d065114315' mock_download_to_stream.return_value = False mock_open.return_value = io.StringIO() result = self.resource_client.download(uri, file_path) self.assertFalse(result) def test_transform_list_to_dict(self): list = ['one', 'two', {'tree': 3}, 'four', 5] dict_transformed = transform_list_to_dict(list=list) self.assertEqual(dict_transformed, {'5': True, 'four': True, 'one': True, 'tree': 3, 'two': True}) def test_extract_id_from_uri(self): uri = '/rest/plan-scripts/3518be0e-17c1-4189-8f81-83f3724f6155' id = '3518be0e-17c1-4189-8f81-83f3724f6155' extracted_id = extract_id_from_uri(uri) self.assertEqual(id, extracted_id) def test_extract_id_from_uri_with_extra_slash(self): uri = '/rest/plan-scripts/3518be0e-17c1-4189-8f81-83f3724f6155/' extracted_id = extract_id_from_uri(uri) self.assertEqual(extracted_id, '') def test_extract_id_from_uri_passing_id(self): uri = '3518be0e-17c1-4189-8f81-83f3724f6155' extracted_id = extract_id_from_uri(uri) self.assertEqual(extracted_id, '3518be0e-17c1-4189-8f81-83f3724f6155') def test_extract_id_from_uri_unsupported(self): # This example is not supported yet uri = '/rest/plan-scripts/3518be0e-17c1-4189-8f81-83f3724f6155/otherthing' extracted_id = extract_id_from_uri(uri) self.assertEqual(extracted_id, 'otherthing')
42.21117
127
0.665727
102,003
0.985117
0
0
82,450
0.79628
0
0
20,164
0.194738
5d72ba13d02bd291e6fb1bfd1e3d024e5c0779f3
316
py
Python
workalendar/usa/colorado.py
vanadium23/workalendar
4c67b5a7900fa56d7a93b767c6cbd8f1cc6b70a7
[ "MIT" ]
null
null
null
workalendar/usa/colorado.py
vanadium23/workalendar
4c67b5a7900fa56d7a93b767c6cbd8f1cc6b70a7
[ "MIT" ]
null
null
null
workalendar/usa/colorado.py
vanadium23/workalendar
4c67b5a7900fa56d7a93b767c6cbd8f1cc6b70a7
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

from .core import UnitedStates


class Colorado(UnitedStates):
    """Colorado"""
    # Colorado has only federal state holidays.
    # NOTE: Cesar Chavez Day is an optional holiday
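A brief usage sketch (added for illustration, not part of the sampled file): since the class above only inherits the federal holiday set, a caller would query it through workalendar's standard calendar API; the year and date below are arbitrary examples.

from datetime import date

from workalendar.usa import Colorado

cal = Colorado()
# List the holidays observed in a given year as (date, label) pairs.
for day, label in cal.holidays(2020):
    print(day, label)
# Check whether a specific date counts as a working day.
print(cal.is_working_day(date(2020, 7, 6)))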
26.333333
66
0.677215
148
0.468354
0
0
0
0
0
0
127
0.401899
5d72e502922a05a806cd65843fb08ea2947b5e7b
3,358
py
Python
dataloaders/augmentation.py
thierrypin/gei-pool
0a9e79b01148735f0e975c50d2476e41ba20af4f
[ "MIT" ]
null
null
null
dataloaders/augmentation.py
thierrypin/gei-pool
0a9e79b01148735f0e975c50d2476e41ba20af4f
[ "MIT" ]
null
null
null
dataloaders/augmentation.py
thierrypin/gei-pool
0a9e79b01148735f0e975c50d2476e41ba20af4f
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import random

import numpy as np


# Generic data augmentation
class Augmenter:
    """ Generic data augmentation class with chained operations
    """
    def __init__(self, ops=[]):
        if not isinstance(ops, list):
            print("Error: ops must be a list of functions")
            quit()
        self.ops = ops

    def add(self, op):
        self.ops.append(op)

    def augment(self, img):
        aug = img.copy()
        for op in self.ops:
            aug = op(aug)
        return aug

    def __call__(self, img):
        return self.augment(img)


##########
# Images #
##########

def horizontal_flip(p=0.5):
    def fc(img):
        if random.random() < p:
            return img[..., ::-1]
        else:
            return img
    return fc


def vertical_flip(p=0.5):
    def fc(img):
        if random.random() < p:
            return img[..., ::-1, :]
        else:
            return img
    return fc


def gaussian_noise(p=0.5, mean=0, sigma=0.02):
    def fc(img):
        if random.random() < p:
            gauss = np.random.normal(mean, sigma, img.shape).astype(np.float32)
            return img + gauss
        else:
            return img
    return fc


def black_vstripe(p=0.5, size=10):
    def fc(img):
        if random.random() < p:
            j = int(random.random() * (img.shape[1]-size))
            img[..., j:j+size] = 0
            return img
        else:
            return img
    return fc


def black_hstripe(p=0.5, size=10):
    def fc(img):
        if random.random() < p:
            j = int(random.random() * (img.shape[0]-size))
            img[..., j:j+size, :] = 0
            return img
        else:
            return img
    return fc


def default_augmenter(p=0.5, strip_size=3, mean=0, sigma=0.02):
    """Default data augmentation with horizontal flip, vertical flip, gaussian noise, black hstripe, and black vstripe.

    Returns:
        Augmenter object. Use as: aug.augment(img)
    """
    print("Using default image augmenter")
    return Augmenter([horizontal_flip(p), gaussian_noise(p, mean, sigma), black_hstripe(p, size=strip_size), black_vstripe(p, size=strip_size)])


##########
# Videos #
##########

def horizontal_flip_vid(p=0.5):
    def fc(vid):
        if random.random() < p:
            return vid[..., ::-1]
        else:
            return vid
    return fc


def black_vstripe_vid(p=0.5, size=10):
    def fc(batch):
        if random.random() < p:
            j = int(random.random() * (batch.shape[-1]-size))
            batch[..., j:j+size] = 0
            return batch
        else:
            return batch
    return fc


def black_hstripe_vid(p=0.5, size=10):
    def fc(batch):
        if random.random() < p:
            j = int(random.random() * (batch.shape[-2]-size))
            batch[..., j:j+size, :] = 0
            return batch
        else:
            return batch
    return fc


def default_augmenter_vid(p=0.5, strip_size=3, mean=0, sigma=0.02):
    """Default data augmentation with horizontal flip, gaussian noise, black hstripe, and black vstripe.

    Returns:
        Augmenter object. Use as: aug.augment(img)
    """
    return Augmenter([horizontal_flip_vid(p), gaussian_noise(p, mean, sigma), black_hstripe_vid(p, size=strip_size), black_vstripe_vid(p, size=strip_size)])
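A brief usage sketch (added for illustration, not part of the sampled file): the docstring above says to call aug.augment(img); the snippet below assumes a single-channel image stored as a float32 NumPy array of shape (height, width), which is the layout the flip and stripe operations index into.

import numpy as np

# Build the default augmentation chain once, then apply it per image.
aug = default_augmenter(p=0.5, strip_size=3)

img = np.random.randint(0, 255, size=(128, 88)).astype(np.float32)
augmented = aug(img)  # equivalent to aug.augment(img)
assert augmented.shape == img.shape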
25.24812
158
0.549136
511
0.152174
0
0
0
0
0
0
638
0.189994
5d72ea525d5fca207b00f29574de0ed2864d8b1b
7,229
py
Python
cigeo/admin.py
CzechInvest/ciis
c6102598f564a717472e5e31e7eb894bba2c8104
[ "MIT" ]
1
2019-05-26T22:24:01.000Z
2019-05-26T22:24:01.000Z
cigeo/admin.py
CzechInvest/ciis
c6102598f564a717472e5e31e7eb894bba2c8104
[ "MIT" ]
6
2019-01-22T14:53:43.000Z
2020-09-22T16:20:28.000Z
cigeo/admin.py
CzechInvest/ciis
c6102598f564a717472e5e31e7eb894bba2c8104
[ "MIT" ]
null
null
null
from django.contrib import admin from django.contrib.gis import geos from leaflet.admin import LeafletGeoAdmin, LeafletGeoAdminMixin from .models import Lau1 from .models import Nuts3 from .models import Airport from .models import Road from .models import PublicTransportStop from .models import RailwayStation from django.urls import reverse from django.utils.translation import ugettext_lazy as _ import nested_admin import uuid import json class AirportAdmin(LeafletGeoAdmin): default_zoom = 7 default_lon = 1730000 default_lat = 6430000 #readonly_fields = ("code", "name",) class RoadAdmin(LeafletGeoAdmin): default_zoom = 7 default_lon = 1730000 default_lat = 6430000 #readonly_fields = ("code", "name",) class RailwayStationAdmin(LeafletGeoAdmin): default_zoom = 7 default_lon = 1730000 default_lat = 6430000 #readonly_fields = ("code", "name",) class PublicTransportStopAdmin(LeafletGeoAdmin): default_zoom = 7 default_lon = 1730000 default_lat = 6430000 #readonly_fields = ("code", "name",) class LAU1Admin(LeafletGeoAdmin): default_zoom = 7 default_lon = 1730000 default_lat = 6430000 #readonly_fields = ("code", "name",) class NUTS3Admin(LeafletGeoAdmin): default_zoom = 7 default_lon = 1730000 default_lat = 6430000 #readonly_fields = ("code", "name",) class NUTS3AdminInline(LeafletGeoAdminMixin, admin.StackedInline): model = Nuts3 class LAU1AdminInline(LeafletGeoAdminMixin, admin.StackedInline): model = Lau1 class NUTS3Filter(admin.SimpleListFilter): """Filter for admin interface of NUTS3 regions (Kraje) """ title = _('NUTS3 regions') parameter_name = 'nuts3#' def lookups(self, request, model_admin): nuts3 = Nuts3.objects.all() return ( (obj.id, obj.name) for obj in nuts3 ) def queryset(self, request, queryset): val = self.value() if val: nuts3 = Nuts3.objects.get(pk=val) results = queryset.filter( location__geometry__intersects=nuts3.geometry) else: results = queryset return results class ArealFieldAdmin(nested_admin.NestedModelAdmin): geojson_attributes = [] def get_place(self, obj): if hasattr(obj.location, "address") and \ obj.location.address is not None: return obj.location.address.city else: return ", ".join( [l.__str__() for l in Nuts3.objects.filter( geometry__intersects=obj.location.geometry)]) def get_search_results(self, request, queryset, search_term): """Add NUTS3 (by name) search and area size search (using `<>` operator) """ result, use_distinct = super( ArealFieldAdmin, self).get_search_results( request, queryset, search_term) if search_term: if len(result) == 0 or len(result) == len(queryset): result = self._search_lay1_nuts3_by_name( queryset, search_term) if len(result) == 0 or len(result) == len(queryset): result = self._search_area(queryset, search_term) return (result, use_distinct) def _search_lay1_nuts3_by_name(self, queryset, search_term): """Search NUTS3 (kraje) and LAU1 (okresy) region according to name """ filtered = queryset.none() for cls in (Lau1, Nuts3): objs = cls.objects.filter(name__startswith=search_term) for o in objs: objects = queryset.filter( location__geometry__intersects=o.geometry) filtered |= objects return filtered def _search_area(self, queryset, search_term): """Search all features, where MIN < area.total < MAX """ filtered = queryset.none() if search_term.find("<>") > -1: area_min, area_max = [float(x) for x in search_term.split("<>")] filtered = queryset.filter( areal__area__total__gte=area_min, areal__area__total__lte=area_max) return filtered def changelist_view(self, request, extra_context=None): """Adjust change list view add GeoJSON encoded 
data for the queryset """ extra_context = extra_context or {} response = super().changelist_view( request, extra_context=extra_context, ) if hasattr(response, "context_data"): filtered_query_set = response.context_data["cl"].queryset extra_context['objects_data'] = \ json.dumps(self.as_geojson(filtered_query_set)) response.context_data.update(extra_context) return response def as_geojson(self, queryset): if self.geojson_attributes: attributes = self.geojson_attributes else: attributes = [] data = { "type": "FeatureCollection", "features": [] } for obj in queryset: geom = None if hasattr(obj, "location_set"): multipoint = geos.MultiPoint( [loc.address.coordinates for loc in obj.location_set.all()]) geom = multipoint.centroid elif hasattr(obj, "location"): geom = obj.location.geometry.centroid elif hasattr(obj, "geom"): geom = obj.geom elif hasattr(obj, "address"): geom = obj.address.coordinates if geom: title = None if hasattr(obj, "title"): title = obj.title elif hasattr(obj, "name"): title = obj.name if type(obj.pk) == uuid.UUID: id = str(obj.pk) else: id = obj.pk feature = { "type": "Feature", "properties": { "name": title, "object_url": reverse('admin:{}_{}_change'.format( obj._meta.app_label, obj._meta.model_name), args=(obj.pk,)), }, "geometry": json.loads(geom.json), "id": id } for attribute in attributes: if hasattr(obj, attribute): value = getattr(obj, attribute.__str__()) if type(value) == uuid.UUID: feature[attribute] = str(value) else: feature[attribute] = value #print(feature) data["features"].append(feature) return data # Register your models here. admin.site.register(Lau1, LAU1Admin) admin.site.register(Nuts3, NUTS3Admin) admin.site.register(Road, RoadAdmin) admin.site.register(PublicTransportStop, PublicTransportStopAdmin) admin.site.register(RailwayStation, RailwayStationAdmin) admin.site.register(Airport, AirportAdmin)
30.761702
80
0.580025
6,447
0.891825
0
0
0
0
0
0
887
0.1227
5d730d1afb5f1402b6e9a016eacea8ab0f918612
858
py
Python
umbra/monitor/main.py
RafaelAPB/umbra
cf075bbe73e46540e9edee25f9ec3d0828620d5f
[ "Apache-2.0" ]
null
null
null
umbra/monitor/main.py
RafaelAPB/umbra
cf075bbe73e46540e9edee25f9ec3d0828620d5f
[ "Apache-2.0" ]
null
null
null
umbra/monitor/main.py
RafaelAPB/umbra
cf075bbe73e46540e9edee25f9ec3d0828620d5f
[ "Apache-2.0" ]
null
null
null
import logging import json import asyncio from google.protobuf import json_format from umbra.common.protobuf.umbra_grpc import MonitorBase from umbra.common.protobuf.umbra_pb2 import Instruction, Snapshot from umbra.monitor.tools import Tools logger = logging.getLogger(__name__) logging.getLogger("hpack").setLevel(logging.WARNING) class Monitor(MonitorBase): def __init__(self, info): self.tools = Tools() async def Listen(self, stream): logging.debug("Instruction Received") instruction: Instruction = await stream.recv_message() instruction_dict = json_format.MessageToDict(instruction, preserving_proto_field_name=True) snapshot_dict = await self.tools.handle(instruction_dict) snapshot = json_format.ParseDict(snapshot_dict, Snapshot()) await stream.send_message(snapshot)
31.777778
99
0.757576
518
0.60373
0
0
0
0
426
0.496503
29
0.0338
5d733960d2eb830da7ca11bb10495536367425c3
6,207
py
Python
pycs/spells/hunters_mark.py
dwagon/pycs
4d02acbf380526d3bf0380f6bb8b757a827024b8
[ "MIT" ]
null
null
null
pycs/spells/hunters_mark.py
dwagon/pycs
4d02acbf380526d3bf0380f6bb8b757a827024b8
[ "MIT" ]
null
null
null
pycs/spells/hunters_mark.py
dwagon/pycs
4d02acbf380526d3bf0380f6bb8b757a827024b8
[ "MIT" ]
null
null
null
"""https://www.dndbeyond.com/spells/hunters-mark""" from unittest.mock import patch import dice from pycs.constant import ActionCategory from pycs.constant import SpellType from pycs.creature import Creature from pycs.effect import Effect from pycs.gear import Shortbow from pycs.spell import SpellAction from pycs.spells.spelltest import SpellTest ############################################################################## ############################################################################## ############################################################################## class HuntersMark(SpellAction): """You choose a creature you can see within range and mystically mark it as your quarry. Until the spell ends, you deal an extra 1d6 damage to the target whenever you hit it with a weapon attack, and you have advantage on any Wisdom (Perception) or Wisdom (Survival) check you make to find it. If the target drops to 0 hit points before this spell ends, you can use a bonus action on a subsequent turn of yours to mark a new creature. At Higher Levels. When you cast this spell using a spell slot of 3rd or 4th level, you can maintain your concentration on the spell for up to 8 hours. When you use a spell slot of 5th level or higher, you can maintain your concentration on the spell for up to 24 hours.""" ########################################################################## def __init__(self, **kwargs): name = "Hunters Mark" kwargs.update( { "category": ActionCategory.BONUS, "concentration": SpellType.CONCENTRATION, "level": 1, "reach": 90, "type": SpellType.BUFF, } ) super().__init__(name, **kwargs) self._victim = None ########################################################################## def heuristic(self): """Should we do the spell""" if self.pick_target(): return 6 print("No enemy in range") return 0 ########################################################################## def pick_target(self): """Who should we do the spell to""" for enemy in self.owner.pick_closest_enemy(): if self.owner.distance(enemy) > self.range()[0]: continue if enemy.has_effect("Hunters Mark"): continue self.target = enemy return enemy return None ########################################################################## def cast(self): """Do the spell""" self._victim = self.target self._victim.add_effect(HuntersMarkEffect(caster=self.owner)) print(f"Cast Hunters Mark on {self._victim}") ########################################################################## def end_concentration(self): """What happens when we stop concentrating""" if self._victim: print(f"Removing Hunters Mark from {self._victim}") self._victim.remove_effect("Hunters Mark") self._victim = None ############################################################################## ############################################################################## ############################################################################## class HuntersMarkEffect(Effect): """Hunters Mark Effect""" ########################################################################## def __init__(self, **kwargs): """Initialise""" super().__init__("Hunters Mark", **kwargs) ########################################################################## def hook_target_additional_damage(self, _, source, target): """More damage""" if source == self.caster: return ("1d6", 0, None) return ("", 0, None) ############################################################################## ############################################################################## 
############################################################################## class TestHuntersMark(SpellTest): """Test Spell""" ########################################################################## def setUp(self): """test setup""" super().setUp() self.caster.add_action(HuntersMark()) ########################################################################## def test_cast(self): """test casting""" self.caster.options_this_turn = [ActionCategory.BONUS] self.assertFalse(self.enemy.has_effect("Hunters Mark")) self.caster.do_stuff(categ=ActionCategory.BONUS, moveto=False) self.assertTrue(self.enemy.has_effect("Hunters Mark")) ########################################################################## def test_effect(self): """Test the effect of casting the spell""" print(self.caster.arena) self.caster.moves = 99 self.caster.options_this_turn = [ActionCategory.BONUS, ActionCategory.ACTION] self.caster.do_stuff(categ=ActionCategory.BONUS, moveto=True) self.assertTrue(self.enemy.has_effect("Hunters Mark")) self.caster.add_gear(Shortbow()) self.assertEqual(len(self.enemy.damage_this_turn), 0) with patch.object(Creature, "rolld20") as mock: mock.return_value = 18 with patch.object(dice, "roll") as mock_dice: mock_dice.return_value = 5 self.caster.do_stuff(categ=ActionCategory.ACTION, moveto=True) print(f"{self.enemy.damage_this_turn=}") self.assertEqual(len(self.enemy.damage_this_turn), 2) ########################################################################## def test_removal(self): """Test the effect gets removed""" self.caster.options_this_turn = [ActionCategory.BONUS] self.caster.do_stuff(categ=ActionCategory.BONUS, moveto=False) self.assertTrue(self.enemy.has_effect("Hunters Mark")) self.caster.remove_concentration() self.assertFalse(self.enemy.has_effect("Hunters Mark")) # EOF
41.10596
85
0.475914
5,129
0.826325
0
0
0
0
0
0
2,961
0.477042
5d7378673807f7e0283f1553a575bc82a4166826
390
py
Python
utilities.py
armandok/pySLAM-D
ef7398806e021885b29702adf55acbedaf544ce6
[ "MIT" ]
10
2020-12-24T16:40:46.000Z
2022-02-01T18:09:13.000Z
utilities.py
armandok/pySLAM-D
ef7398806e021885b29702adf55acbedaf544ce6
[ "MIT" ]
null
null
null
utilities.py
armandok/pySLAM-D
ef7398806e021885b29702adf55acbedaf544ce6
[ "MIT" ]
null
null
null
import numpy as np def rot_to_angle(rot): return np.arccos(0.5*np.trace(rot)-0.5) def rot_to_heading(rot): # This function calculates the heading angle of the rot matrix w.r.t. the y-axis new_rot = rot[0:3:2, 0:3:2] # remove the mid row and column corresponding to the y-axis new_rot = new_rot/np.linalg.det(new_rot) return np.arctan2(new_rot[1, 0], new_rot[0, 0])
30
92
0.694872
0
0
0
0
0
0
0
0
139
0.35641
5d7393f1d5071cf9d02bab3da993f038421d4d57
1,175
py
Python
robosuite/models/grippers/__init__.py
kyungjaelee/robosuite
0d73fcca9ed8e638632f4bd7b0f1b8ebf4640fb1
[ "MIT" ]
397
2020-09-28T02:49:58.000Z
2022-03-30T18:08:19.000Z
robosuite/models/grippers/__init__.py
kyungjaelee/robosuite
0d73fcca9ed8e638632f4bd7b0f1b8ebf4640fb1
[ "MIT" ]
169
2020-09-28T02:17:59.000Z
2022-03-29T13:32:43.000Z
robosuite/models/grippers/__init__.py
kyungjaelee/robosuite
0d73fcca9ed8e638632f4bd7b0f1b8ebf4640fb1
[ "MIT" ]
131
2020-09-28T14:50:35.000Z
2022-03-31T02:27:33.000Z
from .gripper_model import GripperModel from .gripper_factory import gripper_factory from .gripper_tester import GripperTester from .panda_gripper import PandaGripper from .rethink_gripper import RethinkGripper from .robotiq_85_gripper import Robotiq85Gripper from .robotiq_three_finger_gripper import RobotiqThreeFingerGripper, RobotiqThreeFingerDexterousGripper from .panda_gripper import PandaGripper from .jaco_three_finger_gripper import JacoThreeFingerGripper, JacoThreeFingerDexterousGripper from .robotiq_140_gripper import Robotiq140Gripper from .wiping_gripper import WipingGripper from .null_gripper import NullGripper GRIPPER_MAPPING = { "RethinkGripper": RethinkGripper, "PandaGripper": PandaGripper, "JacoThreeFingerGripper": JacoThreeFingerGripper, "JacoThreeFingerDexterousGripper": JacoThreeFingerDexterousGripper, "WipingGripper": WipingGripper, "Robotiq85Gripper": Robotiq85Gripper, "Robotiq140Gripper": Robotiq140Gripper, "RobotiqThreeFingerGripper": RobotiqThreeFingerGripper, "RobotiqThreeFingerDexterousGripper": RobotiqThreeFingerDexterousGripper, None: NullGripper, } ALL_GRIPPERS = GRIPPER_MAPPING.keys()
39.166667
103
0.846809
0
0
0
0
0
0
0
0
202
0.171915
5d7596fcdc1125f69dea760f3f07ca8ccf07185d
7,509
py
Python
src/pose/visualizations/visualizations.py
Idein/chainer-hand-pose
45c7b629a74bf13da8cc9b47d0ded7099c139e9b
[ "Apache-2.0" ]
11
2019-12-14T07:55:52.000Z
2021-06-22T06:38:34.000Z
src/pose/visualizations/visualizations.py
terasakisatoshi/chainer-hand-pose
a47e0c61c4fea3369db566eea3d539d1c9398bf7
[ "Apache-2.0" ]
1
2020-06-17T21:39:48.000Z
2020-06-26T13:16:43.000Z
src/pose/visualizations/visualizations.py
terasakisatoshi/chainer-hand-pose
a47e0c61c4fea3369db566eea3d539d1c9398bf7
[ "Apache-2.0" ]
3
2019-12-11T13:47:54.000Z
2020-10-23T07:10:15.000Z
import logging logger = logging.getLogger(__name__) import random import chainercv import numpy as np from matplotlib import pyplot as plt from mpl_toolkits.mplot3d import Axes3D # NOQA from pose.hand_dataset.geometry_utils import normalize_joint_zyx from pose.hand_dataset.image_utils import normalize_depth # Decimal Code (R,G,B) BASE_COLOR = { "RED": (255, 0, 0), "GREEN": (0, 255, 0), "BLUE": (0, 0, 255), "YELLOW": (255, 255, 0), "CYAN": (0, 255, 255), "MAGENTA": (255, 0, 255), } def vis_image(img, ax=None): """ extend chainercv.visualizations.vis_image """ C, H, W = img.shape if C == 1: if ax is None: fig = plt.figure() ax = fig.add_subplot(1, 1, 1) # remove channnel dimension ax.imshow(img.squeeze()) else: ax = chainercv.visualizations.vis_image(img, ax) return ax def preprocess(point, ax, img): input_point = np.asarray(point) if input_point.ndim == 2: input_point = np.expand_dims(point, axis=0) H, W = None, None if ax is None: fig = plt.figure() if input_point.shape[-1] == 3: ax = fig.add_subplot(1, 1, 1, projection="3d") else: ax = fig.add_subplot(1, 1, 1) if img is not None: ax = vis_image(img, ax=ax) _, H, W = img.shape return input_point, ax, H, W def vis_point(point, img=None, color=None, ax=None): """ Visualize points in an image, customized to our purpose. Base implementation is taken from chainercv.visualizations.vis_image """ point, ax, H, W = preprocess(point, ax, img) n_inst = len(point) c = np.asarray(color) / 255. if color is not None else None for i in range(n_inst): # note that the shape of `point[i]` is (K,N) and the format of one is (y, x), (z,y,x). # (K, N) -> (N, K) pts = point[i].transpose() # (K,N) -> (N,K) # resort coordinate order : yx -> xy or zyx -> xyz pts = pts[::-1] ax.scatter(*pts, c=c) if W is not None: ax.set_xlim(left=0, right=W) if H is not None: ax.set_ylim(bottom=H - 1, top=0) return ax def vis_edge(point, indices, img=None, color=None, ax=None): """ Visualize edges in an image """ point, ax, H, W = preprocess(point, ax, img) n_inst = len(point) if color is not None: color = np.asarray(color) / 255. else: color = [None] * len(indices) for i in range(n_inst): # note that the shape of `point[i]` is (K,N) and the format of one is (y, x) or (z,y,x). pts = point[i] for ((s, t), c) in zip(indices, color): # Select point which consists edge. It is a pair or point (start, target). 
# Note that [::-1] does resort coordinate order: yx -> xy or zyx -> xyz edge = pts[[s, t]].transpose() edge = edge[::-1] ax.plot(*edge, c=c) if W is not None: ax.set_xlim(left=0, right=W) if H is not None: ax.set_ylim(bottom=H - 1, top=0) return ax def vis_pose(point, indices, img=None, point_color=None, edge_color=None, ax=None): ax = vis_point(point, img=img, color=point_color, ax=ax) vis_edge(point, indices, img=img, color=edge_color, ax=ax) def visualize_both(dataset, keypoint_names, edges, color_map, normalize=False): import random idx = random.randint(0, len(dataset) - 1) logger.info("get example") example = dataset.get_example(idx) logger.info("Done get example") fig = plt.figure(figsize=(8, 8)) ax1 = fig.add_subplot(221) ax2 = fig.add_subplot(222) ax3 = fig.add_subplot(223, projection="3d") ax4 = fig.add_subplot(224, projection="3d") color = [color_map[k] for k in keypoint_names] edge_color = [color_map[s, t] for s, t in edges] depth = example["depth"].astype(np.float32) depth_joint = example["depth_joint"] depth_camera = example["depth_camera"] depth_vu, depth_z = depth_camera.zyx2vu(depth_joint, return_z=True) z_size = example["param"]["z_size"] if normalize: depth = normalize_depth(depth, z_com=depth_z.mean(), z_size=z_size) depth_joint = normalize_joint_zyx(depth_joint, depth_camera, z_size) rgb = example["rgb"] rgb_joint = example["rgb_joint"] rgb_camera = example["rgb_camera"] rgb_vu = rgb_camera.zyx2vu(rgb_joint) rgb_joint = normalize_joint_zyx(rgb_joint, rgb_camera, z_size) print(example["param"]) vis_point(rgb_vu, img=rgb, color=color, ax=ax1) vis_edge(rgb_vu, indices=edges, color=edge_color, ax=ax1) vis_point(rgb_joint, color=color, ax=ax3) vis_edge(rgb_joint, indices=edges, color=edge_color, ax=ax3) vis_point(depth_vu, img=depth, color=color, ax=ax2) vis_edge(depth_vu, indices=edges, color=edge_color, ax=ax2) vis_point(depth_joint, color=color, ax=ax4) vis_edge(depth_joint, indices=edges, color=edge_color, ax=ax4) for ax in [ax3, ax4]: ax.set_xlabel("x") ax.set_ylabel("y") ax.set_zlabel("z") ax.view_init(-65, -90) plt.savefig("output.png") plt.show() def visualize_rgb(dataset, keypoint_names, edges, color_map, idx=None): import random if idx is None: idx = random.randint(0, len(dataset) - 1) logger.info("get example") example = dataset.get_example(idx) logger.info("Done get example") fig = plt.figure(figsize=(5, 10)) ax1 = fig.add_subplot(211) ax3 = fig.add_subplot(212, projection="3d") color = [color_map[k] for k in keypoint_names] edge_color = [color_map[s, t] for s, t in edges] rgb = example["rgb"] rgb_joint = example["rgb_joint"] rgb_camera = example["rgb_camera"] rgb_vu = rgb_camera.zyx2vu(rgb_joint) vis_point(rgb_vu, img=rgb, color=color, ax=ax1) vis_edge(rgb_vu, indices=edges, color=edge_color, ax=ax1) vis_point(rgb_joint, color=color, ax=ax3) vis_edge(rgb_joint, indices=edges, color=edge_color, ax=ax3) for ax in [ax3]: ax.set_xlabel("x") ax.set_ylabel("y") ax.set_zlabel("z") ax.view_init(-65, -90) plt.savefig("output.png") plt.show() def visualize_depth(dataset, keypoint_names, edges, color_map, normalize=False): idx = random.randint(0, len(dataset) - 1) logger.info("get example") example = dataset.get_example(idx) logger.info("Done get example") fig = plt.figure(figsize=(5, 10)) ax2 = fig.add_subplot(211) ax4 = fig.add_subplot(212, projection="3d") color = [color_map[k] for k in keypoint_names] edge_color = [color_map[s, t] for s, t in edges] depth = example["depth"].astype(np.float32) depth_joint = example["depth_joint"] depth_camera = example["depth_camera"] 
depth_vu, depth_z = depth_camera.zyx2vu(depth_joint, return_z=True) z_size = example["param"]["z_size"] if normalize: depth = normalize_depth(depth, z_com=depth_z.mean(), z_size=z_size) depth_joint = normalize_joint_zyx(depth_joint, depth_camera, z_size) print(example["param"]) vis_point(depth_vu, img=depth, color=color, ax=ax2) vis_edge(depth_vu, indices=edges, color=edge_color, ax=ax2) vis_point(depth_joint, color=color, ax=ax4) vis_edge(depth_joint, indices=edges, color=edge_color, ax=ax4) for ax in [ax4]: ax.set_xlabel("x") ax.set_ylabel("y") ax.set_zlabel("z") ax.view_init(-65, -90) plt.savefig("output.png") plt.show()
31.953191
96
0.632574
0
0
0
0
0
0
0
0
1,088
0.144893
5d75bb550217d28f2cb95a0798b2a193f98c5dc4
190
py
Python
publication-erdf/flask_service.py
ticapix/automated-tasks
a0c73ad2939c6f1a2d91aea6fd309b5005455191
[ "Unlicense" ]
null
null
null
publication-erdf/flask_service.py
ticapix/automated-tasks
a0c73ad2939c6f1a2d91aea6fd309b5005455191
[ "Unlicense" ]
null
null
null
publication-erdf/flask_service.py
ticapix/automated-tasks
a0c73ad2939c6f1a2d91aea6fd309b5005455191
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python3 from flask import Flask app = Flask(__name__) @app.route('/process-email') def process_email(): return "Hello World!" if __name__ == "__main__": app.run()
14.615385
28
0.673684
0
0
0
0
75
0.394737
0
0
62
0.326316
536f43c7085300c239b6e7cb90378b2df33381ad
1,134
py
Python
tools/nn/speaker.py
mikiec84/speaking_detection
ed680138627c156e1f7b0af20d6517e2bea754cc
[ "MIT" ]
null
null
null
tools/nn/speaker.py
mikiec84/speaking_detection
ed680138627c156e1f7b0af20d6517e2bea754cc
[ "MIT" ]
null
null
null
tools/nn/speaker.py
mikiec84/speaking_detection
ed680138627c156e1f7b0af20d6517e2bea754cc
[ "MIT" ]
null
null
null
import os import skimage.io from torch.nn import Module import torch.nn from torchvision.models import resnet18 from nn.speaker_dataset import Dataset # @UnusedImport os.environ['TORCH_MODEL_ZOO'] = '../data/' VIDTIMIT_PATH = '../data/vidtimit/' skimage.io.use_plugin('pil') class Net(Module): def __init__(self): super().__init__() resnet = resnet18(pretrained=True) self.features = torch.nn.Sequential(*list(resnet.children())[:-1]) self.classifier = torch.nn.Sequential( torch.nn.Linear(512, 2) ) # print(len(list(self.features.parameters()))) for p in list(self.features.parameters())[:20]: p.requires_grad = False def forward(self, x, **kw): # X = F.softmax(self.basenet(X)) f = self.features(x) f = f.view(f.size(0), -1) y = self.classifier(f) return y def get_speaking_detector_final(): m = torch.load('../data/speaker.pt') m = m.eval(); return m def get_speaking_detector(e): m = torch.load('../data/speaker/model.e{}.pt'.format(e)) m = m.eval(); return m
23.625
74
0.613757
618
0.544974
0
0
0
0
0
0
194
0.171076
536ff8da70c0647265f2448d9db35e0d757a366c
1,551
py
Python
tensorflow_model_analysis/util_test.py
mdreves/model-analysis
73760b27b763e322a92ea80ff0a768ad9ef74526
[ "Apache-2.0" ]
null
null
null
tensorflow_model_analysis/util_test.py
mdreves/model-analysis
73760b27b763e322a92ea80ff0a768ad9ef74526
[ "Apache-2.0" ]
null
null
null
tensorflow_model_analysis/util_test.py
mdreves/model-analysis
73760b27b763e322a92ea80ff0a768ad9ef74526
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Simple tests for util.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow_model_analysis import util class UtilTest(tf.test.TestCase): def testKwargsOnly(self): @util.kwargs_only def fn(a, b, c, d=None, e=5): if d is None: d = 100 if e is None: e = 1000 return a + b + c + d + e self.assertEqual(1 + 2 + 3 + 100 + 5, fn(a=1, b=2, c=3)) self.assertEqual(1 + 2 + 3 + 100 + 1000, fn(a=1, b=2, c=3, e=None)) with self.assertRaisesRegexp(TypeError, 'keyword-arguments only'): fn(1, 2, 3) with self.assertRaisesRegexp(TypeError, 'with c specified'): fn(a=1, b=2, e=5) # pylint: disable=no-value-for-parameter with self.assertRaisesRegexp(TypeError, 'with extraneous kwargs'): fn(a=1, b=2, c=3, f=11) # pylint: disable=unexpected-keyword-arg if __name__ == '__main__': tf.test.main()
32.3125
74
0.691812
720
0.464217
0
0
155
0.099936
0
0
746
0.48098
5370c3d3d7c64120cfceac3826e677a88c4d71af
3,556
py
Python
laia/data/transforms/vision/random_beta_morphology.py
eivtho/PyLaia
2a7a6e2eeb9b5af68c0faed0c564b02063e72be0
[ "MIT" ]
89
2018-12-12T23:06:26.000Z
2022-02-03T09:04:21.000Z
laia/data/transforms/vision/random_beta_morphology.py
eivtho/PyLaia
2a7a6e2eeb9b5af68c0faed0c564b02063e72be0
[ "MIT" ]
30
2019-03-06T14:29:48.000Z
2022-03-16T14:53:43.000Z
laia/data/transforms/vision/random_beta_morphology.py
jpuigcerver/PyLaia
1b2e864247f1bfb8d95ac1910de9c52df71c017a
[ "MIT" ]
26
2018-12-13T17:48:19.000Z
2022-02-28T12:52:29.000Z
from typing import List, Tuple, Union import numpy as np import scipy.special from PIL import Image, ImageFilter class RandomBetaMorphology: def __init__( self, filter_size_min: int, filter_size_max: int, alpha: float, beta: float ) -> None: assert filter_size_min % 2 != 0, "Filter size must be odd" assert filter_size_max % 2 != 0, "Filter size must be odd" self.filter_size_min = filter_size_min self.filter_size_max = filter_size_max self.alpha = alpha self.beta = beta self.filter_sizes, self.filter_probs = self._create_filter_distribution( filter_size_min, filter_size_max, alpha, beta ) @staticmethod def _create_filter_distribution( filter_size_min: int, filter_size_max: int, alpha: float, beta: float ) -> Tuple[List[int], Union[List[float], np.ndarray]]: n = (filter_size_max - filter_size_min) // 2 + 1 if n < 2: return [filter_size_min], np.asarray([1.0], dtype=np.float32) filter_sizes = [] filter_probs = [] for k in range(n): filter_sizes.append(filter_size_min + 2 * k) filter_probs.append( scipy.special.comb(n, k) * scipy.special.beta(alpha + k, n - k + beta) ) np_filter_probs = np.asarray(filter_probs, dtype=np.float32) np_filter_probs = filter_probs / np_filter_probs.sum() return filter_sizes, np_filter_probs def sample_filter_size(self): filter_size = np.random.choice(self.filter_sizes, p=self.filter_probs) return filter_size def __call__(self, *args, **kwargs): return NotImplementedError def __repr__(self) -> str: return ( f"vision.{self.__class__.__name__}(" f"filter_size_min={self.filter_size_min}, " f"filter_size_max={self.filter_size_max}, " f"alpha={self.alpha}, beta={self.beta})" ) class Dilate(RandomBetaMorphology): def __init__( self, filter_size_min: int = 3, filter_size_max: int = 7, alpha: float = 1, beta: float = 3, ) -> None: super().__init__(filter_size_min, filter_size_max, alpha, beta) def __call__(self, img: Image) -> Image: filter_size = self.sample_filter_size() return img.filter(ImageFilter.MaxFilter(filter_size)) class Erode(RandomBetaMorphology): def __init__( self, filter_size_min: int = 3, filter_size_max: int = 5, alpha: float = 1, beta: float = 3, ) -> None: super().__init__(filter_size_min, filter_size_max, alpha, beta) def __call__(self, img: Image) -> Image: filter_size = self.sample_filter_size() return img.filter(ImageFilter.MinFilter(filter_size)) if __name__ == "__main__": import argparse from PIL import ImageOps parser = argparse.ArgumentParser() parser.add_argument("--operation", choices=("dilate", "erode"), default="dilate") parser.add_argument("images", type=argparse.FileType("rb"), nargs="+") args = parser.parse_args() transformer = Dilate() if args.operation == "dilate" else Erode() for f in args.images: x = Image.open(f, "r").convert("L") x = ImageOps.invert(x) y = transformer(x) w, h = x.size z = Image.new("L", (w, 2 * h)) z.paste(x, (0, 0)) z.paste(y, (0, h)) z = z.resize(size=(w // 2, h), resample=Image.BICUBIC) z.show() input()
32.327273
86
0.615298
2,710
0.762092
0
0
783
0.220191
0
0
290
0.081552
537138998ce86bd69153421493a543bbc8be7c36
723
py
Python
hemp/internal/utils.py
Addvilz/hemp
2cd1d437fc59a8f7b24f5d150c623bf75c3b6747
[ "Apache-2.0" ]
1
2020-08-13T22:28:28.000Z
2020-08-13T22:28:28.000Z
hemp/internal/utils.py
Addvilz/hemp
2cd1d437fc59a8f7b24f5d150c623bf75c3b6747
[ "Apache-2.0" ]
null
null
null
hemp/internal/utils.py
Addvilz/hemp
2cd1d437fc59a8f7b24f5d150c623bf75c3b6747
[ "Apache-2.0" ]
null
null
null
import sys from fabric.utils import error, puts from git import RemoteProgress def print_err(message, func=None, exception=None, stdout=None, stderr=None): error('[Hemp] ' + message, func, exception, stdout, stderr) def print_info(text, show_prefix=None, end="\n", flush=True): puts('[Hemp] ' + text, show_prefix, end, flush) def print_git_output(stdout): for line in stdout.split('\n'): sys.stdout.write('[GIT] ' + line + '\n') sys.stdout.flush() class SimpleProgressPrinter(RemoteProgress): def _parse_progress_line(self, line): if '\r' in line: line = line.replace('\r', '\r[GIT] ') sys.stdout.write('[GIT] ' + line + '\n') sys.stdout.flush()
26.777778
76
0.637621
237
0.327801
0
0
0
0
0
0
68
0.094053
53713acb71d2f50fa7d7472d8e125a179f1d5d33
417
py
Python
backend/links/sentence.py
dla1635/hyLink
8f3d1b6b0cad57ce2f6861583eb2b523f9fceee7
[ "MIT" ]
1
2020-07-17T05:57:47.000Z
2020-07-17T05:57:47.000Z
backend/links/sentence.py
dla1635/hyLink
8f3d1b6b0cad57ce2f6861583eb2b523f9fceee7
[ "MIT" ]
11
2020-06-06T00:30:23.000Z
2022-02-26T19:59:06.000Z
backend/links/sentence.py
dla1635/hylink
8f3d1b6b0cad57ce2f6861583eb2b523f9fceee7
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from collections import Counter from konlpy.tag import Okt class Sentence(object): okt = Okt() def __init__(self, text, index=0): self.index = index self.text = text.strip() self.tokens = self.okt.phrases(self.text) self.bow = Counter(self.tokens) def __str__(self): return self.text def __hash__(self): return self.index
18.954545
49
0.611511
330
0.791367
0
0
0
0
0
0
23
0.055156
53726406b1ce515956afb2308d74b2a4c7e1b255
4,227
py
Python
tests/arch/x86/test_x86parser.py
IMULMUL/barf-project
9547ef843b8eb021c2c32c140e36173c0b4eafa3
[ "BSD-2-Clause" ]
1,395
2015-01-02T11:43:30.000Z
2022-03-30T01:15:26.000Z
tests/arch/x86/test_x86parser.py
IMULMUL/barf-project
9547ef843b8eb021c2c32c140e36173c0b4eafa3
[ "BSD-2-Clause" ]
54
2015-02-11T05:18:05.000Z
2021-12-10T08:45:39.000Z
tests/arch/x86/test_x86parser.py
IMULMUL/barf-project
9547ef843b8eb021c2c32c140e36173c0b4eafa3
[ "BSD-2-Clause" ]
207
2015-01-05T09:47:54.000Z
2022-03-30T01:15:29.000Z
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import import unittest from barf.arch import ARCH_X86_MODE_32 from barf.arch import ARCH_X86_MODE_64 from barf.arch.x86.parser import X86Parser class X86Parser32BitsTests(unittest.TestCase): def setUp(self): self._parser = X86Parser(ARCH_X86_MODE_32) def test_two_oprnd_reg_reg(self): asm = self._parser.parse("add eax, ebx") self.assertEqual(str(asm), "add eax, ebx") def test_two_oprnd_reg_imm(self): asm = self._parser.parse("add eax, 0x12345678") self.assertEqual(str(asm), "add eax, 0x12345678") def test_two_oprnd_reg_mem(self): asm = self._parser.parse("add eax, [ebx + edx * 4 + 0x10]") self.assertEqual(str(asm), "add eax, [ebx+edx*4+0x10]") def test_two_oprnd_mem_reg(self): asm = self._parser.parse("add [ebx + edx * 4 + 0x10], eax") self.assertEqual(str(asm), "add [ebx+edx*4+0x10], eax") def test_one_oprnd_reg(self): asm = self._parser.parse("inc eax") self.assertEqual(str(asm), "inc eax") def test_one_oprnd_imm(self): asm = self._parser.parse("jmp 0x12345678") self.assertEqual(str(asm), "jmp 0x12345678") def test_one_oprnd_mem(self): asm = self._parser.parse("inc dword ptr [ebx+edx*4+0x10]") self.assertEqual(str(asm), "inc dword ptr [ebx+edx*4+0x10]") def test_zero_oprnd(self): asm = self._parser.parse("nop") self.assertEqual(str(asm), "nop") # Misc # ======================================================================== # def test_misc_1(self): asm = self._parser.parse("mov dword ptr [-0x21524111], ecx") self.assertEqual(str(asm), "mov dword ptr [-0x21524111], ecx") self.assertNotEqual(str(asm), "mov dword ptr [0xdeadbeef], ecx") def test_misc_2(self): asm = self._parser.parse("fucompi st(1)") self.assertEqual(str(asm), "fucompi st1") class X86Parser64BitsTests(unittest.TestCase): def setUp(self): self._parser = X86Parser(ARCH_X86_MODE_64) def test_64_two_oprnd_reg_reg(self): asm = self._parser.parse("add rax, rbx") self.assertEqual(str(asm), "add rax, rbx") def test_64_two_oprnd_reg_reg_2(self): asm = self._parser.parse("add rax, r8") self.assertEqual(str(asm), "add rax, r8") def test_64_two_oprnd_reg_mem(self): asm = self._parser.parse("add rax, [rbx + r15 * 4 + 0x10]") self.assertEqual(str(asm), "add rax, [rbx+r15*4+0x10]") # Misc # 
======================================================================== # def test_misc_offset_1(self): asm = self._parser.parse("add byte ptr [rax+0xffffff89], cl") self.assertEqual(str(asm), "add byte ptr [rax+0xffffff89], cl") def main(): unittest.main() if __name__ == '__main__': main()
33.283465
80
0.666903
2,623
0.620535
0
0
0
0
0
0
2,123
0.502247
5374082003f5a0ab7717d7cbdda9e4ca3ac483ea
1,236
py
Python
Concurrency/codeSample/Part4_Thread_Synchronuzation_Primitives/sema_signal.py
Chyi341152/pyConPaper
851190d59f8dc85b4f2a0b47c6505edd0367a6fe
[ "MIT" ]
1
2018-05-30T02:36:46.000Z
2018-05-30T02:36:46.000Z
Concurrency/codeSample/Part4_Thread_Synchronuzation_Primitives/sema_signal.py
Chyi341152/pyConPaper
851190d59f8dc85b4f2a0b47c6505edd0367a6fe
[ "MIT" ]
null
null
null
Concurrency/codeSample/Part4_Thread_Synchronuzation_Primitives/sema_signal.py
Chyi341152/pyConPaper
851190d59f8dc85b4f2a0b47c6505edd0367a6fe
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding:utf-8 -*- # sema_signal.py # # An example of using a semaphore for signaling between threads import threading import time done = threading.Semaphore(0) # Resource control. item = None def producer(): global item print("I'm the producer and I produce data.") print("Producer is going to sleep.") time.sleep(5) item = "Hello" print("Producer is alive. Signaling the consumer.") done.release() # Increments the count and signals waiting threads def consumer(): print("I'm a consumer and I want for date.") print("Consumer is waiting.") done.acquire() # Waits for the count is 0, otherwise decrements the count and continues print("Consumer got", item) t1 = threading.Thread(target=producer) t2 = threading.Thread(target=consumer) t1.start() t2.start() """ Semaphore Uses: 1. Resource control You can limit the number of threads performing certain operations.For example, performing database queries making network connections 2. Signaling Semaphores can be used to send "signals" between threads. For example, having one thread wake up another thread """
29.428571
145
0.669903
0
0
0
0
0
0
0
0
802
0.648867
5375dec1385aae371f742bbb1feff08c0d14da3b
3,199
py
Python
temp_wc_analysis/analysis.py
KarrLab/wc_sim
5b0ee03c3d19193fa67a3797d4258b753e6bc576
[ "MIT" ]
8
2018-03-27T21:35:25.000Z
2022-01-18T08:32:20.000Z
temp_wc_analysis/analysis.py
KarrLab/wc_sim
5b0ee03c3d19193fa67a3797d4258b753e6bc576
[ "MIT" ]
114
2018-02-27T14:14:39.000Z
2020-12-30T15:06:51.000Z
temp_wc_analysis/analysis.py
KarrLab/wc_sim
5b0ee03c3d19193fa67a3797d4258b753e6bc576
[ "MIT" ]
2
2019-04-05T16:17:28.000Z
2020-05-17T12:55:20.000Z
'''Analysis utility functions. :Author: Jonathan Karr <[email protected]> :Date: 2016-03-26 :Copyright: 2016-2018, Karr Lab :License: MIT ''' # TODO(Arthur): IMPORTANT: refactor and replace from matplotlib import pyplot from matplotlib import ticker from wc_lang import Model, Submodel from scipy.constants import Avogadro import numpy as np import re def plot(model, time = np.zeros(0), species_counts = None, volume = np.zeros(0), extracellular_volume = np.zeros(0), selected_species_compartments = [], yDatas = {}, units = 'mM', title = '', fileName = ''): #convert time to hours time = time.copy() / 3600 #create figure fig = pyplot.figure() #extract data to plot if not yDatas: yDatas = {} for species_compartment_id in selected_species_compartments: #extract data match = re.match('^(?P<speciesId>[a-z0-9\-_]+)\[(?P<compartmentId>[a-z0-9\-_]+)\]$', species_compartment_id, re.I).groupdict() speciesId = match['speciesId'] compartmentId = match['compartmentId'] if isinstance(model, Model): species = model.get_component_by_id(speciesId, 'species') compartment = model.get_component_by_id(compartmentId, 'compartments') yData = species_counts[species.index, compartment.index, :] elif isinstance(model, Submodel): yData = species_counts[species_compartment_id] else: raise Exception('Invalid model type %s' % model.__class__.__name__) #scale if compartmentId == 'c': V = volume else: V = extracellular_volume if units == 'pM': scale = 1 / Avogadro / V * 1e12 elif units == 'nM': scale = 1 / Avogadro / V * 1e9 elif units == 'uM': scale = 1 / Avogadro / V * 1e6 elif units == 'mM': scale = 1 / Avogadro / V * 1e3 elif units == 'M': scale = 1 / Avogadro / V * 1e0 elif units == 'molecules': scale = 1 else: raise Exception('Invalid units "%s"' % units) yData *= scale yDatas[species_compartment_id] = yData #plot results yMin = 1e12 yMax = -1e12 for label, yData in yDatas.items(): #update range yMin = min(yMin, np.min(yData)) yMax = max(yMax, np.max(yData)) #add to plot pyplot.plot(time, yData, label=label) #set axis limits pyplot.xlim((0, time[-1])) pyplot.ylim((yMin, yMax)) #add axis labels and legend if title: pyplot.title(title) pyplot.xlabel('Time (h)') if units == 'molecules': pyplot.ylabel('Copy number') else: pyplot.ylabel('Concentration (%s)' % units) y_formatter = ticker.ScalarFormatter(useOffset=False) pyplot.gca().get_yaxis().set_major_formatter(y_formatter) if len(selected_species_compartments) > 1: pyplot.legend() #save if fileName: fig.savefig(fileName) pyplot.close(fig)
29.081818
96
0.56924
0
0
0
0
0
0
0
0
600
0.187559
5378047f0579efdd010c7d57b8aefd313753aa1d
907
py
Python
setup.py
bstuddard/bonsai
3610fc50a3b24818288d850048c2a23306215367
[ "MIT" ]
26
2021-07-18T14:52:47.000Z
2022-01-27T10:35:44.000Z
setup.py
bstuddard/bonsai
3610fc50a3b24818288d850048c2a23306215367
[ "MIT" ]
null
null
null
setup.py
bstuddard/bonsai
3610fc50a3b24818288d850048c2a23306215367
[ "MIT" ]
3
2021-07-20T03:25:22.000Z
2021-08-17T04:06:27.000Z
from setuptools import setup, find_packages with open("README.md", "r") as readme_file: readme = readme_file.read() requirements = [ 'xgboost>=0.90', 'catboost>=0.26', 'bayesian-optimization>=1.2.0', 'numpy>=1.19.5', 'pandas>=1.1.5', 'matplotlib>=3.2.2', 'seaborn>=0.11.1', 'plotly>=4.4.1', 'pyyaml>=5.4.1' ] setup( name="bonsai-tree", version="1.2", author="Landon Buechner", author_email="[email protected]", description="Bayesian Optimization + Gradient Boosted Trees", long_description=readme, url="https://github.com/magi-1/bonsai", packages=find_packages(), package_data={'': ['*.yml']}, install_requires=requirements, license = 'MIT', classifiers=[ "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", ], )
25.914286
65
0.607497
0
0
0
0
0
0
0
0
443
0.488423
537b221bff7d480fcdf886ab83757cc48372b358
823
py
Python
_scripts/increment_version.py
clockhart/pathogen
1764d4a7d2dd7c1f5dcc08afc016ec4edf809c36
[ "MIT" ]
null
null
null
_scripts/increment_version.py
clockhart/pathogen
1764d4a7d2dd7c1f5dcc08afc016ec4edf809c36
[ "MIT" ]
null
null
null
_scripts/increment_version.py
clockhart/pathogen
1764d4a7d2dd7c1f5dcc08afc016ec4edf809c36
[ "MIT" ]
null
null
null
""" increment_version.py written in Python3 author: C. Lockhart <[email protected]> """ import yaml # Read in version with open('version.yml', 'r') as f: version = yaml.safe_load(f.read()) # Strip "dev" out of micro version['micro'] = int(str(version['micro']).replace('dev', '')) # Update patch version['micro'] += 1 # Add "dev" back to patch if version['micro'] != 0: version['micro'] = 'dev' + str(version['micro']) # Output version with open('version.yml', 'w') as f: yaml.safe_dump(version, f, sort_keys=False) # Transform version dict to string version = '.'.join([str(version[key]) for key in ['major', 'minor', 'micro']]) # Write version string to pathogen/_version.py with open('pathogen/version.py', 'w') as f: f.write("__version__ = '{}'\n".format(version)) # Return print(version)
22.243243
78
0.660996
0
0
0
0
0
0
0
0
433
0.526124
537c67be5a305675d3c345fd99a5e6be9b4b00c1
15,725
py
Python
holoviews/core/data/ibis.py
TheoMathurin/holoviews
0defcef994d6dd6d2054f75a0e332d02d121f8b0
[ "BSD-3-Clause" ]
1
2017-03-01T07:08:23.000Z
2017-03-01T07:08:23.000Z
holoviews/core/data/ibis.py
chrinide/holoviews
e1234a60ae0809ac561c204b1998dff0452b2bf0
[ "BSD-3-Clause" ]
null
null
null
holoviews/core/data/ibis.py
chrinide/holoviews
e1234a60ae0809ac561c204b1998dff0452b2bf0
[ "BSD-3-Clause" ]
null
null
null
import sys import numpy try: from collections.abc import Iterable except ImportError: from collections import Iterable from .. import util from ..element import Element from ..ndmapping import NdMapping, item_check, sorted_context from .interface import Interface from . import pandas from .util import cached class IbisInterface(Interface): types = () datatype = "ibis" default_partitions = 100 zero_indexed_backend_modules = [ 'ibis.backends.omniscidb.client', ] # the rowid is needed until ibis updates versions @classmethod def has_rowid(cls): import ibis.expr.operations return hasattr(ibis.expr.operations, "RowID") @classmethod def is_rowid_zero_indexed(cls, data): try: from ibis.client import find_backends, validate_backends (backend,) = validate_backends(list(find_backends(data))) except Exception: backend = data._find_backend() return type(backend).__module__ in cls.zero_indexed_backend_modules @classmethod def loaded(cls): return "ibis" in sys.modules @classmethod def applies(cls, obj): if not cls.loaded(): return False from ibis.expr.types import Expr return isinstance(obj, Expr) @classmethod def init(cls, eltype, data, keys, values): params = eltype.param.objects() index = params["kdims"] columns = params["vdims"] if isinstance(index.bounds[1], int): ndim = min([index.bounds[1], len(index.default)]) else: ndim = None nvdim = columns.bounds[1] if isinstance(columns.bounds[1], int) else None if keys and values is None: values = [c for c in data.columns if c not in keys] elif values and keys is None: keys = [c for c in data.columns if c not in values][:ndim] elif keys is None: keys = list(data.columns[:ndim]) if values is None: values = [ key for key in data.columns[ndim : ((ndim + nvdim) if nvdim else None)] if key not in keys ] elif keys == [] and values is None: values = list(data.columns[: nvdim if nvdim else None]) return data, dict(kdims=keys, vdims=values), {} @classmethod def compute(cls, dataset): return dataset.clone(dataset.data.execute()) @classmethod def persist(cls, dataset): return cls.compute(dataset) @classmethod @cached def length(self, dataset): # Get the length by counting the length of an empty query. return dataset.data[[]].count().execute() @classmethod @cached def nonzero(cls, dataset): # Make an empty query to see if a row is returned. 
return bool(len(dataset.data[[]].head(1).execute())) @classmethod @cached def range(cls, dataset, dimension): dimension = dataset.get_dimension(dimension, strict=True) if cls.dtype(dataset, dimension).kind in 'SUO': return None, None if dimension.nodata is not None: return Interface.range(dataset, dimension) column = dataset.data[dimension.name] return tuple( dataset.data.aggregate([column.min(), column.max()]).execute().values[0, :] ) @classmethod @cached def values( cls, dataset, dimension, expanded=True, flat=True, compute=True, keep_index=False, ): dimension = dataset.get_dimension(dimension, strict=True) data = dataset.data[dimension.name] if not expanded: data = data.distinct() return data if keep_index or not compute else data.execute().values @classmethod def histogram(cls, expr, bins, density=True, weights=None): bins = numpy.asarray(bins) bins = [int(v) if bins.dtype.kind in 'iu' else float(v) for v in bins] binned = expr.bucket(bins).name('bucket') hist = numpy.zeros(len(bins)-1) hist_bins = binned.value_counts().sort_by('bucket').execute() for b, v in zip(hist_bins['bucket'], hist_bins['count']): if numpy.isnan(b): continue hist[int(b)] = v if weights is not None: raise NotImplementedError("Weighted histograms currently " "not implemented for IbisInterface.") if density: hist = hist/expr.count().execute() return hist, bins @classmethod @cached def shape(cls, dataset): return cls.length(dataset), len(dataset.data.columns) @classmethod @cached def dtype(cls, dataset, dimension): dimension = dataset.get_dimension(dimension) return dataset.data.head(0).execute().dtypes[dimension.name] dimension_type = dtype @classmethod def sort(cls, dataset, by=[], reverse=False): return dataset.data.sort_by([(dataset.get_dimension(x).name, not reverse) for x in by]) @classmethod def redim(cls, dataset, dimensions): return dataset.data.mutate( **{v.name: dataset.data[k] for k, v in dimensions.items()} ) validate = pandas.PandasInterface.validate reindex = pandas.PandasInterface.reindex @classmethod def _index_ibis_table(cls, data): import ibis if not cls.has_rowid(): raise ValueError( "iloc expressions are not supported for ibis version %s." 
% ibis.__version__ ) if "hv_row_id__" in data.columns: return data if cls.is_rowid_zero_indexed(data): return data.mutate(hv_row_id__=data.rowid()) else: return data.mutate(hv_row_id__=data.rowid() - 1) @classmethod def iloc(cls, dataset, index): rows, columns = index scalar = all(map(util.isscalar, index)) if isinstance(columns, slice): columns = [x.name for x in dataset.dimensions()[columns]] elif numpy.isscalar(columns): columns = [dataset.get_dimension(columns).name] else: columns = [dataset.get_dimension(d).name for d in columns] data = cls._index_ibis_table(dataset.data[columns]) if scalar: return ( data.filter(data.hv_row_id__ == rows)[columns] .head(1) .execute() .iloc[0, 0] ) if isinstance(rows, slice): # We should use a pseudo column for the row number but i think that is still awaiting # a pr on ibis if any(x is not None for x in (rows.start, rows.stop, rows.step)): predicates = [] if rows.start: predicates += [data.hv_row_id__ >= rows.start] if rows.stop: predicates += [data.hv_row_id__ < rows.stop] return data.filter(predicates).drop(["hv_row_id__"]) else: if not isinstance(rows, Iterable): rows = [rows] return data.filter([data.hv_row_id__.isin(rows)]).drop(["hv_row_id__"]) return data.drop(["hv_row_id__"]) @classmethod def unpack_scalar(cls, dataset, data): """ Given a dataset object and data in the appropriate format for the interface, return a simple scalar. """ if len(data.columns) > 1 or data[[]].count().execute() != 1: return data return data.execute().iat[0, 0] @classmethod def groupby(cls, dataset, dimensions, container_type, group_type, **kwargs): # aggregate the necesary dimensions index_dims = [dataset.get_dimension(d, strict=True) for d in dimensions] element_dims = [kdim for kdim in dataset.kdims if kdim not in index_dims] group_kwargs = {} if group_type != "raw" and issubclass(group_type, Element): group_kwargs = dict(util.get_param_values(dataset), kdims=element_dims) group_kwargs.update(kwargs) group_kwargs["dataset"] = dataset.dataset group_by = [d.name for d in index_dims] # execute a query against the table to find the unique groups. groups = dataset.data.groupby(group_by).aggregate().execute() # filter each group based on the predicate defined. data = [ ( tuple(s.values.tolist()), group_type( dataset.data.filter( [dataset.data[k] == v for k, v in s.to_dict().items()] ), **group_kwargs ), ) for i, s in groups.iterrows() ] if issubclass(container_type, NdMapping): with item_check(False), sorted_context(False): return container_type(data, kdims=index_dims) else: return container_type(data) @classmethod def assign(cls, dataset, new_data): return dataset.data.mutate(**new_data) @classmethod def add_dimension(cls, dataset, dimension, dim_pos, values, vdim): import ibis data = dataset.data if dimension.name not in data.columns: if not isinstance(values, ibis.Expr) and not numpy.isscalar(values): raise ValueError("Cannot assign %s type as a Ibis table column, " "expecting either ibis.Expr or scalar." 
% type(values).__name__) data = data.mutate(**{dimension.name: values}) return data @classmethod @cached def isscalar(cls, dataset, dim): return ( dataset.data[dataset.get_dimension(dim, strict=True).name] .distinct() .count() .compute() == 1 ) @classmethod def select(cls, dataset, selection_mask=None, **selection): if selection_mask is None: selection_mask = cls.select_mask(dataset, selection) indexed = cls.indexed(dataset, selection) data = dataset.data if isinstance(selection_mask, numpy.ndarray): data = cls._index_ibis_table(data) if selection_mask.dtype == numpy.dtype("bool"): selection_mask = numpy.where(selection_mask)[0] data = data.filter( data["hv_row_id__"].isin(list(map(int, selection_mask))) ).drop(["hv_row_id__"]) elif selection_mask is not None and not (isinstance(selection_mask, list) and not selection_mask): data = data.filter(selection_mask) if indexed and data.count().execute() == 1 and len(dataset.vdims) == 1: return data[dataset.vdims[0].name].execute().iloc[0] return data @classmethod def select_mask(cls, dataset, selection): import ibis predicates = [] for dim, object in selection.items(): if isinstance(object, tuple): object = slice(*object) alias = dataset.get_dimension(dim).name column = dataset.data[alias] if isinstance(object, slice): if object.start is not None: # Workaround for dask issue #3392 bound = util.numpy_scalar_to_python(object.start) predicates.append(bound <= column) if object.stop is not None: bound = util.numpy_scalar_to_python(object.stop) predicates.append(column < bound) elif isinstance(object, (set, list)): # rowid conditions condition = None for id in object: predicate = column == id condition = ( predicate if condition is None else condition | predicate ) if condition is not None: predicates.append(condition) elif callable(object): predicates.append(object(column)) elif isinstance(object, ibis.Expr): predicates.append(object) else: predicates.append(column == object) return predicates @classmethod def sample(cls, dataset, samples=[]): import ibis dims = dataset.dimensions() data = dataset.data if all(util.isscalar(s) or len(s) == 1 for s in samples): items = [s[0] if isinstance(s, tuple) else s for s in samples] return data[data[dims[0].name].isin(items)] predicates = None for sample in samples: if util.isscalar(sample): sample = [sample] if not sample: continue predicate = None for i, v in enumerate(sample): p = data[dims[i].name] == ibis.literal(util.numpy_scalar_to_python(v)) if predicate is None: predicate = p else: predicate &= p if predicates is None: predicates = predicate else: predicates |= predicate return data if predicates is None else data.filter(predicates) @classmethod def aggregate(cls, dataset, dimensions, function, **kwargs): import ibis.expr.operations data = dataset.data columns = [d.name for d in dataset.kdims if d in dimensions] values = dataset.dimensions("value", label="name") new = data[columns + values] function = { numpy.min: ibis.expr.operations.Min, numpy.nanmin: ibis.expr.operations.Min, numpy.max: ibis.expr.operations.Max, numpy.nanmax: ibis.expr.operations.Max, numpy.mean: ibis.expr.operations.Mean, numpy.nanmean: ibis.expr.operations.Mean, numpy.std: ibis.expr.operations.StandardDev, numpy.nanstd: ibis.expr.operations.StandardDev, numpy.sum: ibis.expr.operations.Sum, numpy.nansum: ibis.expr.operations.Sum, numpy.var: ibis.expr.operations.Variance, numpy.nanvar: ibis.expr.operations.Variance, len: ibis.expr.operations.Count, }.get(function, function) if len(dimensions): selection = new.groupby(columns) if 
function is numpy.count_nonzero: aggregation = selection.aggregate( **{ x: ibis.expr.operations.Count(new[x], where=new[x] != 0).to_expr() for x in new.columns if x not in columns } ) else: aggregation = selection.aggregate( **{ x: function(new[x]).to_expr() for x in new.columns if x not in columns } ) else: aggregation = new.aggregate( **{x: function(new[x]).to_expr() for x in new.columns} ) dropped = [x for x in values if x not in data.columns] return aggregation, dropped @classmethod @cached def mask(cls, dataset, mask, mask_value=numpy.nan): raise NotImplementedError('Mask is not implemented for IbisInterface.') @classmethod @cached def dframe(cls, dataset, dimensions): return dataset.data[dimensions].execute() Interface.register(IbisInterface)
35.022272
106
0.569348
15,367
0.977234
0
0
14,832
0.943211
0
0
1,059
0.067345
537e41912df4cf73c680542167c1c109a8513d39
3,907
py
Python
chess/models/tournament.py
S0Imyr/Projet-4
1d93e125bc6e44bc560f3ffc9b11e14e35291c98
[ "MIT" ]
null
null
null
chess/models/tournament.py
S0Imyr/Projet-4
1d93e125bc6e44bc560f3ffc9b11e14e35291c98
[ "MIT" ]
null
null
null
chess/models/tournament.py
S0Imyr/Projet-4
1d93e125bc6e44bc560f3ffc9b11e14e35291c98
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Handles the tournament logic """ import datetime from chess.utils.utils import get_new_id from chess.models.actors import Player from chess.models.round import Round TOURNAMENT_ID_WIDTH = 8 NB_ROUND = 4 NB_PLAYERS = 8 NB_MATCH = 4 class Tournament: """ The class Tournament is the central piece of the models. """ last_tournament_id = "0" * TOURNAMENT_ID_WIDTH def __init__(self, name, location, timer_type, description): Tournament.last_tournament_id = get_new_id(Tournament.last_tournament_id, TOURNAMENT_ID_WIDTH) self.tournament_id = Tournament.last_tournament_id self.name = name self.location = location self.start_date = None self.end_date = None self.timer_type = timer_type self.description = description self.number_of_rounds = NB_ROUND self.rounds = [] self.list_of_players = [] self.players_assigned = False self.finished = False def define_players(self, actors): """ Defines the list of identifier of the players who join the tournament. :param actors: :return: None """ for num_player in range(NB_PLAYERS): self.list_of_players.append(Player(actors[num_player], self.tournament_id, num_player)) def init_round(self, num_round): """ Launches the round number "num_round". :param num_round: number of the round played :return: None """ tour = Round(num_round, self.tournament_id, self.list_of_players) tour.start_date = datetime.date.today() tour.rank_players() tour.define_matches() self.rounds.append(tour) def register_round_results(self, num_round, winner): """ Registers the results of the round. :param num_round: the round number. :param winner: the list of the winners. :return: None. """ self.rounds[num_round].register_results(winner) self.rounds[num_round].assign_points() self.rounds[num_round].finished = True self.rounds[num_round].memorize_opponents() self.rounds[num_round].rank_players() self.rounds[num_round].end_date = datetime.date.today() def tournament_to_dict(self): """ Converts the tournament into a dictionary :return: dictionary of the tournament instance. """ string_attributes = ['tournament_id', 'name', 'location', 'timer_type', 'description', 'number_of_rounds', 'players_assigned'] serialized_tournament = {} for attribute in string_attributes: serialized_tournament[attribute] = getattr(self, attribute) serialized_tournament['rounds'] = [] for r0und in self.rounds: serialized_tournament['rounds'].append(r0und.round_to_dict()) serialized_tournament['list_of_players'] = [] for player in self.list_of_players: serialized_tournament['list_of_players'].append(player.player_to_dict()) serialized_tournament['start_date'] = str(self.start_date) serialized_tournament['end_date'] = str(self.end_date) return serialized_tournament def end_tournament(self): """ Handles the end of the tournament. Adds the tournament_id to the players list of tournaments. Defines the attribute finished and the end date of the tournament. """ for player in self.list_of_players: player.actor.list_of_tournaments_played.append(self.tournament_id) self.finished = True self.end_date = datetime.date.today()
33.393162
102
0.621449
3,637
0.930893
0
0
0
0
0
0
1,029
0.263373
537ea975bc8b1468e691c88bd35a36f7347e9442
1,895
py
Python
set-config.py
astubenazy/vrops-metric-collection
c4e5b8d7058759aa5eded74cc619d1dedcbc821a
[ "MIT" ]
2
2020-04-08T13:03:00.000Z
2020-08-25T18:21:27.000Z
set-config.py
astubenazy/vrops-metric-collection
c4e5b8d7058759aa5eded74cc619d1dedcbc821a
[ "MIT" ]
1
2019-08-15T11:19:18.000Z
2019-08-17T11:38:48.000Z
set-config.py
astubenazy/vrops-metric-collection
c4e5b8d7058759aa5eded74cc619d1dedcbc821a
[ "MIT" ]
7
2018-06-06T13:47:52.000Z
2021-06-17T18:33:27.000Z
# !/usr/bin python """ # # set-config - a small python program to setup the configuration environment for data-collect.py # data-collect.py contain the python program to gather Metrics from vROps # Author Sajal Debnath <[email protected]> # """ # Importing the required modules import json import base64 import os,sys # Getting the absolute path from where the script is being run def get_script_path(): return os.path.dirname(os.path.realpath(sys.argv[0])) def get_the_inputs(): adapterkind = raw_input("Please enter Adapter Kind: ") resourceKind = raw_input("Please enter Resource Kind: ") servername = raw_input("Enter enter Server IP/FQDN: ") serveruid = raw_input("Please enter user id: ") serverpasswd = raw_input("Please enter vRops password: ") encryptedvar = base64.b64encode(serverpasswd) maxsamples = raw_input("Please enter the maximum number of samples to collect: ") keys_to_monitor = raw_input("Please enter the number of keys to monitor: ") keys = [] for i in range(int(keys_to_monitor)): keys.append(raw_input("Enter the key: ")) data = {} if int(maxsamples) < 1: maxsamples = 1 data["adapterKind"] = adapterkind data["resourceKind"] = resourceKind data["sampleno"] = int(maxsamples) serverdetails = {} serverdetails["name"] = servername serverdetails["userid"] = serveruid serverdetails["password"] = encryptedvar data["server"] = serverdetails data["keys"] = keys return data # Getting the path where config.json file should be kept path = get_script_path() fullpath = path+"/"+"config.json" # Getting the data for the config.json file final_data = get_the_inputs() # Saving the data to config.json file with open(fullpath, 'w') as outfile: json.dump(final_data, outfile, sort_keys = True, indent = 2, separators=(',', ':'), ensure_ascii=False)
29.153846
107
0.701847
0
0
0
0
0
0
0
0
840
0.443272
537f1ecf5b58054b91b3f560bcbfa1d5fc3ac88d
16,328
py
Python
tests/test_app.py
inmanta/inmanta-core
ae2153d57f124d00ad1b58e6d4bc6818364be4a8
[ "Apache-2.0" ]
6
2021-03-09T10:24:02.000Z
2022-01-16T03:52:11.000Z
tests/test_app.py
inmanta/inmanta-core
ae2153d57f124d00ad1b58e6d4bc6818364be4a8
[ "Apache-2.0" ]
1,319
2020-12-18T08:52:29.000Z
2022-03-31T18:17:32.000Z
tests/test_app.py
inmanta/inmanta-core
ae2153d57f124d00ad1b58e6d4bc6818364be4a8
[ "Apache-2.0" ]
4
2021-03-03T15:36:50.000Z
2022-03-11T11:41:51.000Z
""" Copyright 2018 Inmanta Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Contact: [email protected] """ import os import re import signal import subprocess import sys from subprocess import TimeoutExpired from threading import Timer import pytest import inmanta.util from inmanta import const def get_command( tmp_dir, stdout_log_level=None, log_file=None, log_level_log_file=None, timed=False, dbport=None, dbname="inmanta", dbhost=None, dbuser=None, dbpass=None, config_dir=None, server_extensions=[], version=False, ): root_dir = tmp_dir.mkdir("root").strpath log_dir = os.path.join(root_dir, "log") state_dir = os.path.join(root_dir, "data") for directory in [log_dir, state_dir]: os.mkdir(directory) config_file = os.path.join(root_dir, "inmanta.cfg") if dbport is not None: port = dbport else: port = inmanta.util.get_free_tcp_port() with open(config_file, "w+", encoding="utf-8") as f: f.write("[config]\n") f.write("log-dir=" + log_dir + "\n") f.write("state-dir=" + state_dir + "\n") f.write("[database]\n") f.write("port=" + str(port) + "\n") f.write("name=" + dbname + "\n") if dbhost: f.write(f"host={dbhost}\n") if dbuser: f.write(f"username={dbuser}\n") if dbpass: f.write(f"password={dbpass}\n") f.write("[server]\n") f.write(f"enabled_extensions={', '.join(server_extensions)}\n") args = [sys.executable, "-m", "inmanta.app"] if stdout_log_level: args.append("-" + "v" * stdout_log_level) if log_file: log_file = os.path.join(log_dir, log_file) args += ["--log-file", log_file] if log_file and log_level_log_file: args += ["--log-file-level", str(log_level_log_file)] if timed: args += ["--timed-logs"] if config_dir: args += ["--config-dir", config_dir] if version: args += ["--version"] args += ["-c", config_file, "server"] return (args, log_dir) def do_run(args, env={}, cwd=None): baseenv = os.environ.copy() baseenv.update(env) process = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=baseenv) return process def convert_to_ascii(text): return [line for line in text.decode("ascii").split("\n") if line != ""] def do_kill(process, killtime=3, termtime=2): def do_and_log(func, msg): def w(): print(msg) func() return w t1 = Timer(killtime, do_and_log(process.kill, "killed process")) t2 = Timer(termtime, do_and_log(process.terminate, "terminated process")) t1.start() t2.start() out, err = process.communicate() t1.cancel() t2.cancel() stdout = convert_to_ascii(out) stderr = convert_to_ascii(err) return (stdout, stderr, process.returncode) def run_without_tty(args, env={}, killtime=3, termtime=2): process = do_run(args, env) return do_kill(process, killtime, termtime) def run_with_tty(args, killtime=3, termtime=2): """Could not get code for actual tty to run stable in docker, so we are faking it """ env = {const.ENVIRON_FORCE_TTY: "true"} return run_without_tty(args, env=env, killtime=killtime, termtime=termtime) def get_timestamp_regex(): return r"[\d]{4}\-[\d]{2}\-[\d]{2} [\d]{2}\:[\d]{2}\:[\d]{2}\,[\d]{3}" def get_compiled_regexes(regexes, timed): result = [] for regex in regexes: if timed: regex = 
get_timestamp_regex() + " " + regex compiled_regex = re.compile(regex) result.append(compiled_regex) return result def is_colorama_package_available(): try: import colorama # noqa: F401 except ModuleNotFoundError: return False return True def test_verify_that_colorama_package_is_not_present(): """ The colorama package turns the colored characters in TTY-based terminal into uncolored characters. As such, this package should not be present. """ assert not is_colorama_package_available() @pytest.mark.parametrize_any( "log_level, timed, with_tty, regexes_required_lines, regexes_forbidden_lines", [ ( 3, False, False, [r"[a-z.]*[ ]*INFO[\s]+Starting server endpoint", r"[a-z.]*[ ]*DEBUG[\s]+Starting Server Rest Endpoint"], [], ), ( 2, False, False, [r"[a-z.]*[ ]*INFO[\s]+Starting server endpoint"], [r"[a-z.]*[ ]*DEBUG[\s]+Starting Server Rest Endpoint"], ), ( 3, False, True, [ r"\x1b\[32m[a-z.]*[ ]*INFO[\s]*\x1b\[0m \x1b\[34mStarting server endpoint", r"\x1b\[36m[a-z.]*[ ]*DEBUG[\s]*\x1b\[0m \x1b\[34mStarting Server Rest Endpoint", ], [], ), ( 2, False, True, [r"\x1b\[32m[a-z.]*[ ]*INFO[\s]*\x1b\[0m \x1b\[34mStarting server endpoint"], [r"\x1b\[36m[a-z.]*[ ]*DEBUG[\s]*\x1b\[0m \x1b\[34mStarting Server Rest Endpoint"], ), ( 3, True, False, [r"[a-z.]*[ ]*INFO[\s]+Starting server endpoint", r"[a-z.]*[ ]*DEBUG[\s]+Starting Server Rest Endpoint"], [], ), ( 2, True, False, [r"[a-z.]*[ ]*INFO[\s]+Starting server endpoint"], [r"[a-z.]*[ ]*DEBUG[\s]+Starting Server Rest Endpoint"], ), ( 3, True, True, [ r"\x1b\[32m[a-z.]*[ ]*INFO[\s]*\x1b\[0m \x1b\[34mStarting server endpoint", r"\x1b\[36m[a-z.]*[ ]*DEBUG[\s]*\x1b\[0m \x1b\[34mStarting Server Rest Endpoint", ], [], ), ( 2, True, True, [r"\x1b\[32m[a-z.]*[ ]*INFO[\s]*\x1b\[0m \x1b\[34mStarting server endpoint"], [r"\x1b\[36m[a-z.]*[ ]*DEBUG[\s]*\x1b\[0m \x1b\[34mStarting Server Rest Endpoint"], ), ], ) @pytest.mark.timeout(20) def test_no_log_file_set(tmpdir, log_level, timed, with_tty, regexes_required_lines, regexes_forbidden_lines): if is_colorama_package_available() and with_tty: pytest.skip("Colorama is present") (args, log_dir) = get_command(tmpdir, stdout_log_level=log_level, timed=timed) if with_tty: (stdout, _, _) = run_with_tty(args) else: (stdout, _, _) = run_without_tty(args) log_file = "server.log" assert log_file not in os.listdir(log_dir) assert len(stdout) != 0 check_logs(stdout, regexes_required_lines, regexes_forbidden_lines, timed) @pytest.mark.parametrize_any( "log_level, with_tty, regexes_required_lines, regexes_forbidden_lines", [ ( 3, False, [ r"[a-z.]*[ ]*INFO[\s]+[a-x\.A-Z]*[\s]Starting server endpoint", r"[a-z.]*[ ]*DEBUG[\s]+[a-x\.A-Z]*[\s]Starting Server Rest Endpoint", ], [], ), ( 2, False, [r"[a-z.]*[ ]*INFO[\s]+[a-x\.A-Z]*[\s]Starting server endpoint"], [r"[a-z.]*[ ]*DEBUG[\s]+[a-x\.A-Z]*[\s]Starting Server Rest Endpoint"], ), ( 3, True, [ r"[a-z.]*[ ]*INFO[\s]+[a-x\.A-Z]*[\s]Starting server endpoint", r"[a-z.]*[ ]*DEBUG[\s]+[a-x\.A-Z]*[\s]Starting Server Rest Endpoint", ], [], ), ( 2, True, [r"[a-z.]*[ ]*INFO[\s]+[a-x\.A-Z]*[\s]Starting server endpoint"], [r"[a-z.]*[ ]*DEBUG[\s]+[a-x\.A-Z]*[\s]Starting Server Rest Endpoint"], ), ], ) @pytest.mark.timeout(60) def test_log_file_set(tmpdir, log_level, with_tty, regexes_required_lines, regexes_forbidden_lines): if is_colorama_package_available() and with_tty: pytest.skip("Colorama is present") log_file = "server.log" (args, log_dir) = get_command(tmpdir, stdout_log_level=log_level, log_file=log_file, log_level_log_file=log_level) if with_tty: (stdout, _, _) = run_with_tty(args) 
else: (stdout, _, _) = run_without_tty(args) assert log_file in os.listdir(log_dir) log_file = os.path.join(log_dir, log_file) with open(log_file, "r") as f: log_lines = f.readlines() check_logs(log_lines, regexes_required_lines, regexes_forbidden_lines, timed=True) check_logs(stdout, [], regexes_required_lines, timed=True) check_logs(stdout, [], regexes_required_lines, timed=False) def check_logs(log_lines, regexes_required_lines, regexes_forbidden_lines, timed): compiled_regexes_requires_lines = get_compiled_regexes(regexes_required_lines, timed) compiled_regexes_forbidden_lines = get_compiled_regexes(regexes_forbidden_lines, timed) for line in log_lines: print(line) for regex in compiled_regexes_requires_lines: if not any(regex.match(line) for line in log_lines): pytest.fail("Required pattern was not found in log lines: %s" % (regex.pattern,)) for regex in compiled_regexes_forbidden_lines: if any(regex.match(line) for line in log_lines): pytest.fail("Forbidden pattern found in log lines: %s" % (regex.pattern,)) def test_check_shutdown(): process = do_run([sys.executable, os.path.join(os.path.dirname(__file__), "miniapp.py")]) # wait for handler to be in place try: process.communicate(timeout=2) except TimeoutExpired: pass process.send_signal(signal.SIGUSR1) out, err, code = do_kill(process, killtime=3, termtime=1) print(out, err) assert code == 0 assert "----- Thread Dump ----" in out assert "STOP" in out assert "SHUTDOWN COMPLETE" in out def test_check_bad_shutdown(): print([sys.executable, os.path.join(os.path.dirname(__file__), "miniapp.py"), "bad"]) process = do_run([sys.executable, os.path.join(os.path.dirname(__file__), "miniapp.py"), "bad"]) out, err, code = do_kill(process, killtime=5, termtime=2) print(out, err) assert code == 3 assert "----- Thread Dump ----" in out assert "STOP" not in out assert "SHUTDOWN COMPLETE" not in out assert not err def test_startup_failure(tmpdir, postgres_db, database_name): (args, log_dir) = get_command( tmpdir, dbport=postgres_db.port, dbname=database_name, dbhost=postgres_db.host, dbuser=postgres_db.user, dbpass=postgres_db.password, server_extensions=["badplugin"], ) pp = ":".join(sys.path) # Add a bad module extrapath = os.path.join(os.path.dirname(__file__), "data", "bad_module_path") (stdout, stderr, code) = run_without_tty(args, env={"PYTHONPATH": pp + ":" + extrapath}, killtime=15, termtime=10) assert "inmanta ERROR Server setup failed" in stdout assert ( "inmanta.server.protocol.SliceStartupException: " "Slice badplugin.badslice failed to start because: Too bad, this plugin is broken" ) in stdout assert code == 4 def test_compiler_exception_output(snippetcompiler): snippetcompiler.setup_for_snippet( """ entity Test: number attr end implement Test using std::none o = Test(attr="1234") """ ) output = ( """Could not set attribute `attr` on instance `__config__::Test (instantiated at ./main.cf:8)` """ """(reported in Construct(Test) (./main.cf:8)) caused by: Invalid value '1234', expected Number (reported in Construct(Test) (./main.cf:8)) """ ) def exec(*cmd): process = do_run([sys.executable, "-m", "inmanta.app"] + list(cmd), cwd=snippetcompiler.project_dir) out, err = process.communicate(timeout=30) assert out.decode() == "" assert err.decode() == output exec("compile") exec("export", "-J", "out.json") @pytest.mark.timeout(15) @pytest.mark.parametrize_any( "cmd", [(["-X", "compile"]), (["compile", "-X"]), (["compile"]), (["export", "-X"]), (["-X", "export"]), (["export"])] ) def test_minus_x_option(snippetcompiler, cmd): 
snippetcompiler.setup_for_snippet( """ entity Test: nuber attr end """ ) process = do_run([sys.executable, "-m", "inmanta.app"] + cmd, cwd=snippetcompiler.project_dir) out, err = process.communicate(timeout=30) assert out.decode() == "" if "-X" in cmd: assert "inmanta.ast.TypeNotFoundException: could not find type nuber in namespace" in str(err) else: assert "inmanta.ast.TypeNotFoundException: could not find type nuber in namespace" not in str(err) @pytest.mark.timeout(20) def test_warning_config_dir_option_on_server_command(tmpdir): non_existing_dir = os.path.join(tmpdir, "non_existing_dir") assert not os.path.isdir(non_existing_dir) (args, _) = get_command(tmpdir, stdout_log_level=3, config_dir=non_existing_dir) (stdout, _, _) = run_without_tty(args) stdout = "".join(stdout) assert "Starting server endpoint" in stdout assert f"Config directory {non_existing_dir} doesn't exist" in stdout @pytest.mark.timeout(20) def test_warning_min_c_option_file_doesnt_exist(snippetcompiler, tmpdir): non_existing_config_file = os.path.join(tmpdir, "non_existing_config_file") snippetcompiler.setup_for_snippet( """ entity Test: number attr end """ ) config_options = ["-c", non_existing_config_file, "-vvv"] args = [sys.executable, "-m", "inmanta.app"] + config_options + ["compile"] process = do_run(args, cwd=snippetcompiler.project_dir) out, err = process.communicate(timeout=30) assert process.returncode == 0 out = out.decode() err = err.decode() all_output = out + err assert "Starting compile" in all_output assert "Compile done" in all_output assert f"Config file {non_existing_config_file} doesn't exist" in all_output @pytest.mark.parametrize_any( "with_tty, version_should_be_shown, regexes_required_lines, regexes_forbidden_lines", [ (False, True, [r"Inmanta Service Orchestrator", r"Compiler version: ", r"Extensions:", r"\s*\* core:"], []), (True, True, [r"Inmanta Service Orchestrator", r"Compiler version: ", r"Extensions:", r"\s*\* core:"], []), (False, False, [], [r"Inmanta Service Orchestrator", r"Compiler version: ", r"Extensions:", r"\s*\* core:"]), (True, False, [], [r"Inmanta Service Orchestrator", r"Compiler version: ", r"Extensions:", r"\s*\* core:"]), ], ) @pytest.mark.timeout(20) def test_version_argument_is_set(tmpdir, with_tty, version_should_be_shown, regexes_required_lines, regexes_forbidden_lines): (args, log_dir) = get_command(tmpdir, version=version_should_be_shown) if with_tty: (stdout, _, _) = run_with_tty(args, killtime=15, termtime=10) else: (stdout, _, _) = run_without_tty(args, killtime=15, termtime=10) assert len(stdout) != 0 check_logs(stdout, regexes_required_lines, regexes_forbidden_lines, False) def test_init_project(tmpdir): args = [sys.executable, "-m", "inmanta.app", "project", "init", "-n", "test-project", "-o", tmpdir, "--default"] (stdout, stderr, return_code) = run_without_tty(args, killtime=15, termtime=10) test_project_path = os.path.join(tmpdir, "test-project") assert return_code == 0 assert os.path.exists(test_project_path) (stdout, stderr, return_code) = run_without_tty(args, killtime=15, termtime=10) assert return_code != 0 assert len(stderr) == 1 assert "already exists" in stderr[0]
33.390593
125
0.614282
0
0
0
0
7,759
0.475196
0
0
5,108
0.312837
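A minimal, self-contained sketch of the log-checking idea the test helpers above rely on: optionally prefix every expected pattern with the timestamp regex, then match required and forbidden patterns against the captured lines. The names below are simplified stand-ins for illustration, not the inmanta test suite's own helpers.

import re

TIMESTAMP = r"[\d]{4}\-[\d]{2}\-[\d]{2} [\d]{2}\:[\d]{2}\:[\d]{2}\,[\d]{3}"

def compile_patterns(patterns, timed):
    # Prefix each pattern with the timestamp regex when checking timed logs.
    return [re.compile((TIMESTAMP + " " + p) if timed else p) for p in patterns]

def check_lines(lines, required, forbidden, timed=False):
    # Return the required patterns that are missing and the forbidden ones that matched.
    required = compile_patterns(required, timed)
    forbidden = compile_patterns(forbidden, timed)
    missing = [r.pattern for r in required if not any(r.match(line) for line in lines)]
    hits = [f.pattern for f in forbidden if any(f.match(line) for line in lines)]
    return missing, hits

if __name__ == "__main__":
    log = ["2022-03-11 11:41:51,000 server INFO Starting server endpoint"]
    print(check_lines(log, [r".*INFO.*"], [r".*DEBUG.*"], timed=True))  # ([], [])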
537fda8bf126c424a17def77a9e57731a1bb799c
449
py
Python
AtC_Beg_Con_021-030/ABC027/C.py
yosho-18/AtCoder
50f6d5c92a01792552c31ac912ce1cd557b06fb0
[ "MIT" ]
null
null
null
AtC_Beg_Con_021-030/ABC027/C.py
yosho-18/AtCoder
50f6d5c92a01792552c31ac912ce1cd557b06fb0
[ "MIT" ]
null
null
null
AtC_Beg_Con_021-030/ABC027/C.py
yosho-18/AtCoder
50f6d5c92a01792552c31ac912ce1cd557b06fb0
[ "MIT" ]
null
null
null
n = int(input()) row = 0 for i in range(100): if 2 ** i <= n <= 2 ** (i + 1) - 1: row = i break def seki(k, n): for _ in range(n): k = 4 * k + 2 return k k = 0 if row % 2 != 0: k = 2 cri = seki(k, row // 2) if n < cri: print("Aoki") else: print("Takahashi") else: k = 1 cri = seki(k, row // 2) if n < cri: print("Takahashi") else: print("Aoki")
14.966667
39
0.4098
0
0
0
0
0
0
0
0
34
0.075724
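The seki helper in the solution above builds its decision threshold by repeatedly applying k -> 4*k + 2. A tiny illustration of how those thresholds grow; the helper name here is assumed for this sketch only.

def threshold(start, steps):
    # Apply k -> 4*k + 2 the given number of times.
    k = start
    for _ in range(steps):
        k = 4 * k + 2
    return k

print([threshold(2, s) for s in range(4)])  # [2, 10, 42, 170]  (odd-row case)
print([threshold(1, s) for s in range(4)])  # [1, 6, 26, 106]   (even-row case)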
5382d0895ddebaa840fcd4f4a2179b700c0dfe67
21,396
py
Python
extplugins/codvote.py
Desi-Boyz/cod4x-server-B3-configs
03a323d7ea293efe1831ed315001391b9aaf532a
[ "MIT" ]
1
2017-07-17T22:21:10.000Z
2017-07-17T22:21:10.000Z
extplugins/codvote.py
Desi-Boyz/cod4x-server-B3-configs
03a323d7ea293efe1831ed315001391b9aaf532a
[ "MIT" ]
null
null
null
extplugins/codvote.py
Desi-Boyz/cod4x-server-B3-configs
03a323d7ea293efe1831ed315001391b9aaf532a
[ "MIT" ]
null
null
null
# CoDVote plugin for BigBrotherBot(B3) (www.bigbrotherbot.net) # Copyright (C) 2015 ph03n1x # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # # Changelog: # v1.0.1 - Fixed vote remaining in progress if requirements for vote unmet. # v1.0.2 - Added "!vote maps" to show what maps can be called into vote. # - Fixed issue where person who called vote needed to vote as well. Changed to automatic yes vote. __version__ = '1.0.2' __author__ = 'ph03n1x' import b3, threading import b3.plugin import b3.events class CodvotePlugin(b3.plugin.Plugin): adminPlugin = None _vote = None # Stores which vote is currently in progress _value = None # Stores the value of the vote _votetime = 30 # Time before a vote will be canceled for not passing _aVotes = {} # All votes allowed. Imported from "votes" section in config _aMaps = {} # All vote allowed maps. Imported from "votemaps" section in config _amt_yes = [] # Amount of players who voted yes. Checked against amount of players in game _amt_no = [] _allplayers = [] # Amount of players in game _mapRequested = None # Stores which map is being voted for _kickRequested = None # Stores which player will be kicked if vote passed _default_messages = { 'tovote': '^7Use ^2!yes ^7or ^2!no ^7 to vote', 'map': "Map vote in progress: Change map to ^3$s^7?", 'nextmap': "Next map vote in progress. Change next map to ^3$s^7?", 'kick': "Kick vote in progress: Kick ^2$s^7?", 'maprotate': "Rotate map vote in progress. Go to next map?", 'maprestart': "Maprestart vote in progress. Restart current map?", 'friendlyfire': "Friendlyfire vote in progress. Change friendlyfire mode to ^2$s^7?", 'killcam': "Killcam vote in progress. Turn killcam ^2$s^7?", 'scorelimit': "Scorelimit vote in progress. Change score limit to ^2$s^7?", 'timelimit': "Timelimit vote in progress. Change time limit to ^2$s^7?", 'roundlength': "Round length vote in progress. Change round length to ^2$s^7?", 'roundlimit': "Round limit vote in progress. 
Change round limit to ^2$s^7?", } def onStartup(self): self.adminPlugin = self.console.getPlugin('admin') if not self.adminPlugin: self.error('Could not find admin plugin') return # Register commands if 'commands' in self.config.sections(): for cmd in self.config.options('commands'): level = self.config.get('commands', cmd) sp = cmd.split('-') alias = None if len(sp) == 2: cmd, alias = sp func = self.getCmd(cmd) if func: self.adminPlugin.registerCommand(self, cmd, level, func, alias) # Re-deploy commands for consideration of this plugin self.adminPlugin.registerCommand(self, 'nextmap', 1, self.cmd_nextmap, 'nm') self.adminPlugin.registerCommand(self, 'maprotate', 20, self.cmd_maprotate, None) self.adminPlugin.registerCommand(self, 'allvotes', 1, self.cmd_allvotes, None) # Register events self.registerEvent('EVT_GAME_EXIT', self.onGameEnd) def onLoadConfig(self): # Load settings section try: self._votetime = self.config.getint('settings', 'votetime') except: self.debug('Unable to get [votetime] from settings. Using default: %s' % self._votetime) # Load votemaps section if self.config.has_section('votemaps'): for (mapname, consolename) in self.config.items('votemaps'): if mapname: self._aMaps[mapname] = consolename self.debug('Successfully entered maps for voting: %s' % self._aMaps) # Load votes section if self.config.has_section('votes'): adLvl = {'guest': 0, 'user': 1, 'reg': 2, 'mod': 20, 'admin': 40, 'fulladmin': 60, 'senioradmin': 80, 'superadmin': 100} for (entry, value) in self.config.items('votes'): try: value = int(value) self._aVotes[entry.lower()] = value except ValueError: self._aVotes[entry.lower()] = adLvl[value] self.debug('Allowed votes are: %s' % self._aVotes) def getCmd(self, cmd): cmd = 'cmd_%s' % cmd if hasattr(self, cmd): func = getattr(self, cmd) return func return None ######################### VOTE TIMING ############################## def voteTimer(self): t1 = threading.Timer((self._votetime - 5), self.voteMessage) t1.start() def voteMessage(self): if self._vote: self.console.say('^110 seconds until vote end!') t2 = threading.Timer(10, self.denyVote) t2.start() ######################### MAP HANDLING ############################## def _search(self, maplist, partial): a = [] for mapname, consolename in maplist.iteritems(): if partial in mapname: a.append(mapname) elif partial in consolename: a.append(mapname) return a def mapvote(self, client, wantedMap): # Find if map is in allowed list match = self._search(self._aMaps, wantedMap) if len(match) == 1: self._mapRequested = match[0] self._value = match[0] return True elif len(match) > 1: match = (', ').join(match) client.message('^1ABORTED!^7Multiple matches: %s' % match) return False elif len(match) == 0: client.message('^1ABORTED!^7No maps matching your request') return False ############### NEXTMAP FUNCTIONING ################ def onGameEnd(self, event): """ Handle EVT_GAME_ROUND_END """ if self._mapRequested: self.confirmMap() self._mapRequested = None ############### CONFIRM VOTES ###################### def confirmVote(self): self.console.say('^3Vote passed!^7') if self._vote == 'map': self.confirmMap() elif self._vote == 'nextmap': self.debug('nextmap vote passed. 
Params already stored') elif self._vote == 'kick': self.confirmKick() elif self._vote == 'maprotate': if self._mapRequested: self.confirmMap() else: self.console.rotateMap() elif self._vote == 'maprestart': self.confirmMaprestart() elif self._vote == 'friendlyfire': self.confirmFriendlyFire() elif self._vote == 'killcam': self.confirmKillCam() elif self._vote == 'scorelimit': self.confirmScoreLimit() elif self._vote == 'timelimit': self.confirmTimeLimit() elif self._vote == 'roundlength': self.confirmRoundLength() elif self._vote == 'roundlimit': self.confirmRoundLimit() else: self.error('Unable to commit. Vote: %s, Value: %s' % (self._vote, self._value)) self._vote = None self._value = None self._amt_no = [] self._amt_yes = [] self._allplayers = [] def denyVote(self): if self._vote: self.console.say('^3Vote failed!') self._vote = None self._value = None self._amt_no = [] self._amt_yes = [] self._allplayers = [] def confirmKick(self): # Note - to kick someone we need: client.kick(reason, keyword, admin, silent=True/False, data) s = self._kickRequested self.debug('Kick vote passed. Kicking %s' % s.name) s.kick('Voted against', '', None, True, '') self._kickRequested = None def confirmMap(self): # This will cycle to next map when needed. self.console.write('map %s' % self._aMaps[self._mapRequested]) self._mapRequested = None def confirmMaprestart(self): # This will restart the current map self.console.write('fast_restart') def confirmFriendlyFire(self): # This will toggle friendly fire on and off setting = self._value if not isinstance(setting, int): if self._value == 'on': setting = 1 elif self._value == 'off': setting = 0 else: self.debug('Unknown wanted setting for Friendlyfire. Toggling to next mode') now = self.console.getCvar('scr_team_fftype').getInt() if now >= 1: setting = 0 elif now == 0: setting = 1 self.console.setCvar('scr_team_fftype', int(setting)) def confirmKillCam(self): # rcon for killcam: scr_game_allowkillcam - 0 or 1 setting = self._value if self._value == 'on': setting = 1 elif self._value == 'off': setting = 0 if not isinstance(setting, int): try: setting = int(setting) except ValueError: now = self.console.getCvar('scr_game_allowkillcam').getInt() self.debug('Setting being voted for is not valid. Toggling to next mode. Killcam currently: %s' % now) if now == 0: setting = 1 else: setting = 0 self.console.setCvar('scr_game_allowkillcam', int(setting)) def confirmScoreLimit(self): # CVAR to write is scr_<gametype>_scorelimit <number> setting = self._value gt = self.getGameType() if not isinstance(setting, int): try: setting = int(setting) except ValueError: self.debug('ERROR: Could not set new scorelimit. Voted value is not integer') return cparams = 'scr_' + gt + '_scorelimit' self.console.setCvar(cparams, setting) def confirmTimeLimit(self): setting = self._value gt = self.getGameType() if not isinstance(setting, int): try: setting = int(setting) except ValueError: self.debug('ERROR: Could not set new timelimit. Voted value is not integer') return cparams = 'scr_' + gt + '_timelimit' self.console.setCvar(cparams, setting) def confirmRoundLength(self): setting = self._value amodes = ['ctf', 'sd', 're', 'bas', 'dom'] gt = self.getGameType() if not isinstance(setting, int): try: setting = int(setting) except ValueError: self.debug('ERROR: Could not set new round length. 
Voted value is not integer') return if gt in amodes: cparams = 'scr_' + gt + '_roundlength' self.console.setCvar(cparams, setting) def confirmRoundLimit(self): setting = self._value amodes = ['ctf', 'sd', 're', 'bas', 'dom'] gt = self.getGameType() if not isinstance(setting, int): try: setting = int(setting) except ValueError: self.debug('Could not set new round limit. Voted value is not integer') return if gt in amodes: cparams = 'scr_' + gt + '_roundlimit' self.console.setCvar(cparams, setting) else: self.debug('Could not set round limit as gametype do not have rounds') def getGameType(self): gametype = self.console.getCvar('g_gametype').getString() if gametype: return gametype else: self.debug('Error getting gametype. Response is %s' % gametype) return False def sendBroadcast(self): # This wil broadcast vote message to server. a = self._value if a == 'maprestart' or a == 'maprotate': self.console.say(self.getMessage(self._vote)) elif a != 'maprestart' and a != 'maprotate': param = {'s': a} self.console.say(self.getMessage(self._vote, param)) self.console.say(self.getMessage('tovote')) def aquireCmdLock2(self, cmd, client, delay, all=True): if client.maxLevel >= 20: return True elif cmd.time + 5 <= self.console.time(): return True else: return False def checkIfAllowed(self, client, voteType): if client.maxLevel >= self._aVotes[voteType]: return True else: return False ################################################################################# # COMMANDS # ################################################################################# def cmd_vote(self, data, client, cmd=None): """\ !vote <setting> <value> - vote to change setting or cvar on server. """ # Check if vote already in progress if self._vote: client.message('^1ERROR^7: Vote already in progress') return # Check if we have enough data for vote data = data.split() if len(data) == 1 and data[0] == 'maprotate' or len(data) == 1 and data[0] == 'maprestart' or len(data) == 1 and data[0] == 'maps': self._vote = data[0] self._value = data[0] elif len(data) == 2: type = data[0] value = data[1] self._vote = type self._value = value else: client.message('^1ERROR^7: Invalid usage. Type ^2!help vote ^7for info') return # Check if player is asking what maps can be voted on if self._vote == 'maps': v1 = self.checkIfAllowed(client, 'map') v2 = self.checkIfAllowed(client, 'nextmap') if v1 or v2: cmd.sayLoudOrPM(client, 'Vote enabled maps: ^2%s' % (('^7, ^2').join(self._aMaps.keys()))) self._vote = None self._value = None return else: client.message('^2You do not have permission to call map votes') self._vote = None self._value = None return # Check if enough players in game to vote and store present players. Only players present at vote call can vote playersInGame = 0 self._allplayers = [] for c in self.console.clients.getList(): if c.team != b3.TEAM_SPEC: playersInGame += 1 self._allplayers.insert(0, c) if playersInGame <= 1 and client.maxLevel < 100: client.message('^1ABORT^7: Not enough players in game to vote.') self._vote = None return # Check if type of vote is allowed if self._vote not in self._aVotes: client.message('Vote type not allowed. 
Use ^2!allvotes ^7for available votes.') self._vote = None return # Check if player has permission to call vote type v = self.checkIfAllowed(client, self._vote) if not v: client.message('You do not have permission to call this vote') self._vote = None return # Get further info for proper processing if self._vote == 'map' or self._vote == 'nextmap': q = self.mapvote(client, self._value) if not q: self.debug('Vote aborted: Cannot vote for maps. mapvote turned out false') self._vote = None return if self._vote == 'kick': self._kickRequested = self.adminPlugin.findClientPrompt(self._value, client) if self._kickRequested: if self._kickRequested.maxLevel >= 20: client.message('^1ABORTED^7: Cannot vote to kick admin!') self._vote = None self._value = None self._kickRequested = None return self._value = self._kickRequested.name else: self.debug('could not get the person to kick') self._vote = None self._value = None self._kickRequested = None return # Seems like vote is ok. Broadcast to server self.sendBroadcast() # Start timer self.voteTimer() # Set person who called vote as yes vote self._amt_yes.insert(0, client) if len(self._amt_yes) > (len(self._allplayers) / 2): self.confirmVote() def cmd_allvotes(self, data, client, cmd=None): """\ Show all the votes you are allowed to call """ allowed = [] for k in self._aVotes.keys(): if client.maxLevel >= self._aVotes[k]: allowed.insert(0, k) if len(allowed) > 0: p = sorted(allowed) x = (', ').join(p) client.message('Allowed votes are: %s' % x) elif len(allowed) == 0: client.message('You are not allowed to call any votes') def cmd_yes(self, data, client, cmd=None): """\ Vote yes to the vote in progress """ # Check if there is a vote in progress if not self._vote: client.message('No vote in progress') return # Check if player is allowed to vote if client not in self._allplayers: client.message('Sorry, you cannot enter current vote') return # Check if the player already voted. If not, register vote if client in self._amt_yes or client in self._amt_no: client.message('Are you drunk? You already voted!') return elif client not in self._amt_yes or client not in self._amt_no: self._amt_yes.insert(0, client) # Let player know that vote is registered client.message('^3Your vote has been entered') # Check if majority of players voted already vYes = len(self._amt_yes) vPass = len(self._allplayers) / 2 if vYes > vPass: self.confirmVote() def cmd_no(self, data, client=None, cmd=None): """\ Vote NO to the current vote """ # Check if there is a vote in progress if not self._vote: client.message('No vote in progress') return # Check if player is allowed to vote if client not in self._allplayers: client.message('Sorry, you cannot enter current vote') return # Check if the player already voted if client in self._amt_yes or client in self._amt_no: client.message('Are you drunk? 
You already voted!') return elif client not in self._amt_yes or client not in self._amt_no: self._amt_no.insert(0, client) # Let player know that vote is registered client.message('^3Your vote has been entered') # Check if majority of players voted vNo = len(self._amt_no) vPass = len(self._allplayers) / 2 if vNo > vPass: self.denyVote() def cmd_nextmap(self, data, client=None, cmd=None): """\ - list the next map in rotation """ if not self.aquireCmdLock2(cmd, client, 60, True): client.message('^7Do not spam commands') return if self._mapRequested: cmd.sayLoudOrPM(client, '^7Next Map: ^2%s' % self._mapRequested.title()) return mapname = self.console.getNextMap() if mapname: cmd.sayLoudOrPM(client, '^7Next Map: ^2%s' % mapname) else: client.message('^1Error:^7 could not get map list') def cmd_maprotate(self, data, client, cmd=None): """\ Cycle to next map in rotation """ if self._mapRequested: self.confirmMap() else: self.console.rotateMap() def cmd_veto(self, data, client, cmd=None): """\ Cancel a vote in progress """ if self._vote: client.message('^3Vote canceled') self.denyVote() elif not self._vote: client.message('^3No vote in progress')
37.081456
139
0.550804
20,227
0.945364
0
0
0
0
0
0
7,184
0.335764
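The pass/fail rule in the plugin above is "strictly more than half of the players recorded when the vote was called". A minimal sketch of that rule outside of B3, with illustrative names rather than the plugin's API:

def tally(yes_votes, no_votes, players_at_call):
    # A vote resolves as soon as either side exceeds half of the eligible players.
    half = len(players_at_call) / 2
    if len(yes_votes) > half:
        return "passed"
    if len(no_votes) > half:
        return "failed"
    return "pending"

print(tally({"alice", "bob"}, set(), {"alice", "bob", "carol"}))            # passed
print(tally({"alice"}, {"bob"}, {"alice", "bob", "carol", "dave"}))         # pending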
538362192f9fc22f5fcaa82bb61990dd548e6c63
3,947
py
Python
utils.py
bianan/cfl
e09043d213c7330d5410e27ba90c943d4323dbe8
[ "Apache-2.0" ]
4
2020-07-29T10:18:59.000Z
2021-06-27T22:57:37.000Z
utils.py
bianan/cfl
e09043d213c7330d5410e27ba90c943d4323dbe8
[ "Apache-2.0" ]
null
null
null
utils.py
bianan/cfl
e09043d213c7330d5410e27ba90c943d4323dbe8
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions for manipulating variables in Federated personalization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf TRAIN_NAME = "Train" VALIDATION_NAME = "Validation" TEST_NAME = "Test" LOSS_NAME = "loss" LOSS_SUMMARY_NAME = "perplexity" # Vars type. VARS_TYPE_ALL = "all" VARS_TYPE_SHARED = "shared" VARS_TYPE_PERSONAL = "personal" def get_train_name_scope(var_scope): return "/".join((var_scope, TRAIN_NAME)) def get_validation_name_scope(var_scope): return "/".join((var_scope, VALIDATION_NAME)) def get_test_name_scope(var_scope): return "/".join((var_scope, TEST_NAME)) def get_model_name_scope(var_scope): return "/".join((var_scope, "Model")) def get_update_name_scope(var_scope): return "/".join((var_scope, "Update")) def get_var_dict(vars_): """Gets a dict of var base_name (e.g. 'w') to the variable.""" var_dict = {} for v in vars_: var_base_name = get_base_name(v) var_dict[var_base_name] = v return var_dict def get_var_value_ops(var_dict): return {k: v.value() for k, v in var_dict.items()} def get_base_name(var): return var.name.split("/")[-1].split(":")[0] def get_update_name(var, var_scope): var_base_name = get_base_name(var) var_update_name = "update_%s_%s" % (var_scope, var_base_name) return var_update_name def get_update_placeholder_name(var): var_base_name = get_base_name(var) placeholder_name = "placeholder_%s" % var_base_name return placeholder_name def generate_update_ops(vars_): """Generates update ops and placeholders. For each var, it generates a placeholder to feed in the new values. Then it takes the mean of the inputs along dimension 0. Args: vars_: Vars for which the update ops will be generated. Returns: update_ops: A list of update ops. dict_update_placeholders: A dict of var base name to its update-placeholder. """ update_ops = [] dict_update_placeholders = {} for v in vars_: # For every var in the scope, add a placeholder to feed in the new values. # The placeholder may need to hold multiple values, this happens # when updating the server from many clients. 
    var_in_shape = [None] + v.shape.as_list()
    var_in_name = get_update_placeholder_name(v)
    var_in = tf.placeholder(v.dtype, shape=var_in_shape, name=var_in_name)
    var_in_mean = tf.reduce_mean(var_in, 0)
    update_op = v.assign(var_in_mean)
    update_ops.append(update_op)
    dict_update_placeholders[get_base_name(v)] = var_in
  return update_ops, dict_update_placeholders


def print_vars_on_clients(clients, sess):
  for c in clients.values():
    print("client %d:" % c.id)
    print(sess.run(c.read_ops_all_vars))


def add_prefix(prefix, name):
  """Adds prefix to name."""
  return "/".join((prefix, name))


def add_suffix(suffix, name):
  """Adds suffix to name."""
  return "/".join((name, suffix))


def get_attribute_dict(class_instance):
  """Gets a dict of attributes of a class instance."""
  # first start by grabbing the Class items
  attribute_dict = dict((x, y)
                        for x, y in class_instance.__class__.__dict__.items()
                        if x[:2] != "__")
  # then update the class items with the instance items
  attribute_dict.update(class_instance.__dict__)
  return attribute_dict
28.395683
80
0.727135
0
0
0
0
0
0
0
0
1,629
0.412719
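generate_update_ops above attaches, to every variable, a placeholder carrying one value per client and an op assigning their element-wise mean back to the variable. Stripped of TensorFlow, that averaging step is just a column-wise mean; a small framework-free sketch follows (illustrative only, not the repository's API):

def average_client_updates(client_values):
    # client_values: one equally shaped flat list of parameters per client.
    n = len(client_values)
    return [sum(column) / n for column in zip(*client_values)]

# Two clients reporting two parameters each -> the server keeps their mean.
print(average_client_updates([[1.0, 2.0], [3.0, 4.0]]))  # [2.0, 3.0]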
53840797fa9f83c58be0cb1122c4f31c4c62dc94
4,841
py
Python
unittest/scripts/py_devapi/scripts/mysqlx_collection_remove.py
mueller/mysql-shell
29bafc5692bd536a12c4e41c54cb587375fe52cf
[ "Apache-2.0" ]
119
2016-04-14T14:16:22.000Z
2022-03-08T20:24:38.000Z
unittest/scripts/py_devapi/scripts/mysqlx_collection_remove.py
mueller/mysql-shell
29bafc5692bd536a12c4e41c54cb587375fe52cf
[ "Apache-2.0" ]
9
2017-04-26T20:48:42.000Z
2021-09-07T01:52:44.000Z
unittest/scripts/py_devapi/scripts/mysqlx_collection_remove.py
mueller/mysql-shell
29bafc5692bd536a12c4e41c54cb587375fe52cf
[ "Apache-2.0" ]
51
2016-07-20T05:06:48.000Z
2022-03-09T01:20:53.000Z
# Assumptions: validate_crud_functions available # Assumes __uripwd is defined as <user>:<pwd>@<host>:<plugin_port> from __future__ import print_function from mysqlsh import mysqlx mySession = mysqlx.get_session(__uripwd) ensure_schema_does_not_exist(mySession, 'js_shell_test') schema = mySession.create_schema('js_shell_test') # Creates a test collection and inserts data into it collection = schema.create_collection('collection1') result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA01", "name": 'jack', "age": 17, "gender": 'male'}).execute() result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA02", "name": 'adam', "age": 15, "gender": 'male'}).execute() result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA03", "name": 'brian', "age": 14, "gender": 'male'}).execute() result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA04", "name": 'alma', "age": 13, "gender": 'female'}).execute() result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA05", "name": 'carol', "age": 14, "gender": 'female'}).execute() result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA06", "name": 'donna', "age": 16, "gender": 'female'}).execute() result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA07", "name": 'angel', "age": 14, "gender": 'male'}).execute() # ------------------------------------------------ # collection.remove Unit Testing: Dynamic Behavior # ------------------------------------------------ #@ CollectionRemove: valid operations after remove crud = collection.remove('some_condition') validate_crud_functions(crud, ['sort', 'limit', 'bind', 'execute']) #@ CollectionRemove: valid operations after sort crud = crud.sort(['name']) validate_crud_functions(crud, ['limit', 'bind', 'execute']) #@ CollectionRemove: valid operations after limit crud = crud.limit(1) validate_crud_functions(crud, ['bind', 'execute']) #@ CollectionRemove: valid operations after bind crud = collection.remove('name = :data').bind('data', 'donna') validate_crud_functions(crud, ['bind', 'execute']) #@ CollectionRemove: valid operations after execute result = crud.execute() validate_crud_functions(crud, ['limit', 'bind', 'execute']) #@ Reusing CRUD with binding print('Deleted donna:', result.affected_items_count, '\n') result=crud.bind('data', 'alma').execute() print('Deleted alma:', result.affected_items_count, '\n') # ---------------------------------------------- # collection.remove Unit Testing: Error Conditions # ---------------------------------------------- #@# CollectionRemove: Error conditions on remove crud = collection.remove() crud = collection.remove(' ') crud = collection.remove(5) crud = collection.remove('test = "2') #@# CollectionRemove: Error conditions sort crud = collection.remove('some_condition').sort() crud = collection.remove('some_condition').sort(5) crud = collection.remove('some_condition').sort([]) crud = collection.remove('some_condition').sort(['name', 5]) crud = collection.remove('some_condition').sort('name', 5) #@# CollectionRemove: Error conditions on limit crud = collection.remove('some_condition').limit() crud = collection.remove('some_condition').limit('') #@# CollectionRemove: Error conditions on bind crud = collection.remove('name = :data and age > :years').bind() crud = collection.remove('name = :data and age > :years').bind(5, 5) crud = collection.remove('name = :data and age > :years').bind('another', 5) #@# CollectionRemove: Error conditions on execute crud = collection.remove('name = :data and age > :years').execute() crud = 
collection.remove('name = :data and age > :years').bind('years', 5).execute() # --------------------------------------- # collection.remove Unit Testing: Execution # --------------------------------------- #@ CollectionRemove: remove under condition //! [CollectionRemove: remove under condition] result = collection.remove('age = 15').execute() print('Affected Rows:', result.affected_items_count, '\n') docs = collection.find().execute().fetch_all() print('Records Left:', len(docs), '\n') //! [CollectionRemove: remove under condition] #@ CollectionRemove: remove with binding //! [CollectionRemove: remove with binding] result = collection.remove('gender = :heorshe').limit(2).bind('heorshe', 'male').execute() print('Affected Rows:', result.affected_items_count, '\n') //! [CollectionRemove: remove with binding] docs = collection.find().execute().fetch_all() print('Records Left:', len(docs), '\n') #@ CollectionRemove: full remove //! [CollectionRemove: full remove] result = collection.remove('1').execute() print('Affected Rows:', result.affected_items_count, '\n') docs = collection.find().execute().fetch_all() print('Records Left:', len(docs), '\n') //! [CollectionRemove: full remove] # Cleanup mySession.drop_schema('js_shell_test') mySession.close()
41.732759
126
0.685602
0
0
0
0
0
0
0
0
2,456
0.507333
538622f0e20beb2e31f0c54850a3e278464da569
1,323
py
Python
indian-flag.py
aditya270520/indian-flag
65851eefdd229cca150d2bbe8fa61c9e06e120e0
[ "MIT" ]
null
null
null
indian-flag.py
aditya270520/indian-flag
65851eefdd229cca150d2bbe8fa61c9e06e120e0
[ "MIT" ]
null
null
null
indian-flag.py
aditya270520/indian-flag
65851eefdd229cca150d2bbe8fa61c9e06e120e0
[ "MIT" ]
null
null
null
import turtle
turtle.bgcolor('black')
wn=turtle.Screen()
tr=turtle.Turtle()
move=1
tr.speed("fastest")
for i in range (360):
    # turtle.write() expects a boolean "move" flag; False keeps the pen in place after writing.
    tr.write("ADITYA",False,'center',font=('Showcard gothic',50))
    tr.penup()
    tr.goto(-200,100)
    tr.pendown()
    tr.color("orange")
    tr.right(move)
    tr.forward(100)
    tr.penup()
    tr.color("white")
    tr.pendown()
    tr.right(30)
    tr.forward(60)
    tr.pendown()
    tr.color("light green")
    tr.left(10)
    tr.forward(50)
    tr.right(70)
    tr.penup()
    tr.pendown()
    tr.color('light blue')
    tr.forward(50)
    tr.color('light green')
    tr.pu()
    tr.pd()
    tr.color("light blue")
    tr.forward(100)
    tr.color('brown')
    tr.forward(200)
    tr.pu()
    tr.pd()
    tr.color('light green')
    tr.circle(2)
    tr.color('light blue')
    tr.circle(4)
    tr.pu()
    tr.fd(20)
    tr.pd()
    tr.circle(6)
    tr.pu()
    tr.fd(40)
    tr.pd()
    tr.circle(8)
    tr.pu()
    tr.fd(80)
    tr.pd()
    tr.circle(10)
    tr.pu()
    tr.fd(120)
    tr.pd()
    tr.circle(20)
    tr.color('yellow')
    tr.circle(10)
    tr.pu()
    tr.pd()
    tr.color('white')
    tr.forward(150)
    tr.color('red')
    tr.fd(50)
    tr.color('blue')
    tr.begin_fill()
    tr.penup()
    tr.home()
    move=move+1
    tr.penup()
    tr.forward(50)
turtle.done()
17.64
67
0.543462
0
0
0
0
0
0
0
0
179
0.135299
538700fd5d58b1e117fad14517de686aecad4c56
171
py
Python
leaf/rbac/model/__init__.py
guiqiqi/leaf
79e34f4b8fba8c6fd208b5a3049103dca2064ab5
[ "Apache-2.0" ]
119
2020-01-30T04:25:03.000Z
2022-03-27T07:15:45.000Z
leaf/rbac/model/__init__.py
guiqiqi/leaf
79e34f4b8fba8c6fd208b5a3049103dca2064ab5
[ "Apache-2.0" ]
8
2020-02-02T05:49:47.000Z
2021-01-25T03:31:09.000Z
leaf/rbac/model/__init__.py
guiqiqi/leaf
79e34f4b8fba8c6fd208b5a3049103dca2064ab5
[ "Apache-2.0" ]
11
2020-01-31T15:07:11.000Z
2021-03-24T03:47:48.000Z
"""用户, 组, 及相关认证数据库模型""" from .group import Group from .user import User from .user import UserIndex from .auth import Authentication from .accesspoint import AccessPoint
21.375
36
0.783626
0
0
0
0
0
0
0
0
49
0.248731
53898a41d0b3979d97ed59d9bf3e85e1664af2da
103
py
Python
programacao basica/7.py
m-brito/Neps-Academy
0d962fb921d74c5f97f10fcdd8a0f464c0ccdb14
[ "MIT" ]
null
null
null
programacao basica/7.py
m-brito/Neps-Academy
0d962fb921d74c5f97f10fcdd8a0f464c0ccdb14
[ "MIT" ]
null
null
null
programacao basica/7.py
m-brito/Neps-Academy
0d962fb921d74c5f97f10fcdd8a0f464c0ccdb14
[ "MIT" ]
null
null
null
bino = int(input()) cino = int(input()) if (bino+cino)%2==0: print("Bino") else: print("Cino")
14.714286
20
0.563107
0
0
0
0
0
0
0
0
12
0.116505
5389a92b434b224efc0d211777895516ff271648
1,023
py
Python
update_readme.py
CalmScout/LeetCode
62720934b5906e6b255c7e91d3a6fa1d713e4391
[ "MIT" ]
null
null
null
update_readme.py
CalmScout/LeetCode
62720934b5906e6b255c7e91d3a6fa1d713e4391
[ "MIT" ]
null
null
null
update_readme.py
CalmScout/LeetCode
62720934b5906e6b255c7e91d3a6fa1d713e4391
[ "MIT" ]
null
null
null
""" Script updates `README.md` with respect to files at ./easy and ./medium folders. """ import os curr_dir = os.path.dirname(__file__) with open(os.path.join(curr_dir, "README.md"), 'w') as readme: readme.write("# LeetCode\nDeliberate practice in coding.\n") langs = [l for l in os.listdir(curr_dir) if os.path.isdir(os.path.join(curr_dir, l)) and l[0] != '.'] for lang in langs: readme.write("## {}\n".format(lang)) readme.write("### Easy\n") easy = sorted(os.listdir(f"{curr_dir}/{lang}/easy")) easy = [x.split("_")[0] for x in easy] easy_solved = "" for el in easy: easy_solved += "{}, ".format(el) readme.write(easy_solved[:-2] + "\n") readme.write("### Medium\n") medium = sorted(os.listdir(f"{curr_dir}/{lang}/medium")) medium = [x.split("_")[0] for x in medium] medium_solved = "" for el in medium: medium_solved += "{}, ".format(el) readme.write(medium_solved[:-2] + '\n')
39.346154
105
0.572825
0
0
0
0
0
0
0
0
268
0.261975
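The inner loops above accumulate "id, " fragments and then slice off the trailing separator; str.join expresses the same result directly. A small equivalent with hypothetical file names, shown only to illustrate the idiom:

problems = ["001_two-sum.py", "007_reverse-integer.py", "013_roman-to-integer.py"]
ids = [name.split("_")[0] for name in problems]
print(", ".join(ids))  # 001, 007, 013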
538a493d99ff3d905d532327c5a14418aa3d3b7e
10,614
py
Python
scripts/biotimesql.py
Jay-Iam/retriever
26e321cdb86fcb4cb78184c4bf5c0c6902a97d2c
[ "MIT" ]
null
null
null
scripts/biotimesql.py
Jay-Iam/retriever
26e321cdb86fcb4cb78184c4bf5c0c6902a97d2c
[ "MIT" ]
1
2019-02-23T14:11:34.000Z
2019-02-28T21:18:51.000Z
scripts/biotimesql.py
harshitbansal05/retriever
a5b849ee5ed3cc8a92f8aff93e5ec2ba54599213
[ "MIT" ]
1
2020-01-06T11:37:54.000Z
2020-01-06T11:37:54.000Z
# -*- coding: utf-8 -*- #retriever import csv from pkg_resources import parse_version from retriever.lib.models import Table from retriever.lib.templates import Script try: from retriever.lib.defaults import VERSION try: from retriever.lib.tools import open_fr, open_fw, open_csvw except ImportError: from retriever.lib.scripts import open_fr, open_fw except ImportError: from retriever import open_fr, open_fw, VERSION class main(Script): def __init__(self, **kwargs): Script.__init__(self, **kwargs) self.title = "Commercial Fisheries Monthly Trade Data by Product, Country/Association" self.name = "biotimesql" self.retriever_minimum_version = "2.2.0" self.urls = { "sql_file": "https://zenodo.org/record/2602708/files/BioTIMESQL02_04_2018.sql?download=1", } self.version = "1.0.1" self.ref = "https://zenodo.org/record/1095628#.WskN7dPwYyn" self.citation = "Dornelas M, Antão LH, Moyes F, et al. BioTIME: A database of biodiversity time series for the Anthropocene. Global Ecology & Biogeography. 2018; 00:1 - 26. https://doi.org/10.1111/geb.12729." self.description = "The BioTIME database has species identities and abundances in ecological assemblages through time." self.keywords = ["Time series", "Anthropocene", "Global"] self.licenses = [{"name": "CC BY 4.0"}] self.encoding = "latin1" if parse_version(VERSION) <= parse_version("2.0.0"): self.shortname = self.name self.name = self.title self.tags = self.keywords def download(self, engine=None, debug=False): Script.download(self, engine, debug) engine = self.engine original_sql_file = "BioTIMESQL02_04_2018.sql" engine.download_file(self.urls["sql_file"], original_sql_file) sql_data = open_fr(self.engine.format_filename(original_sql_file)) set_open = False csv_writer = None csv_file = None table_name = None NULL = None for line in sql_data: table_indicator = "-- Table structure for table " if line.startswith(table_indicator): st = line[len(table_indicator):].replace("`", "") table_name = st.strip() current_file_process = table_name current_file_open = current_file_process if set_open and not current_file_process == current_file_open: csv_file.close() set_open = False else: out_file = "{name}.csv".format(name=table_name) csv_file = open_fw(engine.format_filename(out_file)) csv_writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL) set_open = True if line.startswith("INSERT INTO `{table_name}`".format(table_name=table_name)): row_val = line[line.index("VALUES (") + 8:-3] table_rows = row_val.replace("\r\n","").split("),(") for i_row in table_rows: v = eval('[' + str(i_row) + ']') csv_writer.writerows([v]) if csv_file: csv_file.close() # Create abundance table table = Table("ID_ABUNDANCE", delimiter=",", header_rows=0, contains_pk=False) table.columns = [ ("ID_ABUNDANCE", ("int",)), ("ABUNDANCE_TYPE", ("char", "100")), ] engine.table = table engine.create_table() engine.insert_data_from_file(engine.format_filename("abundance.csv")) # Create allrawdata table table = Table("allrawdata", delimiter=",", header_rows=0, contains_pk=False) table.columns = [ ("ID_ALL_RAW_DATA", ("int",)), ("ABUNDANCE", ("double",)), ("BIOMASS", ("double",)), ("ID_SPECIES", ("int",)), ("SAMPLE_DESC", ("char", 200)), ("PLOT", ("char", 150)), ("LATITUDE", ("double",)), ("LONGITUDE", ("double",)), ("DEPTH", ("double",)), ("DAY", ("int",)), ("MONTH", ("int",)), ("YEAR", ("int",)), ("STUDY_ID", ("int",)), ] engine.table = table engine.create_table() engine.insert_data_from_file(engine.format_filename("allrawdata.csv")) # Create biomass table table = Table("biomass", delimiter=",", 
header_rows=0, contains_pk=False) table.columns = [("ID_BIOMASS", ("int",)), ("BIOMASS_TYPE", ("char", "100"))] engine.table = table engine.create_table() engine.insert_data_from_file(engine.format_filename("biomass.csv")) # Create citation1 table table = Table("citation1", delimiter=",", header_rows=0, contains_pk=False) table.columns = [ ("ID_CITATION1", ("int",)), ("STUDY_ID", ("int",)), ("CITATION_LINE", ("char",)), ] engine.table = table engine.create_table() engine.insert_data_from_file(engine.format_filename("citation1.csv")) # Create contacts table table = Table("contacts", delimiter=",", header_rows=0, contains_pk=False) table.columns = [ ("ID_CONTACTS", ("int",)), ("STUDY_ID", ("int",)), ("CONTACT_1", ("char", 500)), ("CONTACT_2", ("char", 500)), ("CONT_1_MAIL", ("char", 60)), ("CONT_2_MAIL", ("char", 60)), ("LICENSE", ("char", 200)), ("WEB_LINK", ("char", 200)), ("DATA_SOURCE", ("char", 250)), ] engine.table = table engine.create_table() engine.insert_data_from_file(engine.format_filename("contacts.csv")) # Create countries table table = Table("countries", delimiter=",", header_rows=0, contains_pk=False) table.columns = [("COUNT_ID", ("int",)), ("COUNTRY_NAME", ("char", 200))] engine.table = table engine.create_table() engine.insert_data_from_file(engine.format_filename("countries.csv")) # Create curation table table = Table("curation", delimiter=",", header_rows=0, contains_pk=False) table.columns = [ ("ID_CURATION", ("int",)), ("STUDY_ID", ("int",)), ("LINK_ID", ("int",)), ("COMMENTS", ("char",)), ("DATE_STUDY_ADDED", ("char", 50)), ] engine.table = table engine.create_table() engine.insert_data_from_file(engine.format_filename("curation.csv")) # Create datasets table table = Table("datasets", delimiter=",", header_rows=0, contains_pk=False) table.columns = [ ("ID_DATASETS", ("int",)), ("STUDY_ID", ("int",)), ("TAXA", ("char", 50)), ("ORGANISMS", ("char", 200)), ("TITLE", ("char",800)), ("AB_BIO", ("char", 2)), ("HAS_PLOT", ("char", 10)), ("DATA_POINTS", ("char",)), ("START_YEAR", ("char",)), ("END_YEAR", ("char",)), ("CENT_LAT", ("double",)), ("CENT_LONG", ("double",)), ("NUMBER_OF_SPECIES", ("char",)), ("NUMBER_OF_SAMPLES", ("char",)), ("NUMBER_LAT_LONG", ("char",)), ("TOTAL", ("char",)), ("GRAIN_SIZE_TEXT", ("char",)), ("GRAIN_SQ_KM", ("double",)), ("AREA_SQ_KM", ("double",)), ("AB_TYPE", ("char", )), ("BIO_TYPE", ("char",)), ("SAMPLE_TYPE", ("char",)), ] engine.table = table engine.create_table() engine.insert_data_from_file(engine.format_filename("datasets.csv")) # Create downloads table table = Table("downloads", delimiter=",", header_rows=0, contains_pk=False) table.columns = [ ("D_ID", ("int",)), ("STUDY", ("char", 25)), ("NAME", ("char", 150)), ("EMAIL", ("char", 150)), ("COUNTRY", ("char", 200)), ("ROLE", ("char", 150)), ("PURPOSE", ("char", 500)), ("LOCATION", ("char", 250)), ("DATE_STAMP", ("char",)), ] engine.table = table engine.create_table() engine.insert_data_from_file(engine.format_filename("downloads.csv")) # Create methods table table = Table("methods", delimiter=",", header_rows=0, contains_pk=False) table.columns = [ ("ID_METHODS", ("int",)), ("STUDY_ID", ("int",)), ("METHODS", ("char",)), ("SUMMARY_METHODS", ("char", 500)), ] engine.table = table engine.create_table() engine.insert_data_from_file(engine.format_filename("methods.csv")) # Create sample table table = Table("sample", delimiter=",", header_rows=0, contains_pk=False) table.columns = [ ("ID_SAMPLE", ("int",)), ("ID_TREAT", ("int",)), ("SAMPLE_DESC_NAME", ("char", 200)), ] engine.table = table 
engine.create_table() engine.insert_data_from_file(engine.format_filename("sample.csv")) # Create site table table = Table("site", delimiter=",", header_rows=0, contains_pk=False) table.columns = [ ("ID_SITE", ("int",)), ("STUDY_ID", ("int",)), ("REALM", ("char", 11)), ("CLIMATE", ("char", 20)), ("GENERAL_TREAT", ("char", 200)), ("TREATMENT", ("char", 200)), ("TREAT_COMMENTS", ("char", 250)), ("TREAT_DATE", ("char", 100)), ("CEN_LATITUDE", ("double",)), ("CEN_LONGITUDE", ("double",)), ("HABITAT", ("char", 100)), ("PROTECTED_AREA", ("char", 50)), ("AREA", ("double",)), ("BIOME_MAP", ("char", 500)) ] engine.table = table engine.create_table() engine.insert_data_from_file(engine.format_filename("site.csv")) # Create species table table = Table("species", delimiter=",", header_rows=0, contains_pk=False) table.columns = [ ("ID_SPECIES", ("int",)), ("GENUS", ("char", 100)), ("SPECIES", ("char", 100)), ("GENUS_SPECIES", ("char", 100)) ] engine.table = table engine.create_table() engine.insert_data_from_file(engine.format_filename("species.csv")) SCRIPT = main()
39.022059
216
0.531939
10,138
0.955064
0
0
0
0
0
0
2,997
0.282336
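download() above turns each MySQL dump line of the form INSERT INTO `table` VALUES (...),(...); into CSV rows before loading the tables. A condensed, standalone sketch of that parsing step; it uses ast.literal_eval where the original uses eval, and the function name is assumed for illustration:

import ast

def rows_from_insert(line):
    # Keep only the text between "VALUES (" and the closing ");", then split the
    # "),("-separated value groups and parse each group as a Python tuple.
    values = line[line.index("VALUES (") + 8:].rstrip().rstrip(");")
    return [list(ast.literal_eval("(" + group + ",)"))
            for group in values.replace("\r\n", "").split("),(")]

line = "INSERT INTO `species` VALUES (1,'Gadus','morhua'),(2,'Salmo','salar');"
print(rows_from_insert(line))  # [[1, 'Gadus', 'morhua'], [2, 'Salmo', 'salar']]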