body                     string (lengths 26 to 98.2k)
body_hash                int64 (-9,222,864,604,528,158,000 to 9,221,803,474B)
docstring                string (lengths 1 to 16.8k)
path                     string (lengths 5 to 230)
name                     string (lengths 1 to 96)
repository_name          string (lengths 7 to 89)
lang                     string (1 class)
body_without_docstring   string (lengths 20 to 98.2k)
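Each record below follows this schema. A minimal sketch of iterating such rows with the Hugging Face datasets library (the dataset identifier is hypothetical, purely for illustration):

    from datasets import load_dataset

    # Hypothetical dataset id; substitute the real one.
    ds = load_dataset("someuser/python-functions", split="train")
    row = ds[0]
    print(row["repository_name"], row["path"], row["name"])
    print(row["docstring"][:80])               # natural-language summary
    print(row["body_without_docstring"][:80])  # source with the docstring stripped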
def _get_latest_log_files(name, testing=False) -> List[str]: 'Returns a list with the paths of all available logfiles for `name` sorted by latest first.' log_dir = dirs.get_log_dir(name) files = filter((lambda filename: (name in filename)), os.listdir(log_dir)) files = filter((lambda filename: (('testing' in filename) if testing else ('testing' not in filename))), files) return [os.path.join(log_dir, filename) for filename in sorted(files, reverse=True)]
2,716,176,745,821,204,000
Returns a list with the paths of all available logfiles for `name` sorted by latest first.
aw_core/log.py
_get_latest_log_files
minhlt9196/activeseconds-aw-core
python
def _get_latest_log_files(name, testing=False) -> List[str]: log_dir = dirs.get_log_dir(name) files = filter((lambda filename: (name in filename)), os.listdir(log_dir)) files = filter((lambda filename: (('testing' in filename) if testing else ('testing' not in filename))), files) return [os.path.join(log_dir, filename) for filename in sorted(files, reverse=True)]
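The latest-first ordering relies solely on `sorted(files, reverse=True)`, which assumes the log filenames embed a lexicographically sortable timestamp; a minimal illustration of that assumption (filenames invented for the example):

    # Zero-padded ISO timestamps sort correctly as plain strings,
    # so reverse lexicographic order is newest-first.
    files = [
        "aw-server_2021-01-03T10-00-00.log",
        "aw-server_2021-01-05T09-30-00.log",
        "aw-server_2021-01-04T23-59-59.log",
    ]
    print(sorted(files, reverse=True)[0])  # aw-server_2021-01-05T09-30-00.log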
def get_latest_log_file(name, testing=False) -> Optional[str]: '\n Returns the filename of the last logfile with `name`.\n Useful when you want to read the logfile of another TimeBench service.\n ' last_logs = _get_latest_log_files(name, testing=testing) return (last_logs[0] if last_logs else None)
-1,499,907,841,220,113,000
Returns the filename of the last logfile with `name`. Useful when you want to read the logfile of another TimeBench service.
aw_core/log.py
get_latest_log_file
minhlt9196/activeseconds-aw-core
python
def get_latest_log_file(name, testing=False) -> Optional[str]: '\n Returns the filename of the last logfile with `name`.\n Useful when you want to read the logfile of another TimeBench service.\n ' last_logs = _get_latest_log_files(name, testing=testing) return (last_logs[0] if last_logs else None)
def log_format(x): 'Used to give JsonFormatter proper parameter format' return ['%({0:s})'.format(i) for i in x]
4,862,817,577,818,626,000
Used to give JsonFormatter proper parameter format
aw_core/log.py
log_format
minhlt9196/activeseconds-aw-core
python
def log_format(x): return ['%({0:s})'.format(i) for i in x]
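The helper simply wraps each field name in logging's percent-style syntax; a quick check (field names here are standard logging attributes, chosen for the example):

    fields = ["asctime", "levelname", "message"]
    print(log_format(fields))  # ['%(asctime)', '%(levelname)', '%(message)']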
def fib_sum(limit): '\n Computes the sum of the even Fibonacci numbers that are less than the given limit\n ' u1 = 1 u2 = 2 summ = 2 _u = (u1 + u2) while (_u < limit): if ((_u % 2) == 0): summ += _u u1 = u2 u2 = _u _u = (u1 + u2) return summ
-7,042,709,406,166,169,000
Computes the sum of the even Fibonacci numbers that are less than the given limit
t002-fibanatchi.py
fib_sum
z1365/euler
python
def fib_sum(limit): '\n \n ' u1 = 1 u2 = 2 summ = 2 _u = (u1 + u2) while (_u < limit): if ((_u % 2) == 0): summ += _u u1 = u2 u2 = _u _u = (u1 + u2) return summ
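A quick sanity check of the loop (the second input is the classic Project Euler #2 limit, not a value from the repository):

    assert fib_sum(10) == 10               # even Fibonacci terms below 10: 2 + 8
    assert fib_sum(4_000_000) == 4613732   # even Fibonacci terms below four million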
def test_angular_momentum_and_linear_momentum(): "A rod with length 2l, centroidal inertia I, and mass M along with a\n particle of mass m fixed to the end of the rod rotate with an angular rate\n of omega about point O which is fixed to the non-particle end of the rod.\n The rod's reference frame is A and the inertial frame is N." (m, M, l, I) = symbols('m, M, l, I') omega = dynamicsymbols('omega') N = ReferenceFrame('N') a = ReferenceFrame('a') O = Point('O') Ac = O.locatenew('Ac', (l * N.x)) P = Ac.locatenew('P', (l * N.x)) O.set_vel(N, (0 * N.x)) a.set_ang_vel(N, (omega * N.z)) Ac.v2pt_theory(O, N, a) P.v2pt_theory(O, N, a) Pa = Particle('Pa', P, m) A = RigidBody('A', Ac, a, M, ((I * outer(N.z, N.z)), Ac)) expected = (((((2 * m) * omega) * l) * N.y) + (((M * l) * omega) * N.y)) assert (linear_momentum(N, A, Pa) == expected) raises(TypeError, (lambda : angular_momentum(N, N, A, Pa))) raises(TypeError, (lambda : angular_momentum(O, O, A, Pa))) raises(TypeError, (lambda : angular_momentum(O, N, O, Pa))) expected = ((((I + (M * (l ** 2))) + ((4 * m) * (l ** 2))) * omega) * N.z) assert (angular_momentum(O, N, A, Pa) == expected)
-4,550,189,536,131,389,400
A rod with length 2l, centroidal inertia I, and mass M along with a particle of mass m fixed to the end of the rod rotate with an angular rate of omega about point O which is fixed to the non-particle end of the rod. The rod's reference frame is A and the inertial frame is N.
sympy/physics/mechanics/tests/test_functions.py
test_angular_momentum_and_linear_momentum
Abhi58/sympy
python
def test_angular_momentum_and_linear_momentum(): "A rod with length 2l, centroidal inertia I, and mass M along with a\n particle of mass m fixed to the end of the rod rotate with an angular rate\n of omega about point O which is fixed to the non-particle end of the rod.\n The rod's reference frame is A and the inertial frame is N." (m, M, l, I) = symbols('m, M, l, I') omega = dynamicsymbols('omega') N = ReferenceFrame('N') a = ReferenceFrame('a') O = Point('O') Ac = O.locatenew('Ac', (l * N.x)) P = Ac.locatenew('P', (l * N.x)) O.set_vel(N, (0 * N.x)) a.set_ang_vel(N, (omega * N.z)) Ac.v2pt_theory(O, N, a) P.v2pt_theory(O, N, a) Pa = Particle('Pa', P, m) A = RigidBody('A', Ac, a, M, ((I * outer(N.z, N.z)), Ac)) expected = (((((2 * m) * omega) * l) * N.y) + (((M * l) * omega) * N.y)) assert (linear_momentum(N, A, Pa) == expected) raises(TypeError, (lambda : angular_momentum(N, N, A, Pa))) raises(TypeError, (lambda : angular_momentum(O, O, A, Pa))) raises(TypeError, (lambda : angular_momentum(O, N, O, Pa))) expected = ((((I + (M * (l ** 2))) + ((4 * m) * (l ** 2))) * omega) * N.z) assert (angular_momentum(O, N, A, Pa) == expected)
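The expected momenta in this test can be rederived by hand: the rod's centroid at distance l moves at speed l*omega, the particle at 2l moves at 2l*omega, and the angular momentum about O picks up parallel-axis terms. A minimal sympy check of that arithmetic (symbols mirror the test's):

    from sympy import symbols, expand

    m, M, l, I, w = symbols('m M l I omega')
    linear = M*l*w + m*(2*l)*w               # rod centroid + particle
    angular = (I + M*l**2)*w + m*(2*l)**2*w  # parallel-axis shifts about O
    assert expand(linear - (M*l + 2*m*l)*w) == 0
    assert expand(angular - (I + M*l**2 + 4*m*l**2)*w) == 0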
def to_timedelta(arg, unit=None, errors='raise'): '\n Convert argument to timedelta.\n\n Timedeltas are absolute differences in times, expressed in difference\n units (e.g. days, hours, minutes, seconds). This method converts\n an argument from a recognized timedelta format / value into\n a Timedelta type.\n\n Parameters\n ----------\n arg : str, timedelta, list-like or Series\n The data to be converted to timedelta. The character M by itself,\n e.g. \'1M\', is treated as minute, not month. The characters Y and y\n are treated as the mean length of the Gregorian calendar year -\n 365.2425 days or 365 days 5 hours 49 minutes 12 seconds.\n unit : str, optional\n Denotes the unit of the arg for numeric `arg`. Defaults to ``"ns"``.\n\n Possible values:\n\n * \'W\'\n * \'D\' / \'days\' / \'day\'\n * \'hours\' / \'hour\' / \'hr\' / \'h\'\n * \'m\' / \'minute\' / \'min\' / \'minutes\' / \'T\'\n * \'S\' / \'seconds\' / \'sec\' / \'second\'\n * \'ms\' / \'milliseconds\' / \'millisecond\' / \'milli\' / \'millis\' / \'L\'\n * \'us\' / \'microseconds\' / \'microsecond\' / \'micro\' / \'micros\' / \'U\'\n * \'ns\' / \'nanoseconds\' / \'nano\' / \'nanos\' / \'nanosecond\' / \'N\'\n\n .. versionchanged:: 1.1.0\n\n Must not be specified when `arg` contains strings and\n ``errors="raise"``.\n\n errors : {\'ignore\', \'raise\', \'coerce\'}, default \'raise\'\n - If \'raise\', then invalid parsing will raise an exception.\n - If \'coerce\', then invalid parsing will be set as NaT.\n - If \'ignore\', then invalid parsing will return the input.\n\n Returns\n -------\n timedelta64 or numpy.array of timedelta64\n Output type returned if parsing succeeded.\n\n See Also\n --------\n DataFrame.astype : Cast argument to a specified dtype.\n to_datetime : Convert argument to datetime.\n convert_dtypes : Convert dtypes.\n\n Examples\n --------\n Parsing a single string to a Timedelta:\n\n >>> pd.to_timedelta(\'1 days 06:05:01.00003\')\n Timedelta(\'1 days 06:05:01.000030\')\n >>> pd.to_timedelta(\'15.5us\')\n Timedelta(\'0 days 00:00:00.000015500\')\n\n Parsing a list or array of strings:\n\n >>> pd.to_timedelta([\'1 days 06:05:01.00003\', \'15.5us\', \'nan\'])\n TimedeltaIndex([\'1 days 06:05:01.000030\', \'0 days 00:00:00.000015500\', NaT],\n dtype=\'timedelta64[ns]\', freq=None)\n\n Converting numbers by specifying the `unit` keyword argument:\n\n >>> pd.to_timedelta(np.arange(5), unit=\'s\')\n TimedeltaIndex([\'0 days 00:00:00\', \'0 days 00:00:01\', \'0 days 00:00:02\',\n \'0 days 00:00:03\', \'0 days 00:00:04\'],\n dtype=\'timedelta64[ns]\', freq=None)\n >>> pd.to_timedelta(np.arange(5), unit=\'d\')\n TimedeltaIndex([\'0 days\', \'1 days\', \'2 days\', \'3 days\', \'4 days\'],\n dtype=\'timedelta64[ns]\', freq=None)\n ' if (unit is not None): unit = parse_timedelta_unit(unit) if (errors not in ('ignore', 'raise', 'coerce')): raise ValueError("errors must be one of 'ignore', 'raise', or 'coerce'") if (unit in {'Y', 'y', 'M'}): raise ValueError("Units 'M', 'Y', and 'y' are no longer supported, as they do not represent unambiguous timedelta durations.") if (arg is None): return arg elif isinstance(arg, ABCSeries): values = _convert_listlike(arg._values, unit=unit, errors=errors) return arg._constructor(values, index=arg.index, name=arg.name) elif isinstance(arg, ABCIndexClass): return _convert_listlike(arg, unit=unit, errors=errors, name=arg.name) elif (isinstance(arg, np.ndarray) and (arg.ndim == 0)): arg = arg.item() elif (is_list_like(arg) and (getattr(arg, 'ndim', 1) == 1)): return _convert_listlike(arg, unit=unit, errors=errors) elif (getattr(arg, 'ndim', 1) > 1): raise TypeError('arg must be a string, timedelta, list, tuple, 1-d array, or Series') if (isinstance(arg, str) and (unit is not None)): raise ValueError('unit must not be specified if the input is/contains a str') return _coerce_scalar_to_timedelta_type(arg, unit=unit, errors=errors)
-4,619,909,879,631,674,000
Convert argument to timedelta. Timedeltas are absolute differences in times, expressed in difference units (e.g. days, hours, minutes, seconds). This method converts an argument from a recognized timedelta format / value into a Timedelta type. Parameters ---------- arg : str, timedelta, list-like or Series The data to be converted to timedelta. The character M by itself, e.g. '1M', is treated as minute, not month. The characters Y and y are treated as the mean length of the Gregorian calendar year - 365.2425 days or 365 days 5 hours 49 minutes 12 seconds. unit : str, optional Denotes the unit of the arg for numeric `arg`. Defaults to ``"ns"``. Possible values: * 'W' * 'D' / 'days' / 'day' * 'hours' / 'hour' / 'hr' / 'h' * 'm' / 'minute' / 'min' / 'minutes' / 'T' * 'S' / 'seconds' / 'sec' / 'second' * 'ms' / 'milliseconds' / 'millisecond' / 'milli' / 'millis' / 'L' * 'us' / 'microseconds' / 'microsecond' / 'micro' / 'micros' / 'U' * 'ns' / 'nanoseconds' / 'nano' / 'nanos' / 'nanosecond' / 'N' .. versionchanged:: 1.1.0 Must not be specified when `arg` contains strings and ``errors="raise"``. errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception. - If 'coerce', then invalid parsing will be set as NaT. - If 'ignore', then invalid parsing will return the input. Returns ------- timedelta64 or numpy.array of timedelta64 Output type returned if parsing succeeded. See Also -------- DataFrame.astype : Cast argument to a specified dtype. to_datetime : Convert argument to datetime. convert_dtypes : Convert dtypes. Examples -------- Parsing a single string to a Timedelta: >>> pd.to_timedelta('1 days 06:05:01.00003') Timedelta('1 days 06:05:01.000030') >>> pd.to_timedelta('15.5us') Timedelta('0 days 00:00:00.000015500') Parsing a list or array of strings: >>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan']) TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015500', NaT], dtype='timedelta64[ns]', freq=None) Converting numbers by specifying the `unit` keyword argument: >>> pd.to_timedelta(np.arange(5), unit='s') TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02', '0 days 00:00:03', '0 days 00:00:04'], dtype='timedelta64[ns]', freq=None) >>> pd.to_timedelta(np.arange(5), unit='d') TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq=None)
pandas/core/tools/timedeltas.py
to_timedelta
cdeil/pandas
python
def to_timedelta(arg, unit=None, errors='raise'): '\n Convert argument to timedelta.\n\n Timedeltas are absolute differences in times, expressed in difference\n units (e.g. days, hours, minutes, seconds). This method converts\n an argument from a recognized timedelta format / value into\n a Timedelta type.\n\n Parameters\n ----------\n arg : str, timedelta, list-like or Series\n The data to be converted to timedelta. The character M by itself,\n e.g. \'1M\', is treated as minute, not month. The characters Y and y\n are treated as the mean length of the Gregorian calendar year -\n 365.2425 days or 365 days 5 hours 49 minutes 12 seconds.\n unit : str, optional\n Denotes the unit of the arg for numeric `arg`. Defaults to ``"ns"``.\n\n Possible values:\n\n * \'W\'\n * \'D\' / \'days\' / \'day\'\n * \'hours\' / \'hour\' / \'hr\' / \'h\'\n * \'m\' / \'minute\' / \'min\' / \'minutes\' / \'T\'\n * \'S\' / \'seconds\' / \'sec\' / \'second\'\n * \'ms\' / \'milliseconds\' / \'millisecond\' / \'milli\' / \'millis\' / \'L\'\n * \'us\' / \'microseconds\' / \'microsecond\' / \'micro\' / \'micros\' / \'U\'\n * \'ns\' / \'nanoseconds\' / \'nano\' / \'nanos\' / \'nanosecond\' / \'N\'\n\n .. versionchanged:: 1.1.0\n\n Must not be specified when `arg` contains strings and\n ``errors="raise"``.\n\n errors : {\'ignore\', \'raise\', \'coerce\'}, default \'raise\'\n - If \'raise\', then invalid parsing will raise an exception.\n - If \'coerce\', then invalid parsing will be set as NaT.\n - If \'ignore\', then invalid parsing will return the input.\n\n Returns\n -------\n timedelta64 or numpy.array of timedelta64\n Output type returned if parsing succeeded.\n\n See Also\n --------\n DataFrame.astype : Cast argument to a specified dtype.\n to_datetime : Convert argument to datetime.\n convert_dtypes : Convert dtypes.\n\n Examples\n --------\n Parsing a single string to a Timedelta:\n\n >>> pd.to_timedelta(\'1 days 06:05:01.00003\')\n Timedelta(\'1 days 06:05:01.000030\')\n >>> pd.to_timedelta(\'15.5us\')\n Timedelta(\'0 days 00:00:00.000015500\')\n\n Parsing a list or array of strings:\n\n >>> pd.to_timedelta([\'1 days 06:05:01.00003\', \'15.5us\', \'nan\'])\n TimedeltaIndex([\'1 days 06:05:01.000030\', \'0 days 00:00:00.000015500\', NaT],\n dtype=\'timedelta64[ns]\', freq=None)\n\n Converting numbers by specifying the `unit` keyword argument:\n\n >>> pd.to_timedelta(np.arange(5), unit=\'s\')\n TimedeltaIndex([\'0 days 00:00:00\', \'0 days 00:00:01\', \'0 days 00:00:02\',\n \'0 days 00:00:03\', \'0 days 00:00:04\'],\n dtype=\'timedelta64[ns]\', freq=None)\n >>> pd.to_timedelta(np.arange(5), unit=\'d\')\n TimedeltaIndex([\'0 days\', \'1 days\', \'2 days\', \'3 days\', \'4 days\'],\n dtype=\'timedelta64[ns]\', freq=None)\n ' if (unit is not None): unit = parse_timedelta_unit(unit) if (errors not in ('ignore', 'raise', 'coerce')): raise ValueError("errors must be one of 'ignore', 'raise', or 'coerce'") if (unit in {'Y', 'y', 'M'}): raise ValueError("Units 'M', 'Y', and 'y' are no longer supported, as they do not represent unambiguous timedelta durations.") if (arg is None): return arg elif isinstance(arg, ABCSeries): values = _convert_listlike(arg._values, unit=unit, errors=errors) return arg._constructor(values, index=arg.index, name=arg.name) elif isinstance(arg, ABCIndexClass): return _convert_listlike(arg, unit=unit, errors=errors, name=arg.name) elif (isinstance(arg, np.ndarray) and (arg.ndim == 0)): arg = arg.item() elif (is_list_like(arg) and (getattr(arg, 'ndim', 1) == 1)): return _convert_listlike(arg, unit=unit, errors=errors) elif (getattr(arg, 'ndim', 1) > 1): raise TypeError('arg must be a string, timedelta, list, tuple, 1-d array, or Series') if (isinstance(arg, str) and (unit is not None)): raise ValueError('unit must not be specified if the input is/contains a str') return _coerce_scalar_to_timedelta_type(arg, unit=unit, errors=errors)
def _coerce_scalar_to_timedelta_type(r, unit='ns', errors='raise'): "Convert string 'r' to a timedelta object." try: result = Timedelta(r, unit) except ValueError: if (errors == 'raise'): raise elif (errors == 'ignore'): return r result = NaT return result
-6,240,162,069,825,605,000
Convert string 'r' to a timedelta object.
pandas/core/tools/timedeltas.py
_coerce_scalar_to_timedelta_type
cdeil/pandas
python
def _coerce_scalar_to_timedelta_type(r, unit='ns', errors='raise'): try: result = Timedelta(r, unit) except ValueError: if (errors == 'raise'): raise elif (errors == 'ignore'): return r result = NaT return result
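The errors handling in this scalar path is visible at the public pandas API level; a small demonstration consistent with the docstring above:

    import pandas as pd

    print(pd.to_timedelta("bogus", errors="coerce"))  # NaT
    print(pd.to_timedelta("bogus", errors="ignore"))  # 'bogus' returned unchanged
    print(pd.to_timedelta("1 days 06:05:01"))         # Timedelta('1 days 06:05:01')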
def _convert_listlike(arg, unit=None, errors='raise', name=None): 'Convert a list of objects to a timedelta index object.' if (isinstance(arg, (list, tuple)) or (not hasattr(arg, 'dtype'))): arg = np.array(list(arg), dtype=object) try: value = sequence_to_td64ns(arg, unit=unit, errors=errors, copy=False)[0] except ValueError: if (errors == 'ignore'): return arg else: raise from pandas import TimedeltaIndex value = TimedeltaIndex(value, unit='ns', name=name) return value
6,420,085,564,580,702,000
Convert a list of objects to a timedelta index object.
pandas/core/tools/timedeltas.py
_convert_listlike
cdeil/pandas
python
def _convert_listlike(arg, unit=None, errors='raise', name=None): if (isinstance(arg, (list, tuple)) or (not hasattr(arg, 'dtype'))): arg = np.array(list(arg), dtype=object) try: value = sequence_to_td64ns(arg, unit=unit, errors=errors, copy=False)[0] except ValueError: if (errors == 'ignore'): return arg else: raise from pandas import TimedeltaIndex value = TimedeltaIndex(value, unit='ns', name=name) return value
def __init__(self, mrns: Optional[Set[str]]=None, dimensions: Optional[Set[str]]=None, clause=None, data_columns=None, **kwargs): '\n Args:\n mrns: Set of MRN-Strings for which the condition is true.\n children: List of child conditions which were combined with an\n operator.\n operator: String representing the combination of the child\n condition (e.g. ``_BaseCondition.AND``)\n dimensions: A set of tables that need to be joined on the\n ``base_table``\n clause: The SQLAlchemy clause of the current\n condition to select patients.\n data_columns: The SQLAlchemy data_columns that should\n be returned when ``.get_data()`` is called.\n ' super().__init__(**kwargs) self.dimensions = (dimensions or set()) self._clause = (sql.true() if (clause is None) else clause) self.data_columns = (data_columns or [])
6,810,422,015,448,396,000
Args: mrns: Set of MRN-Strings for which the condition is true. children: List of child conditions which were combined with an operator. operator: String representing the combination of the child condition (e.g. ``_BaseCondition.AND``) dimensions: A set of tables that need to be joined on the ``base_table`` clause: The SQLAlchemy clause of the current condition to select patients. data_columns: The SQLAlchemy data_columns that should be returned when ``.get_data()`` is called.
fiber/condition/database.py
__init__
hpi-dhc/fiber
python
def __init__(self, mrns: Optional[Set[str]]=None, dimensions: Optional[Set[str]]=None, clause=None, data_columns=None, **kwargs): '\n Args:\n mrns: Set of MRN-Strings for which the condition is true.\n children: List of child conditions which were combined with an\n operator.\n operator: String representing the combination of the child\n condition (e.g. ``_BaseCondition.AND``)\n dimensions: A set of tables that need to be joined on the\n ``base_table``\n clause: The SQLAlchemy clause of the current\n condition to select patients.\n data_columns: The SQLAlchemy data_columns that should\n be returned when ``.get_data()`` is called.\n ' super().__init__(**kwargs) self.dimensions = (dimensions or set()) self._clause = (sql.true() if (clause is None) else clause) self.data_columns = (data_columns or [])
@property def base_table(self) -> Table: '\n Must be set by subclasses to the database table which the class uses to\n select patients and data. This is also used to optimize queries on the\n same table.\n ' raise NotImplementedError
-9,211,096,098,496,574,000
Must be set by subclasses to the database table which the class uses to select patients and data. This is also used to optimize queries on the same table.
fiber/condition/database.py
base_table
hpi-dhc/fiber
python
@property def base_table(self) -> Table: '\n Must be set by subclasses to the database table which the class uses to\n select patients and data. This is also used to optimize queries on the\n same table.\n ' raise NotImplementedError
@property def _default_columns(self): '\n Must be set by subclasses.\n\n This should return an array of columns which are in the result table of\n ``._create_query()``. These columns will be returned by default when\n ``.get_data()`` is called.\n ' raise NotImplementedError
8,196,578,915,108,377,000
Must be set by subclasses. This should return an array of columns which are in the result table of ``._create_query()``. These columns will be returned by default when ``.get_data()`` is called.
fiber/condition/database.py
_default_columns
hpi-dhc/fiber
python
@property def _default_columns(self): '\n Must be set by subclasses.\n\n This should return an array of columns which are in the result table of\n ``._create_query()``. These columns will be returned by default when\n ``.get_data()`` is called.\n ' raise NotImplementedError
@property def mrn_column(self): '\n Must be set by subclasses.\n\n This is used to specify the column in the result table of\n ``._create_query()`` which is holding the MRNs.\n ' raise NotImplementedError
-4,138,846,600,661,252,600
Must be set by subclasses. This is used to specify the column in the result table of ``._create_query()`` which is holding the MRNs.
fiber/condition/database.py
mrn_column
hpi-dhc/fiber
python
@property def mrn_column(self): '\n Must be set by subclasses.\n\n This is used to specify the column in the result table of\n ``._create_query()`` which is holding the MRNs.\n ' raise NotImplementedError
@property def age_column(self): '\n Must be set by subclasses.\n\n This is used to specify the column in the result table of\n ``._create_query()`` which is holding the age in days.\n ' raise NotImplementedError
-211,457,758,802,844,300
Must be set by subclasses. This is used to specify the column in the result table of ``._create_query()`` which is holding the age in days.
fiber/condition/database.py
age_column
hpi-dhc/fiber
python
@property def age_column(self): '\n Must be set by subclasses.\n\n This is used to specify the column in the result table of\n ``._create_query()`` which is holding the age in days.\n ' raise NotImplementedError
@property def data_columns(self): '\n Returns columns which are in the result table of\n ``._create_query()``. These columns will be returned when\n ``.get_data()`` is called.\n ' return [str(col) for col in (self._specified_columns or self._default_columns)]
-1,208,874,599,390,524,400
Returns columns which are in the result table of ``._create_query()``. These columns will be returned when ``.get_data()`` is called.
fiber/condition/database.py
data_columns
hpi-dhc/fiber
python
@property def data_columns(self): '\n Returns columns which are in the result table of\n ``._create_query()``. These columns will be returned when\n ``.get_data()`` is called.\n ' return [str(col) for col in (self._specified_columns or self._default_columns)]
@property def clause(self): '\n Returns the clause of the current condition or runs\n ``._create_clause()`` to create it.\n ' if (not isinstance(self._clause, sql.elements.True_)): return self._clause else: return self._create_clause()
-8,420,029,666,076,889,000
Returns the clause of the current condition or runs ``._create_clause()`` to create it.
fiber/condition/database.py
clause
hpi-dhc/fiber
python
@property def clause(self): '\n Returns the clause of the current condition or runs\n ``._create_clause()`` to create it.\n ' if (not isinstance(self._clause, sql.elements.True_)): return self._clause else: return self._create_clause()
def _create_clause(self): '\n Should be overwritten by subclasses to create a SQLAlchemy clause based\n on the defined condition. It is used to select the correct patients.\n ' return sql.true()
-2,366,013,195,273,496,600
Should be overwritten by subclasses to create a SQLAlchemy clause based on the defined condition. It is used to select the correct patients.
fiber/condition/database.py
_create_clause
hpi-dhc/fiber
python
def _create_clause(self): '\n Should be overwritten by subclasses to create a SQLAlchemy clause based\n on the defined condition. It is used to select the correct patients.\n ' return sql.true()
def _create_query(self) -> orm.Query: '\n Must be implemented by subclasses to return an instance of a SQLAlchemy\n query which only returns MRNs.\n\n This query should yield all medical record numbers in the\n ``base_table`` of the condition. It uses the ``.clause`` to select\n the relevant patients.\n\n This query is also used by other functions which change the selected\n columns to get data about the patients.\n ' raise NotImplementedError
4,027,134,133,308,272,600
Must be implemented by subclasses to return an instance of a SQLAlchemy query which only returns MRNs. This query should yield all medical record numbers in the ``base_table`` of the condition. It uses the ``.clause`` to select the relevant patients. This query is also used by other functions which change the selected columns to get data about the patients.
fiber/condition/database.py
_create_query
hpi-dhc/fiber
python
def _create_query(self) -> orm.Query: '\n Must be implemented by subclasses to return an instance of a SQLAlchemy\n query which only returns MRNs.\n\n This query should yield all medical record numbers in the\n ``base_table`` of the condition. It uses the ``.clause`` to select\n the relevant patients.\n\n This query is also used by other functions which change the selected\n columns to get data about the patients.\n ' raise NotImplementedError
def _fetch_mrns(self, limit: Optional[int]=None): 'Fetches MRNs from the results of ``._create_query()``.' q = self._create_query() if limit: q = q.limit(limit) mrn_df = read_with_progress(q.statement, self.engine) if mrn_df.empty: mrn_df = pd.DataFrame(columns=['medical_record_number']) assert (len(mrn_df.columns) == 1), '_create_query must return only MRNs' result = set((mrn for mrn in mrn_df.iloc[:, 0])) return result
-8,381,494,308,814,900,000
Fetches MRNs from the results of ``._create_query()``.
fiber/condition/database.py
_fetch_mrns
hpi-dhc/fiber
python
def _fetch_mrns(self, limit: Optional[int]=None): q = self._create_query() if limit: q = q.limit(limit) mrn_df = read_with_progress(q.statement, self.engine) if mrn_df.empty: mrn_df = pd.DataFrame(columns=['medical_record_number']) assert (len(mrn_df.columns) == 1), '_create_query must return only MRNs' result = set((mrn for mrn in mrn_df.iloc[:, 0])) return result
def _fetch_data(self, included_mrns: Optional[Set]=None, limit: Optional[int]=None): '\n Fetches the data defined with ``.data_columns`` for each patient\n defined by this condition and via ``included_mrns`` from the results of\n ``._create_query()``.\n ' q = self._create_query() if included_mrns: q = q.filter(self.mrn_column.in_(included_mrns)) if limit: q = q.limit(limit) q = q.with_entities(*self.data_columns).distinct() result = read_with_progress(q.statement, self.engine, silent=bool(included_mrns)) return result
5,996,257,617,156,554,000
Fetches the data defined with ``.data_columns`` for each patient defined by this condition and via ``included_mrns`` from the results of ``._create_query()``.
fiber/condition/database.py
_fetch_data
hpi-dhc/fiber
python
def _fetch_data(self, included_mrns: Optional[Set]=None, limit: Optional[int]=None): '\n Fetches the data defined with ``.data_columns`` for each patient\n defined by this condition and via ``included_mrns`` from the results of\n ``._create_query()``.\n ' q = self._create_query() if included_mrns: q = q.filter(self.mrn_column.in_(included_mrns)) if limit: q = q.limit(limit) q = q.with_entities(*self.data_columns).distinct() result = read_with_progress(q.statement, self.engine, silent=bool(included_mrns)) return result
def example_values(self): "\n Returns ten values of the current condition.\n\n Example:\n >>> Patient(gender='Female', religion='Hindu').example_values()\n " return self.get_data(limit=10)
1,292,491,153,136,693,200
Returns ten values of the current condition. Example: >>> Patient(gender='Female', religion='Hindu').example_values()
fiber/condition/database.py
example_values
hpi-dhc/fiber
python
def example_values(self): "\n Returns ten values of the current condition.\n\n Example:\n >>> Patient(gender='Female', religion='Hindu').example_values()\n " return self.get_data(limit=10)
def values_per(self, *columns: Set[str]): '\n Counts occurrences of unique values in the specified columns.\n ' return self._grouped_count('*', *columns, label='values')
-8,414,278,900,176,086,000
Counts occurrences of unique values in the specified columns.
fiber/condition/database.py
values_per
hpi-dhc/fiber
python
def values_per(self, *columns: Set[str]): '\n \n ' return self._grouped_count('*', *columns, label='values')
def patients_per(self, *columns: Set[str]): '\n Counts distinct patients for unique values in the specified columns.\n ' return self._grouped_count(self.mrn_column.distinct(), *columns, label='patients')
1,264,087,047,943,287,300
Counts distinct patients for unique values in the specified columns.
fiber/condition/database.py
patients_per
hpi-dhc/fiber
python
def patients_per(self, *columns: Set[str]): '\n \n ' return self._grouped_count(self.mrn_column.distinct(), *columns, label='patients')
def distinct(self, *columns: Set[str]): 'Returns distinct values based on the specified ``columns``.' if (not columns): raise ValueError('Supply one or multiple columns as arguments.') q = self._create_query() q = q.with_entities(*columns).distinct() return read_with_progress(q.statement, self.engine)
3,780,552,309,333,347,300
Returns distinct values based on the specified ``columns``.
fiber/condition/database.py
distinct
hpi-dhc/fiber
python
def distinct(self, *columns: Set[str]): if (not columns): raise ValueError('Supply one or multiple columns as arguments.') q = self._create_query() q = q.with_entities(*columns).distinct() return read_with_progress(q.statement, self.engine)
def __or__(self, other: _BaseCondition): '\n The _DatabaseCondition optimizes the SQL statements for ``|`` by\n combining the clauses of conditions which run on the same database\n table. This is done via the ``.base_table`` attribute.\n ' if ((self.base_table == other.base_table) and (not (self._mrns or other._mrns))): unique_columns = list(dict.fromkeys(chain(self.data_columns, other.data_columns))) return self.__class__(dimensions=(self.dimensions | other.dimensions), clause=(self.clause | other.clause), data_columns=unique_columns, children=[self, other], operator=_BaseCondition.OR) else: return _BaseCondition(mrns=(self.get_mrns() | other.get_mrns()), children=[self, other], operator=_BaseCondition.OR)
-943,715,415,913,724,000
The _DatabaseCondition optimizes the SQL statements for ``|`` by combining the clauses of conditions which run on the same database table. This is done via the ``.base_table`` attribute.
fiber/condition/database.py
__or__
hpi-dhc/fiber
python
def __or__(self, other: _BaseCondition): '\n The _DatabaseCondition optimizes the SQL statements for ``|`` by\n combining the clauses of conditions which run on the same database\n table. This is done via the ``.base_table`` attribute.\n ' if ((self.base_table == other.base_table) and (not (self._mrns or other._mrns))): unique_columns = list(dict.fromkeys(chain(self.data_columns, other.data_columns))) return self.__class__(dimensions=(self.dimensions | other.dimensions), clause=(self.clause | other.clause), data_columns=unique_columns, children=[self, other], operator=_BaseCondition.OR) else: return _BaseCondition(mrns=(self.get_mrns() | other.get_mrns()), children=[self, other], operator=_BaseCondition.OR)
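The payoff of this clause-merging can be sketched with plain SQLAlchemy (table and column names here are invented for illustration, not taken from fiber):

    from sqlalchemy import Column, MetaData, String, Table, or_, select

    metadata = MetaData()
    patient = Table(
        "patient", metadata,
        Column("medical_record_number", String),
        Column("gender", String),
        Column("religion", String),
    )
    # Same base table: one round-trip with an OR-combined clause ...
    query = select(patient.c.medical_record_number).where(
        or_(patient.c.gender == "Female", patient.c.religion == "Hindu")
    )
    # ... instead of running two separate queries and unioning MRN sets in Python.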
def __repr__(self): 'Shows the running query or the resulting MRNs' if self._mrns: return f'{self.__class__.__name__}: {len(self.get_mrns())} mrns' else: clause = (compile_sqla(self.clause, self.engine) if fiber.config.VERBOSE else '...') return f'{self.__class__.__name__} ({clause})'
8,681,375,925,406,334,000
Shows the running query or the resulting MRNs
fiber/condition/database.py
__repr__
hpi-dhc/fiber
python
def __repr__(self): if self._mrns: return f'{self.__class__.__name__}: {len(self.get_mrns())} mrns' else: clause = (compile_sqla(self.clause, self.engine) if fiber.config.VERBOSE else '...') return f'{self.__class__.__name__} ({clause})'
def get_netmask(ip, subnet): 'Returns the netmask appropriate for injection into a guest.' if (ip['version'] == 4): return str(subnet.as_netaddr().netmask) return subnet.as_netaddr()._prefixlen
3,273,456,509,661,132,000
Returns the netmask appropriate for injection into a guest.
nova/network/model.py
get_netmask
Alex-Sizov/nova
python
def get_netmask(ip, subnet): if (ip['version'] == 4): return str(subnet.as_netaddr().netmask) return subnet.as_netaddr()._prefixlen
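Both return paths can be exercised directly with the netaddr library (addresses invented for the example):

    import netaddr

    # IPv4 path: a dotted-quad netmask string.
    print(str(netaddr.IPNetwork("192.168.1.0/24").netmask))  # 255.255.255.0
    # IPv6 path: the prefix length is returned instead, via the private
    # _prefixlen attribute that the nova code itself relies on.
    print(netaddr.IPNetwork("fe80::/64")._prefixlen)  # 64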
def get_meta(self, key, default=None): "calls get(key, default) on self['meta']." return self['meta'].get(key, default)
950,897,229,093,025,500
calls get(key, default) on self['meta'].
nova/network/model.py
get_meta
Alex-Sizov/nova
python
def get_meta(self, key, default=None): return self['meta'].get(key, default)
def as_netaddr(self): 'Convenient function to get cidr as a netaddr object.' return netaddr.IPNetwork(self['cidr'])
-1,883,720,367,734,164,000
Convenient function to get cidr as a netaddr object.
nova/network/model.py
as_netaddr
Alex-Sizov/nova
python
def as_netaddr(self): return netaddr.IPNetwork(self['cidr'])
def labeled_ips(self): "Returns the list of all IPs\n\n The return value looks like this flat structure::\n\n {'network_label': 'my_network',\n 'network_id': 'n8v29837fn234782f08fjxk3ofhb84',\n 'ips': [{'address': '0.0.0.0',\n 'version': 4,\n 'type': 'fixed',\n 'meta': {...}},\n {'address': '0.0.0.0',\n 'version': 4,\n 'type': 'floating',\n 'meta': {...}},\n {'address': 'fe80::4',\n 'version': 6,\n 'type': 'fixed',\n 'meta': {...}}]}\n " if self['network']: ips = [IP(**ip) for ip in self.fixed_ips()] for ip in ips: del ip['meta']['floating_ips'] ips.extend(self.floating_ips()) return {'network_label': self['network']['label'], 'network_id': self['network']['id'], 'ips': ips} return []
3,862,719,527,362,788,000
Returns the list of all IPs The return value looks like this flat structure:: {'network_label': 'my_network', 'network_id': 'n8v29837fn234782f08fjxk3ofhb84', 'ips': [{'address': '0.0.0.0', 'version': 4, 'type': 'fixed', 'meta': {...}}, {'address': '0.0.0.0', 'version': 4, 'type': 'floating', 'meta': {...}}, {'address': 'fe80::4', 'version': 6, 'type': 'fixed', 'meta': {...}}]}
nova/network/model.py
labeled_ips
Alex-Sizov/nova
python
def labeled_ips(self): "Returns the list of all IPs\n\n The return value looks like this flat structure::\n\n {'network_label': 'my_network',\n 'network_id': 'n8v29837fn234782f08fjxk3ofhb84',\n 'ips': [{'address': '0.0.0.0',\n 'version': 4,\n 'type': 'fixed',\n 'meta': {...}},\n {'address': '0.0.0.0',\n 'version': 4,\n 'type': 'floating',\n 'meta': {...}},\n {'address': 'fe80::4',\n 'version': 6,\n 'type': 'fixed',\n 'meta': {...}}]}\n " if self['network']: ips = [IP(**ip) for ip in self.fixed_ips()] for ip in ips: del ip['meta']['floating_ips'] ips.extend(self.floating_ips()) return {'network_label': self['network']['label'], 'network_id': self['network']['id'], 'ips': ips} return []
def fixed_ips(self): 'Returns all fixed_ips without floating_ips attached.' return [ip for vif in self for ip in vif.fixed_ips()]
-4,682,522,704,627,597,000
Returns all fixed_ips without floating_ips attached.
nova/network/model.py
fixed_ips
Alex-Sizov/nova
python
def fixed_ips(self): return [ip for vif in self for ip in vif.fixed_ips()]
def floating_ips(self): 'Returns all floating_ips.' return [ip for vif in self for ip in vif.floating_ips()]
-8,499,458,769,679,128,000
Returns all floating_ips.
nova/network/model.py
floating_ips
Alex-Sizov/nova
python
def floating_ips(self): return [ip for vif in self for ip in vif.floating_ips()]
def wait(self, do_raise=True): 'Wait for asynchronous call to finish.' pass
1,854,355,436,926,017,300
Wait for asynchronous call to finish.
nova/network/model.py
wait
Alex-Sizov/nova
python
def wait(self, do_raise=True): pass
def _sync_wrapper(self, wrapped, *args, **kwargs): 'Synchronize the model before running a method.' self.wait() return wrapped(*args, **kwargs)
-5,115,433,604,785,415,000
Synchronize the model before running a method.
nova/network/model.py
_sync_wrapper
Alex-Sizov/nova
python
def _sync_wrapper(self, wrapped, *args, **kwargs): self.wait() return wrapped(*args, **kwargs)
def wait(self, do_raise=True): 'Wait for asynchronous call to finish.' if (self._gt is not None): try: self[:] = self._gt.wait() except Exception: if do_raise: raise finally: self._gt = None
1,603,477,702,000,076,500
Wait for asynchronous call to finish.
nova/network/model.py
wait
Alex-Sizov/nova
python
def wait(self, do_raise=True): if (self._gt is not None): try: self[:] = self._gt.wait() except Exception: if do_raise: raise finally: self._gt = None
def phrase_extractor(doc_sents): '\n doc_sents is a list of documents, where each document is a list of sentences and each sentence is a list of tokens\n ' Phraser = models.phrases.Phraser Phrases = models.phrases.Phrases sentence_stream = sum(doc_sents, []) common_terms = ['of', 'with', 'without', 'and', 'or', 'the', 'a', 'as'] phrases = Phrases(sentence_stream, common_terms=common_terms) bigram = Phraser(phrases) trigram = Phrases(bigram[sentence_stream]) output_strs = [] for idx in range(0, len(doc_sents)): doc = doc_sents[idx] output_doc = list(trigram[doc]) output_str = sum(output_doc, []) output_strs.append(' '.join(output_str)) return output_strs
7,986,132,740,859,710,000
doc_sents is a list of documents, where each document is a list of sentences and each sentence is a list of tokens
src/scraping/read_and_parse.py
phrase_extractor
avbatchelor/insight-articles-project
python
def phrase_extractor(doc_sents): '\n \n ' Phraser = models.phrases.Phraser Phrases = models.phrases.Phrases sentence_stream = sum(doc_sents, []) common_terms = ['of', 'with', 'without', 'and', 'or', 'the', 'a', 'as'] phrases = Phrases(sentence_stream, common_terms=common_terms) bigram = Phraser(phrases) trigram = Phrases(bigram[sentence_stream]) output_strs = [] for idx in range(0, len(doc_sents)): doc = doc_sents[idx] output_doc = list(trigram[doc]) output_str = sum(output_doc, []) output_strs.append(' '.join(output_str)) return output_strs
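A minimal usage sketch under that input structure (tokens invented; note that gensim 4.x renamed the `common_terms` parameter to `connector_words`, so the function as written targets gensim 3.x):

    # Two toy documents, each a list of tokenized sentences.
    docs = [
        [["new", "york", "is", "big"], ["people", "love", "new", "york"]],
        [["new", "york", "has", "great", "food"]],
    ]
    print(phrase_extractor(docs))
    # One whitespace-joined string per document. With gensim's default
    # min_count/threshold these tokens pass through unchanged; with enough
    # co-occurrences "new york" would be emitted as the phrase "new_york".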
@property def height(self): '\n The effective height of this placeholder shape; its directly-applied\n height if it has one, otherwise the height of its parent layout\n placeholder.\n ' return self._effective_value('height')
7,468,113,242,768,823,000
The effective height of this placeholder shape; its directly-applied height if it has one, otherwise the height of its parent layout placeholder.
pptx/shapes/placeholder.py
height
Adriyst/python-pptx
python
@property def height(self): '\n The effective height of this placeholder shape; its directly-applied\n height if it has one, otherwise the height of its parent layout\n placeholder.\n ' return self._effective_value('height')
@property def left(self): '\n The effective left of this placeholder shape; its directly-applied\n left if it has one, otherwise the left of its parent layout\n placeholder.\n ' return self._effective_value('left')
-6,564,549,180,737,365,000
The effective left of this placeholder shape; its directly-applied left if it has one, otherwise the left of its parent layout placeholder.
pptx/shapes/placeholder.py
left
Adriyst/python-pptx
python
@property def left(self): '\n The effective left of this placeholder shape; its directly-applied\n left if it has one, otherwise the left of its parent layout\n placeholder.\n ' return self._effective_value('left')
@property def shape_type(self): '\n Member of :ref:`MsoShapeType` specifying the type of this shape.\n Unconditionally ``MSO_SHAPE_TYPE.PLACEHOLDER`` in this case.\n Read-only.\n ' return MSO_SHAPE_TYPE.PLACEHOLDER
-2,477,442,499,677,329,000
Member of :ref:`MsoShapeType` specifying the type of this shape. Unconditionally ``MSO_SHAPE_TYPE.PLACEHOLDER`` in this case. Read-only.
pptx/shapes/placeholder.py
shape_type
Adriyst/python-pptx
python
@property def shape_type(self): '\n Member of :ref:`MsoShapeType` specifying the type of this shape.\n Unconditionally ``MSO_SHAPE_TYPE.PLACEHOLDER`` in this case.\n Read-only.\n ' return MSO_SHAPE_TYPE.PLACEHOLDER
@property def top(self): '\n The effective top of this placeholder shape; its directly-applied\n top if it has one, otherwise the top of its parent layout\n placeholder.\n ' return self._effective_value('top')
-2,393,262,479,391,269,400
The effective top of this placeholder shape; its directly-applied top if it has one, otherwise the top of its parent layout placeholder.
pptx/shapes/placeholder.py
top
Adriyst/python-pptx
python
@property def top(self): '\n The effective top of this placeholder shape; its directly-applied\n top if it has one, otherwise the top of its parent layout\n placeholder.\n ' return self._effective_value('top')
@property def width(self): '\n The effective width of this placeholder shape; its directly-applied\n width if it has one, otherwise the width of its parent layout\n placeholder.\n ' return self._effective_value('width')
-1,524,664,289,232,801,500
The effective width of this placeholder shape; its directly-applied width if it has one, otherwise the width of its parent layout placeholder.
pptx/shapes/placeholder.py
width
Adriyst/python-pptx
python
@property def width(self): '\n The effective width of this placeholder shape; its directly-applied\n width if it has one, otherwise the width of its parent layout\n placeholder.\n ' return self._effective_value('width')
@property def _base_placeholder(self): '\n Return the layout or master placeholder shape this placeholder\n inherits from. Not to be confused with an instance of\n |BasePlaceholder| (necessarily).\n ' raise NotImplementedError('Must be implemented by all subclasses.')
3,763,772,925,094,577,000
Return the layout or master placeholder shape this placeholder inherits from. Not to be confused with an instance of |BasePlaceholder| (necessarily).
pptx/shapes/placeholder.py
_base_placeholder
Adriyst/python-pptx
python
@property def _base_placeholder(self): '\n Return the layout or master placeholder shape this placeholder\n inherits from. Not to be confused with an instance of\n |BasePlaceholder| (necessarily).\n ' raise NotImplementedError('Must be implemented by all subclasses.')
def _effective_value(self, attr_name): '\n The effective value of *attr_name* on this placeholder shape; its\n directly-applied value if it has one, otherwise the value on the\n layout placeholder it inherits from.\n ' directly_applied_value = getattr(super(_InheritsDimensions, self), attr_name) if (directly_applied_value is not None): return directly_applied_value return self._inherited_value(attr_name)
3,257,120,458,678,706,700
The effective value of *attr_name* on this placeholder shape; its directly-applied value if it has one, otherwise the value on the layout placeholder it inherits from.
pptx/shapes/placeholder.py
_effective_value
Adriyst/python-pptx
python
def _effective_value(self, attr_name): '\n The effective value of *attr_name* on this placeholder shape; its\n directly-applied value if it has one, otherwise the value on the\n layout placeholder it inherits from.\n ' directly_applied_value = getattr(super(_InheritsDimensions, self), attr_name) if (directly_applied_value is not None): return directly_applied_value return self._inherited_value(attr_name)
def _inherited_value(self, attr_name): "\n Return the attribute value, e.g. 'width' of the base placeholder this\n placeholder inherits from.\n " base_placeholder = self._base_placeholder if (base_placeholder is None): return None inherited_value = getattr(base_placeholder, attr_name) return inherited_value
-3,245,702,123,604,787,700
Return the attribute value, e.g. 'width' of the base placeholder this placeholder inherits from.
pptx/shapes/placeholder.py
_inherited_value
Adriyst/python-pptx
python
def _inherited_value(self, attr_name): "\n Return the attribute value, e.g. 'width' of the base placeholder this\n placeholder inherits from.\n " base_placeholder = self._base_placeholder if (base_placeholder is None): return None inherited_value = getattr(base_placeholder, attr_name) return inherited_value
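Together the two helpers implement a straightforward inherit-if-unset lookup; the same pattern in isolation (class and attribute names invented, not python-pptx API):

    class _Inherits:
        """Toy stand-in for the placeholder inheritance chain."""
        def __init__(self, width=None, parent=None):
            self._width, self._parent = width, parent

        @property
        def width(self):
            # A directly-applied value wins; otherwise defer to the parent.
            if self._width is not None:
                return self._width
            return self._parent.width if self._parent is not None else None

    layout_ph = _Inherits(width=914400)     # e.g. 1 inch in EMU
    slide_ph = _Inherits(parent=layout_ph)  # no directly-applied width
    print(slide_ph.width)                   # 914400, inherited from the layout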
@property def is_placeholder(self): '\n Boolean indicating whether this shape is a placeholder.\n Unconditionally |True| in this case.\n ' return True
-5,988,402,684,897,886,000
Boolean indicating whether this shape is a placeholder. Unconditionally |True| in this case.
pptx/shapes/placeholder.py
is_placeholder
Adriyst/python-pptx
python
@property def is_placeholder(self): '\n Boolean indicating whether this shape is a placeholder.\n Unconditionally |True| in this case.\n ' return True
@property def shape_type(self): '\n Member of :ref:`MsoShapeType` specifying the type of this shape.\n Unconditionally ``MSO_SHAPE_TYPE.PLACEHOLDER`` in this case.\n Read-only.\n ' return MSO_SHAPE_TYPE.PLACEHOLDER
-2,477,442,499,677,329,000
Member of :ref:`MsoShapeType` specifying the type of this shape. Unconditionally ``MSO_SHAPE_TYPE.PLACEHOLDER`` in this case. Read-only.
pptx/shapes/placeholder.py
shape_type
Adriyst/python-pptx
python
@property def shape_type(self): '\n Member of :ref:`MsoShapeType` specifying the type of this shape.\n Unconditionally ``MSO_SHAPE_TYPE.PLACEHOLDER`` in this case.\n Read-only.\n ' return MSO_SHAPE_TYPE.PLACEHOLDER
@property def _base_placeholder(self): '\n Return the layout placeholder this slide placeholder inherits from.\n Not to be confused with an instance of |BasePlaceholder|\n (necessarily).\n ' (layout, idx) = (self.part.slide_layout, self._element.ph_idx) return layout.placeholders.get(idx=idx)
-6,442,858,045,183,785,000
Return the layout placeholder this slide placeholder inherits from. Not to be confused with an instance of |BasePlaceholder| (necessarily).
pptx/shapes/placeholder.py
_base_placeholder
Adriyst/python-pptx
python
@property def _base_placeholder(self): '\n Return the layout placeholder this slide placeholder inherits from.\n Not to be confused with an instance of |BasePlaceholder|\n (necessarily).\n ' (layout, idx) = (self.part.slide_layout, self._element.ph_idx) return layout.placeholders.get(idx=idx)
def _replace_placeholder_with(self, element): "\n Substitute *element* for this placeholder element in the shapetree.\n This placeholder's `._element` attribute is set to |None| and its\n original element is free for garbage collection. Any attribute access\n (including a method call) on this placeholder after this call raises\n |AttributeError|.\n " element._nvXxPr.nvPr._insert_ph(self._element.ph) self._element.addprevious(element) self._element.getparent().remove(self._element) self._element = None
119,120,443,463,842,640
Substitute *element* for this placeholder element in the shapetree. This placeholder's `._element` attribute is set to |None| and its original element is free for garbage collection. Any attribute access (including a method call) on this placeholder after this call raises |AttributeError|.
pptx/shapes/placeholder.py
_replace_placeholder_with
Adriyst/python-pptx
python
def _replace_placeholder_with(self, element): "\n Substitute *element* for this placeholder element in the shapetree.\n This placeholder's `._element` attribute is set to |None| and its\n original element is free for garbage collection. Any attribute access\n (including a method call) on this placeholder after this call raises\n |AttributeError|.\n " element._nvXxPr.nvPr._insert_ph(self._element.ph) self._element.addprevious(element) self._element.getparent().remove(self._element) self._element = None
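The element swap described here is ordinary lxml surgery; the mechanics in isolation (toy tags, not the real DrawingML elements):

    from lxml import etree

    root = etree.fromstring("<tree><old/><other/></tree>")
    old = root.find("old")
    new = etree.Element("new")
    old.addprevious(new)            # insert the replacement before the original
    old.getparent().remove(old)     # then detach the original
    print(etree.tostring(root))     # b'<tree><new/><other/></tree>'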
@property def idx(self): "\n Integer placeholder 'idx' attribute, e.g. 0\n " return self._sp.ph_idx
-5,393,356,322,313,205,000
Integer placeholder 'idx' attribute, e.g. 0
pptx/shapes/placeholder.py
idx
Adriyst/python-pptx
python
@property def idx(self): "\n \n " return self._sp.ph_idx
@property def orient(self): '\n Placeholder orientation, e.g. ST_Direction.HORZ\n ' return self._sp.ph_orient
-7,293,633,352,992,293,000
Placeholder orientation, e.g. ST_Direction.HORZ
pptx/shapes/placeholder.py
orient
Adriyst/python-pptx
python
@property def orient(self): '\n \n ' return self._sp.ph_orient
@property def ph_type(self): '\n Placeholder type, e.g. PP_PLACEHOLDER.CENTER_TITLE\n ' return self._sp.ph_type
6,022,595,088,119,431,000
Placeholder type, e.g. PP_PLACEHOLDER.CENTER_TITLE
pptx/shapes/placeholder.py
ph_type
Adriyst/python-pptx
python
@property def ph_type(self): '\n \n ' return self._sp.ph_type
@property def sz(self): "\n Placeholder 'sz' attribute, e.g. ST_PlaceholderSize.FULL\n " return self._sp.ph_sz
574,434,602,822,876,800
Placeholder 'sz' attribute, e.g. ST_PlaceholderSize.FULL
pptx/shapes/placeholder.py
sz
Adriyst/python-pptx
python
@property def sz(self): "\n \n " return self._sp.ph_sz
@property def _base_placeholder(self): '\n Return the master placeholder this layout placeholder inherits from.\n ' base_ph_type = {PP_PLACEHOLDER.BODY: PP_PLACEHOLDER.BODY, PP_PLACEHOLDER.CHART: PP_PLACEHOLDER.BODY, PP_PLACEHOLDER.BITMAP: PP_PLACEHOLDER.BODY, PP_PLACEHOLDER.CENTER_TITLE: PP_PLACEHOLDER.TITLE, PP_PLACEHOLDER.ORG_CHART: PP_PLACEHOLDER.BODY, PP_PLACEHOLDER.DATE: PP_PLACEHOLDER.DATE, PP_PLACEHOLDER.FOOTER: PP_PLACEHOLDER.FOOTER, PP_PLACEHOLDER.MEDIA_CLIP: PP_PLACEHOLDER.BODY, PP_PLACEHOLDER.OBJECT: PP_PLACEHOLDER.BODY, PP_PLACEHOLDER.PICTURE: PP_PLACEHOLDER.BODY, PP_PLACEHOLDER.SLIDE_NUMBER: PP_PLACEHOLDER.SLIDE_NUMBER, PP_PLACEHOLDER.SUBTITLE: PP_PLACEHOLDER.BODY, PP_PLACEHOLDER.TABLE: PP_PLACEHOLDER.BODY, PP_PLACEHOLDER.TITLE: PP_PLACEHOLDER.TITLE}[self._element.ph_type] slide_master = self.part.slide_master return slide_master.placeholders.get(base_ph_type, None)
-8,909,599,504,009,577,000
Return the master placeholder this layout placeholder inherits from.
pptx/shapes/placeholder.py
_base_placeholder
Adriyst/python-pptx
python
@property def _base_placeholder(self): '\n \n ' base_ph_type = {PP_PLACEHOLDER.BODY: PP_PLACEHOLDER.BODY, PP_PLACEHOLDER.CHART: PP_PLACEHOLDER.BODY, PP_PLACEHOLDER.BITMAP: PP_PLACEHOLDER.BODY, PP_PLACEHOLDER.CENTER_TITLE: PP_PLACEHOLDER.TITLE, PP_PLACEHOLDER.ORG_CHART: PP_PLACEHOLDER.BODY, PP_PLACEHOLDER.DATE: PP_PLACEHOLDER.DATE, PP_PLACEHOLDER.FOOTER: PP_PLACEHOLDER.FOOTER, PP_PLACEHOLDER.MEDIA_CLIP: PP_PLACEHOLDER.BODY, PP_PLACEHOLDER.OBJECT: PP_PLACEHOLDER.BODY, PP_PLACEHOLDER.PICTURE: PP_PLACEHOLDER.BODY, PP_PLACEHOLDER.SLIDE_NUMBER: PP_PLACEHOLDER.SLIDE_NUMBER, PP_PLACEHOLDER.SUBTITLE: PP_PLACEHOLDER.BODY, PP_PLACEHOLDER.TABLE: PP_PLACEHOLDER.BODY, PP_PLACEHOLDER.TITLE: PP_PLACEHOLDER.TITLE}[self._element.ph_type] slide_master = self.part.slide_master return slide_master.placeholders.get(base_ph_type, None)
@property def _base_placeholder(self): '\n Return the notes master placeholder this notes slide placeholder\n inherits from, or |None| if no placeholder of the matching type is\n present.\n ' notes_master = self.part.notes_master ph_type = self.element.ph_type return notes_master.placeholders.get(ph_type=ph_type)
-3,631,829,249,761,758,700
Return the notes master placeholder this notes slide placeholder inherits from, or |None| if no placeholder of the matching type is present.
pptx/shapes/placeholder.py
_base_placeholder
Adriyst/python-pptx
python
@property def _base_placeholder(self): '\n Return the notes master placeholder this notes slide placeholder\n inherits from, or |None| if no placeholder of the matching type is\n present.\n ' notes_master = self.part.notes_master ph_type = self.element.ph_type return notes_master.placeholders.get(ph_type=ph_type)
def insert_chart(self, chart_type, chart_data): '\n Return a |PlaceholderGraphicFrame| object containing a new chart of\n *chart_type* depicting *chart_data* and having the same position and\n size as this placeholder. *chart_type* is one of the\n :ref:`XlChartType` enumeration values. *chart_data* is a |ChartData|\n object populated with the categories and series values for the chart.\n Note that the new |Chart| object is not returned directly. The chart\n object may be accessed using the\n :attr:`~.PlaceholderGraphicFrame.chart` property of the returned\n |PlaceholderGraphicFrame| object.\n ' rId = self.part.add_chart_part(chart_type, chart_data) graphicFrame = self._new_chart_graphicFrame(rId, self.left, self.top, self.width, self.height) self._replace_placeholder_with(graphicFrame) return PlaceholderGraphicFrame(graphicFrame, self._parent)
-5,297,237,133,140,146,000
Return a |PlaceholderGraphicFrame| object containing a new chart of *chart_type* depicting *chart_data* and having the same position and size as this placeholder. *chart_type* is one of the :ref:`XlChartType` enumeration values. *chart_data* is a |ChartData| object populated with the categories and series values for the chart. Note that the new |Chart| object is not returned directly. The chart object may be accessed using the :attr:`~.PlaceholderGraphicFrame.chart` property of the returned |PlaceholderGraphicFrame| object.
pptx/shapes/placeholder.py
insert_chart
Adriyst/python-pptx
python
def insert_chart(self, chart_type, chart_data): '\n Return a |PlaceholderGraphicFrame| object containing a new chart of\n *chart_type* depicting *chart_data* and having the same position and\n size as this placeholder. *chart_type* is one of the\n :ref:`XlChartType` enumeration values. *chart_data* is a |ChartData|\n object populated with the categories and series values for the chart.\n Note that the new |Chart| object is not returned directly. The chart\n object may be accessed using the\n :attr:`~.PlaceholderGraphicFrame.chart` property of the returned\n |PlaceholderGraphicFrame| object.\n ' rId = self.part.add_chart_part(chart_type, chart_data) graphicFrame = self._new_chart_graphicFrame(rId, self.left, self.top, self.width, self.height) self._replace_placeholder_with(graphicFrame) return PlaceholderGraphicFrame(graphicFrame, self._parent)
def _new_chart_graphicFrame(self, rId, x, y, cx, cy): '\n Return a newly created `p:graphicFrame` element having the specified\n position and size and containing the chart identified by *rId*.\n ' (id_, name) = (self.shape_id, self.name) return CT_GraphicalObjectFrame.new_chart_graphicFrame(id_, name, rId, x, y, cx, cy)
3,742,909,272,327,434,000
Return a newly created `p:graphicFrame` element having the specified position and size and containing the chart identified by *rId*.
pptx/shapes/placeholder.py
_new_chart_graphicFrame
Adriyst/python-pptx
python
def _new_chart_graphicFrame(self, rId, x, y, cx, cy): '\n Return a newly created `p:graphicFrame` element having the specified\n position and size and containing the chart identified by *rId*.\n ' (id_, name) = (self.shape_id, self.name) return CT_GraphicalObjectFrame.new_chart_graphicFrame(id_, name, rId, x, y, cx, cy)
def insert_picture(self, image_file): 'Return a |PlaceholderPicture| object depicting the image in `image_file`.\n\n `image_file` may be either a path (string) or a file-like object. The image is\n cropped to fill the entire space of the placeholder. A |PlaceholderPicture|\n object has all the properties and methods of a |Picture| shape except that the\n value of its :attr:`~._BaseSlidePlaceholder.shape_type` property is\n `MSO_SHAPE_TYPE.PLACEHOLDER` instead of `MSO_SHAPE_TYPE.PICTURE`.\n ' pic = self._new_placeholder_pic(image_file) self._replace_placeholder_with(pic) return PlaceholderPicture(pic, self._parent)
-7,307,511,244,919,935,000
Return a |PlaceholderPicture| object depicting the image in `image_file`. `image_file` may be either a path (string) or a file-like object. The image is cropped to fill the entire space of the placeholder. A |PlaceholderPicture| object has all the properties and methods of a |Picture| shape except that the value of its :attr:`~._BaseSlidePlaceholder.shape_type` property is `MSO_SHAPE_TYPE.PLACEHOLDER` instead of `MSO_SHAPE_TYPE.PICTURE`.
pptx/shapes/placeholder.py
insert_picture
Adriyst/python-pptx
python
def insert_picture(self, image_file): 'Return a |PlaceholderPicture| object depicting the image in `image_file`.\n\n `image_file` may be either a path (string) or a file-like object. The image is\n cropped to fill the entire space of the placeholder. A |PlaceholderPicture|\n object has all the properties and methods of a |Picture| shape except that the\n value of its :attr:`~._BaseSlidePlaceholder.shape_type` property is\n `MSO_SHAPE_TYPE.PLACEHOLDER` instead of `MSO_SHAPE_TYPE.PICTURE`.\n ' pic = self._new_placeholder_pic(image_file) self._replace_placeholder_with(pic) return PlaceholderPicture(pic, self._parent)
def _new_placeholder_pic(self, image_file): '\n Return a new `p:pic` element depicting the image in *image_file*,\n suitable for use as a placeholder. In particular this means not\n having an `a:xfrm` element, allowing its extents to be inherited from\n its layout placeholder.\n ' (rId, desc, image_size) = self._get_or_add_image(image_file) (shape_id, name) = (self.shape_id, self.name) pic = CT_Picture.new_ph_pic(shape_id, name, desc, rId) pic.crop_to_fit(image_size, (self.width, self.height)) return pic
-1,111,839,340,163,950,600
Return a new `p:pic` element depicting the image in *image_file*, suitable for use as a placeholder. In particular this means not having an `a:xfrm` element, allowing its extents to be inherited from its layout placeholder.
pptx/shapes/placeholder.py
_new_placeholder_pic
Adriyst/python-pptx
python
def _new_placeholder_pic(self, image_file): '\n Return a new `p:pic` element depicting the image in *image_file*,\n suitable for use as a placeholder. In particular this means not\n having an `a:xfrm` element, allowing its extents to be inherited from\n its layout placeholder.\n ' (rId, desc, image_size) = self._get_or_add_image(image_file) (shape_id, name) = (self.shape_id, self.name) pic = CT_Picture.new_ph_pic(shape_id, name, desc, rId) pic.crop_to_fit(image_size, (self.width, self.height)) return pic
def _get_or_add_image(self, image_file): '\n Return an (rId, description, image_size) 3-tuple identifying the\n related image part containing *image_file* and describing the image.\n ' (image_part, rId) = self.part.get_or_add_image_part(image_file) (desc, image_size) = (image_part.desc, image_part._px_size) return (rId, desc, image_size)
1,590,408,181,353,432,800
Return an (rId, description, image_size) 3-tuple identifying the related image part containing *image_file* and describing the image.
pptx/shapes/placeholder.py
_get_or_add_image
Adriyst/python-pptx
python
def _get_or_add_image(self, image_file): '\n Return an (rId, description, image_size) 3-tuple identifying the\n related image part containing *image_file* and describing the image.\n ' (image_part, rId) = self.part.get_or_add_image_part(image_file) (desc, image_size) = (image_part.desc, image_part._px_size) return (rId, desc, image_size)
@property def is_placeholder(self): '\n Boolean indicating whether this shape is a placeholder.\n Unconditionally |True| in this case.\n ' return True
-5,988,402,684,897,886,000
Boolean indicating whether this shape is a placeholder. Unconditionally |True| in this case.
pptx/shapes/placeholder.py
is_placeholder
Adriyst/python-pptx
python
@property def is_placeholder(self): '\n Boolean indicating whether this shape is a placeholder.\n Unconditionally |True| in this case.\n ' return True
@property def _base_placeholder(self): '\n Return the layout placeholder this picture placeholder inherits from.\n ' (layout, idx) = (self.part.slide_layout, self._element.ph_idx) return layout.placeholders.get(idx=idx)
-4,162,995,238,305,199,000
Return the layout placeholder this picture placeholder inherits from.
pptx/shapes/placeholder.py
_base_placeholder
Adriyst/python-pptx
python
@property def _base_placeholder(self): '\n \n ' (layout, idx) = (self.part.slide_layout, self._element.ph_idx) return layout.placeholders.get(idx=idx)
def insert_table(self, rows, cols): 'Return |PlaceholderGraphicFrame| object containing a `rows` by `cols` table.\n\n The position and width of the table are those of the placeholder and its height\n is proportional to the number of rows. A |PlaceholderGraphicFrame| object has\n all the properties and methods of a |GraphicFrame| shape except that the value\n of its :attr:`~._BaseSlidePlaceholder.shape_type` property is unconditionally\n `MSO_SHAPE_TYPE.PLACEHOLDER`. Note that the return value is not the new table\n but rather *contains* the new table. The table can be accessed using the\n :attr:`~.PlaceholderGraphicFrame.table` property of the returned\n |PlaceholderGraphicFrame| object.\n ' graphicFrame = self._new_placeholder_table(rows, cols) self._replace_placeholder_with(graphicFrame) return PlaceholderGraphicFrame(graphicFrame, self._parent)
-2,900,114,170,172,023,000
Return |PlaceholderGraphicFrame| object containing a `rows` by `cols` table. The position and width of the table are those of the placeholder and its height is proportional to the number of rows. A |PlaceholderGraphicFrame| object has all the properties and methods of a |GraphicFrame| shape except that the value of its :attr:`~._BaseSlidePlaceholder.shape_type` property is unconditionally `MSO_SHAPE_TYPE.PLACEHOLDER`. Note that the return value is not the new table but rather *contains* the new table. The table can be accessed using the :attr:`~.PlaceholderGraphicFrame.table` property of the returned |PlaceholderGraphicFrame| object.
pptx/shapes/placeholder.py
insert_table
Adriyst/python-pptx
python
def insert_table(self, rows, cols): 'Return |PlaceholderGraphicFrame| object containing a `rows` by `cols` table.\n\n The position and width of the table are those of the placeholder and its height\n is proportional to the number of rows. A |PlaceholderGraphicFrame| object has\n all the properties and methods of a |GraphicFrame| shape except that the value\n of its :attr:`~._BaseSlidePlaceholder.shape_type` property is unconditionally\n `MSO_SHAPE_TYPE.PLACEHOLDER`. Note that the return value is not the new table\n but rather *contains* the new table. The table can be accessed using the\n :attr:`~.PlaceholderGraphicFrame.table` property of the returned\n |PlaceholderGraphicFrame| object.\n ' graphicFrame = self._new_placeholder_table(rows, cols) self._replace_placeholder_with(graphicFrame) return PlaceholderGraphicFrame(graphicFrame, self._parent)
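insert_table returns the containing PlaceholderGraphicFrame rather than the table itself; a short sketch continuing the hypothetical slide from the sketches above (the placeholder index is an assumption):

graphic_frame = slide.placeholders[1].insert_table(rows=3, cols=4)
table = graphic_frame.table          # the new table is reached via the .table property
table.cell(0, 0).text = 'header'     # standard python-pptx table access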
def _new_placeholder_table(self, rows, cols): "\n Return a newly added `p:graphicFrame` element containing an empty\n table with *rows* rows and *cols* columns, positioned at the location\n of this placeholder and having its same width. The table's height is\n determined by the number of rows.\n " (shape_id, name, height) = (self.shape_id, self.name, Emu((rows * 370840))) return CT_GraphicalObjectFrame.new_table_graphicFrame(shape_id, name, rows, cols, self.left, self.top, self.width, height)
-2,623,100,528,436,390,000
Return a newly added `p:graphicFrame` element containing an empty table with *rows* rows and *cols* columns, positioned at the location of this placeholder and having its same width. The table's height is determined by the number of rows.
pptx/shapes/placeholder.py
_new_placeholder_table
Adriyst/python-pptx
python
def _new_placeholder_table(self, rows, cols): "\n Return a newly added `p:graphicFrame` element containing an empty\n table with *rows* rows and *cols* columns, positioned at the location\n of this placeholder and having its same width. The table's height is\n determined by the number of rows.\n " (shape_id, name, height) = (self.shape_id, self.name, Emu((rows * 370840))) return CT_GraphicalObjectFrame.new_table_graphicFrame(shape_id, name, rows, cols, self.left, self.top, self.width, height)
def get_asymmetry(arr): '\n Returns the asymmetry of an array.\n \n Parameters\n ----------\n arr : ndarray\n the array to find the asymmetry of\n\n Returns\n -------\n ndarray\n an array capturing the asymmetry of the original array\n ' arr_rev = arr[::(- 1), ::(- 1), ::(- 1)] denom = ((arr + arr_rev) / 2.0) denom_nz = np.where((denom == 0), 1.0, denom) asym = np.where((denom > 0.0), (abs((arr - arr_rev)) / denom_nz), 0.0) return np.where((arr > 0), asym, 0)
4,152,776,174,522,038,300
Returns the asymmetry of an array. Parameters ---------- arr : ndarray the array to find the asymmetry of Returns ------- ndarray an array capturing the asymmetry of the original array
scripts/alien_tools.py
get_asymmetry
AdvancedPhotonSource/cdisupp
python
def get_asymmetry(arr): '\n Returns the asymmetry of an array.\n \n Parameters\n ----------\n arr : ndarray\n the array to find the asymmetry of\n\n Returns\n -------\n ndarray\n an array capturing the asymmetry of the original array\n ' arr_rev = arr[::(- 1), ::(- 1), ::(- 1)] denom = ((arr + arr_rev) / 2.0) denom_nz = np.where((denom == 0), 1.0, denom) asym = np.where((denom > 0.0), (abs((arr - arr_rev)) / denom_nz), 0.0) return np.where((arr > 0), asym, 0)
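A self-contained numeric check of get_asymmetry (assuming the function above is in scope); one broken centro-symmetric pair gives |4 - 2| / ((4 + 2) / 2) ≈ 0.667 at both non-zero points:

import numpy as np

arr = np.zeros((3, 3, 3))
arr[0, 0, 0] = 4.0     # one corner
arr[2, 2, 2] = 2.0     # its centro-symmetric partner
asym = get_asymmetry(arr)
print(asym[0, 0, 0], asym[2, 2, 2])   # both ~0.667; zero wherever arr is zero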
def analyze_clusters(arr, labels, nz): '\n Analyzes clusters and returns characteristics in arrays.\n \n Parameters\n ----------\n arr : ndarray\n the analyzed array\n labels: arr\n cluster labels for each point in the dataset given to fit(). Noisy samples are given the label -1.\n nz : tuple\n tuple of arrays, each array containing indices of elements in arr that are non-zero along one axis.\n \n Returns\n -------\n tuple\n tuple containing the following arrays:\n nlabels # number of labels, i.e. clusters\n labels_arr # array with label for each non zero point\n rel_cluster_size # array with cluster size divided by max cluster size for each\n # non zero point\n cluster_avg # array with cluster average for each non zero point\n noise_arr # array with points that are non zero but not in cluster\n no_noise # array with noise points set to 0\n label_counts # tuple of two arrays: first is the label number, second is number of\n # occurrences of that label (size of cluster)\n cluster_avg_asym # array with average asymmetry of the points in a cluster\n asymmetry # array of asymmetry with regard to entire array\n cluster_size # array with cluster size for each non zero point\n ' labels_arr = np.zeros_like(arr) noise_arr = np.zeros_like(arr) cluster_size = np.zeros_like(arr) cluster_avg = np.zeros_like(arr).astype(np.float32) cluster_avg_asym = np.zeros_like(arr).astype(np.float32) asymmetry = get_asymmetry(arr) label_counts = np.unique(labels, return_counts=True) labels_arr[nz] = labels noise_pts = tuple([nz[n][(labels == (- 1))] for n in range(3)]) no_noise = arr noise_arr[noise_pts] = arr[noise_pts] no_noise[noise_pts] = 0 nlabels = len(label_counts[0]) for n in range(1, nlabels): n_lab = label_counts[0][n] cluspts = tuple([nz[d][(labels == n_lab)] for d in range(3)]) cluster_size[cluspts] = label_counts[1][n] cluster_avg[cluspts] = (np.sum(arr[cluspts]) / cluspts[0].size) cluster_avg_asym[cluspts] = (np.sum(asymmetry[cluspts]) / cluspts[0].size) rel_cluster_size = (cluster_size / cluster_size.max()) return (nlabels, labels_arr, rel_cluster_size, cluster_avg, noise_arr, no_noise, label_counts, cluster_avg_asym, asymmetry, cluster_size)
-8,017,559,099,873,137,000
Analyzes clusters and returns characteristics in arrays. Parameters ---------- arr : ndarray the analyzed array labels: arr cluster labels for each point in the dataset given to fit(). Noisy samples are given the label -1. nz : tuple tuple of arrays, each array containing indices of elements in arr that are non-zero along one axis. Returns ------- tuple tuple containing the following arrays: nlabels # number of labels, i.e. clusters labels_arr # array with label for each non zero point rel_cluster_size # array with cluster size divided by max cluster size for each # non zero point cluster_avg # array with cluster average for each non zero point noise_arr # array with points that are non zero but not in cluster no_noise # array with noise points set to 0 label_counts # tuple of two arrays: first is the label number, second is number of # occurrences of that label (size of cluster) cluster_avg_asym # array with average asymmetry of the points in a cluster asymmetry # array of asymmetry with regard to entire array cluster_size # array with cluster size for each non zero point
scripts/alien_tools.py
analyze_clusters
AdvancedPhotonSource/cdisupp
python
def analyze_clusters(arr, labels, nz): '\n Analyzes clusters and returns characteristics in arrays.\n \n Parameters\n ----------\n arr : ndarray\n the analyzed array\n labels: arr\n cluster labels for each point in the dataset given to fit(). Noisy samples are given the label -1.\n nz : tuple\n tuple of arrays, each array containing indices of elements in arr that are non-zero along one axis.\n \n Returns\n -------\n tuple\n tuple containing the following arrays:\n nlabels # number of labels, i.e. clusters\n labels_arr # array with label for each non zero point\n rel_cluster_size # array with cluster size divided by max cluster size for each\n # non zero point\n cluster_avg # array with cluster average for each non zero point\n noise_arr # array with points that are non zero but not in cluster\n no_noise # array with noise points set to 0\n label_counts # tuple of two arrays: first is the label number, second is number of\n # occurrences of that label (size of cluster)\n cluster_avg_asym # array with average asymmetry of the points in a cluster\n asymmetry # array of asymmetry with regard to entire array\n cluster_size # array with cluster size for each non zero point\n ' labels_arr = np.zeros_like(arr) noise_arr = np.zeros_like(arr) cluster_size = np.zeros_like(arr) cluster_avg = np.zeros_like(arr).astype(np.float32) cluster_avg_asym = np.zeros_like(arr).astype(np.float32) asymmetry = get_asymmetry(arr) label_counts = np.unique(labels, return_counts=True) labels_arr[nz] = labels noise_pts = tuple([nz[n][(labels == (- 1))] for n in range(3)]) no_noise = arr noise_arr[noise_pts] = arr[noise_pts] no_noise[noise_pts] = 0 nlabels = len(label_counts[0]) for n in range(1, nlabels): n_lab = label_counts[0][n] cluspts = tuple([nz[d][(labels == n_lab)] for d in range(3)]) cluster_size[cluspts] = label_counts[1][n] cluster_avg[cluspts] = (np.sum(arr[cluspts]) / cluspts[0].size) cluster_avg_asym[cluspts] = (np.sum(asymmetry[cluspts]) / cluspts[0].size) rel_cluster_size = (cluster_size / cluster_size.max()) return (nlabels, labels_arr, rel_cluster_size, cluster_avg, noise_arr, no_noise, label_counts, cluster_avg_asym, asymmetry, cluster_size)
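analyze_clusters expects the DBSCAN labels of the non-zero points; a minimal driving sketch (assuming the functions above are in scope; the eps and min_samples values are illustrative):

import numpy as np
from sklearn.cluster import DBSCAN

nz = arr.nonzero()                                  # arr: a thresholded 3-D intensity array
pts = np.array(nz).transpose().astype(np.float32)
labels = DBSCAN(eps=1.1, min_samples=5).fit_predict(pts)
(nlabels, labels_arr, rel_size, clus_avg, noise_arr,
 no_noise, label_counts, clus_avg_asym, asym, clus_size) = analyze_clusters(arr, labels, nz)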
def crop_center(arr): '\n Finds the max element in the array and crops the array to be symmetrical with regard to this point in each direction.\n \n Parameters\n ----------\n arr : ndarray\n an array\n\n Returns\n -------\n centered : ndarray\n an array symmetrical in all dimensions around the max element of the input array\n ' shape = arr.shape center = np.unravel_index(np.argmax(arr, axis=None), shape) principium = [] finis = [] for i in range(len(shape)): half_shape = min(center[i], ((shape[i] - center[i]) - 1)) principium.append((center[i] - half_shape)) finis.append(((center[i] + half_shape) + 1)) centered = arr[principium[0]:finis[0], principium[1]:finis[1], principium[2]:finis[2]] return centered
-1,185,450,012,814,468,600
Finds the max element in the array and crops the array to be symmetrical with regard to this point in each direction. Parameters ---------- arr : ndarray an array Returns ------- centered : ndarray an array symmetrical in all dimensions around the max element of the input array
scripts/alien_tools.py
crop_center
AdvancedPhotonSource/cdisupp
python
def crop_center(arr): '\n Finds the max element in the array and crops the array to be symmetrical with regard to this point in each direction.\n \n Parameters\n ----------\n arr : ndarray\n an array\n\n Returns\n -------\n centered : ndarray\n an array symmetrical in all dimensions around the max element of the input array\n ' shape = arr.shape center = np.unravel_index(np.argmax(arr, axis=None), shape) principium = [] finis = [] for i in range(len(shape)): half_shape = min(center[i], ((shape[i] - center[i]) - 1)) principium.append((center[i] - half_shape)) finis.append(((center[i] + half_shape) + 1)) centered = arr[principium[0]:finis[0], principium[1]:finis[1], principium[2]:finis[2]] return centered
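A quick check of crop_center (assuming the function above is in scope); with the maximum at (1, 2, 3) in a (6, 6, 6) array, each axis is cut to the largest window symmetric about the maximum:

import numpy as np

arr = np.zeros((6, 6, 6))
arr[1, 2, 3] = 9.0              # off-center global maximum
print(crop_center(arr).shape)   # (3, 5, 5): axis 0 allows only 1 point on each side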
def save_arr(arr, dir, fname): "\n Saves an array in 'tif' format file.\n \n Parameters\n ----------\n arr : ndarray\n an array to save\n dir : str\n directory to save the file to\n fname : str\n file name\n\n Returns\n -------\n nothing\n " if (dir is not None): full_name = os.path.join(dir, fname) else: full_name = fname tif.imsave(full_name, arr.transpose().astype(np.float32))
6,513,454,682,440,310,000
Saves an array in 'tif' format file. Parameters ---------- arr : ndarray an array to save dir : str directory to save the file to fname : str file name Returns ------- nothing
scripts/alien_tools.py
save_arr
AdvancedPhotonSource/cdisupp
python
def save_arr(arr, dir, fname): "\n Saves an array in 'tif' format file.\n \n Parameters\n ----------\n arr : ndarray\n an array to save\n dir : str\n directory to save the file to\n fname : str\n file name\n\n Returns\n -------\n nothing\n " if (dir is not None): full_name = os.path.join(dir, fname) else: full_name = fname tif.imsave(full_name, arr.transpose().astype(np.float32))
def save_arrays(arrs, iter, thresh, eps, dir): "\n Saves multiple arrays in 'tif' format files. Determines file names from the given parameters: iteration, threshold, and eps.\n \n Parameters\n ----------\n arrs : tuple\n a tuple of arrays to save\n iter, thresh, eps : int, float, float\n parameters: iteration, threshold, and eps, used to derive the file names\n dir : str\n directory to save the files to\n\n Returns\n -------\n nothing\n " save_arr(arrs[1], dir, ('db%d_%3.2f_labels_arr%3.2f.tif' % (iter, thresh, eps))) save_arr(arrs[2], dir, ('db%d_%3.2f_rel_clustersizes%3.2f.tif' % (iter, thresh, eps))) save_arr(arrs[3], dir, ('db%d_%3.2f_clusteravg%3.2f.tif' % (iter, thresh, eps))) save_arr(arrs[4], dir, ('db%d_%3.2f_noise%3.2f.tif' % (iter, thresh, eps))) save_arr(arrs[5], dir, ('db%d_%3.2f_no_noise%3.2f.tif' % (iter, thresh, eps))) save_arr(arrs[7], dir, ('db%d_%3.2f_clusteravgasym%3.2f.tif' % (iter, thresh, eps))) save_arr(arrs[8], dir, ('db%d_%3.2f_asym%3.2f.tif' % (iter, thresh, eps))) save_arr(arrs[9], dir, ('db%d_%3.2f_abs_clustersizes%3.2f.tif' % (iter, thresh, eps)))
7,043,521,248,600,779,000
Saves multiple arrays in 'tif' format files. Determines file names from the given parameters: iteration, threshold, and eps. Parameters ---------- arrs : tuple a tuple of arrays to save iter, thresh, eps : int, float, float parameters: iteration, threshold, and eps, used to derive the file names dir : str directory to save the files to Returns ------- nothing
scripts/alien_tools.py
save_arrays
AdvancedPhotonSource/cdisupp
python
def save_arrays(arrs, iter, thresh, eps, dir): "\n Saves multiple arrays in 'tif' format files. Determines file names from the given parameters: iteration, threshold, and eps.\n \n Parameters\n ----------\n arrs : tuple\n a tuple of arrays to save\n iter, thresh, eps : int, float, float\n parameters: iteration, threshold, and eps, used to derive the file names\n dir : str\n directory to save the files to\n\n Returns\n -------\n nothing\n " save_arr(arrs[1], dir, ('db%d_%3.2f_labels_arr%3.2f.tif' % (iter, thresh, eps))) save_arr(arrs[2], dir, ('db%d_%3.2f_rel_clustersizes%3.2f.tif' % (iter, thresh, eps))) save_arr(arrs[3], dir, ('db%d_%3.2f_clusteravg%3.2f.tif' % (iter, thresh, eps))) save_arr(arrs[4], dir, ('db%d_%3.2f_noise%3.2f.tif' % (iter, thresh, eps))) save_arr(arrs[5], dir, ('db%d_%3.2f_no_noise%3.2f.tif' % (iter, thresh, eps))) save_arr(arrs[7], dir, ('db%d_%3.2f_clusteravgasym%3.2f.tif' % (iter, thresh, eps))) save_arr(arrs[8], dir, ('db%d_%3.2f_asym%3.2f.tif' % (iter, thresh, eps))) save_arr(arrs[9], dir, ('db%d_%3.2f_abs_clustersizes%3.2f.tif' % (iter, thresh, eps)))
def auto_alien1(data, config, data_dir=None): "\n Removes aliens from experimental CDI data using an iterative algorithm and returns the result.\n \n The algorithm follows these steps:\n 1. Initialization:\n - initialize variables with the configuration parameters\n - crop the data array around the maximum element to its biggest size\n - set points below the threshold value to 0\n - find non-zero elements of the data array and keep them as tuples of indices\n 2. Iteration loop, runs until the number of clusters remains unchanged\n - runs the DBSCAN algorithm on the non-zero points and returns cluster labels\n - analyzes the results to find relative cluster sizes, cluster average asymmetry, and other characteristics\n - removes alien clusters, i.e. the ones with relative cluster size below the configured size threshold and with average asymmetry over the configured asymmetry threshold\n - goes back to the loop, feeding the non-zero elements of the alien-removed array to DBSCAN\n 3. If configured, a final step applies Gaussian convolution to the result and uses it as a filter with the configured sigma as threshold\n \n Parameters\n ----------\n data : ndarray\n an array with experiment data\n config : Object\n configuration object providing access to configuration parameters\n data_dir : str\n a directory where the 'alien_analysis' subdirectory will be created to save results of analysis if configured\n\n Returns\n -------\n cuboid : ndarray\n data array with removed aliens\n " try: size_threshold = config.AA1_size_threshold except AttributeError: size_threshold = 0.01 except Exception as e: print('error parsing AA1_size_threshold ', str(e)) try: asym_threshold = config.AA1_asym_threshold except AttributeError: asym_threshold = 1.75 except Exception as e: print('error parsing AA1_asym_threshold ', str(e)) try: min_pts = config.AA1_min_pts except AttributeError: min_pts = 5 except Exception as e: print('error parsing AA1_min_pts ', str(e)) try: eps = config.AA1_eps except AttributeError: eps = 1.1 except Exception as e: print('error parsing AA1_eps ', str(e)) try: threshold = config.AA1_amp_threshold except AttributeError: print('AA1_amp_threshold parameter not configured, not removing aliens') return data except Exception as e: print('error parsing AA1_amp_threshold ', str(e)) try: save_arrs = config.AA1_save_arrs if save_arrs: save_dir = os.path.join(data_dir, 'alien_analysis') if (not os.path.exists(save_dir)): os.makedirs(save_dir) except AttributeError: save_arrs = False except Exception as e: print('error parsing save_arrs ', str(e)) try: expandcleanedsig = config.AA1_expandcleanedsigma except AttributeError: expandcleanedsig = 0.0 except Exception as e: print('error parsing expandcleanedsig ', str(e)) cuboid = crop_center(data) cuboid = np.where((cuboid >= threshold), cuboid, 0) if save_arrs: save_arr(cuboid, save_dir, ('db%3.2f_cuboid%3.2f.tif' % (threshold, eps))) save_arr(cuboid[::(- 1), ::(- 1), ::(- 1)], save_dir, ('db%3.2f_cuboidrev%3.2f.tif' % (threshold, eps))) non_zero = cuboid.nonzero() iter = 0 nclusters = 0 finished = False while (not finished): non_zero = cuboid.nonzero() labels = DBSCAN(eps=eps, metric='euclidean', min_samples=min_pts, n_jobs=(- 1)).fit_predict(np.array(non_zero).transpose().astype(np.float32)) arrs = analyze_clusters(cuboid, labels, non_zero) if save_arrs: save_arrays(arrs, iter, threshold, eps, save_dir) if (nclusters == arrs[0]): finished = True nclusters = arrs[0] if (iter == 0): rel_cluster_size = arrs[2] cluster_avg_asym = arrs[7] cuboid = np.where(np.logical_and((rel_cluster_size < size_threshold), (cluster_avg_asym > asym_threshold)), 0.0, cuboid) iter += 1 if (expandcleanedsig > 0): cuboid = np.where((cuboid > 0), 1.0, 0.0) sig = [expandcleanedsig, expandcleanedsig, 1.0] cuboid = ut.gauss_conv_fft(cuboid, sig) no_thresh_cuboid = crop_center(data) cuboid = np.where((cuboid > 0.1), no_thresh_cuboid, 0.0) return cuboid
1,125,620,878,779,161,500
Removes aliens from experimental CDI data using an iterative algorithm and returns the result. The algorithm follows these steps: 1. Initialization: - initialize variables with the configuration parameters - crop the data array around the maximum element to its biggest size - set points below the threshold value to 0 - find non-zero elements of the data array and keep them as tuples of indices 2. Iteration loop, runs until the number of clusters remains unchanged - runs the DBSCAN algorithm on the non-zero points and returns cluster labels - analyzes the results to find relative cluster sizes, cluster average asymmetry, and other characteristics - removes alien clusters, i.e. the ones with relative cluster size below the configured size threshold and with average asymmetry over the configured asymmetry threshold - goes back to the loop, feeding the non-zero elements of the alien-removed array to DBSCAN 3. If configured, a final step applies Gaussian convolution to the result and uses it as a filter with the configured sigma as threshold Parameters ---------- data : ndarray an array with experiment data config : Object configuration object providing access to configuration parameters data_dir : str a directory where the 'alien_analysis' subdirectory will be created to save results of analysis if configured Returns ------- cuboid : ndarray data array with removed aliens
scripts/alien_tools.py
auto_alien1
AdvancedPhotonSource/cdisupp
python
def auto_alien1(data, config, data_dir=None): "\n Removes aliens from experimental CDI data using an iterative algorithm and returns the result.\n \n The algorithm follows these steps:\n 1. Initialization:\n - initialize variables with the configuration parameters\n - crop the data array around the maximum element to its biggest size\n - set points below the threshold value to 0\n - find non-zero elements of the data array and keep them as tuples of indices\n 2. Iteration loop, runs until the number of clusters remains unchanged\n - runs the DBSCAN algorithm on the non-zero points and returns cluster labels\n - analyzes the results to find relative cluster sizes, cluster average asymmetry, and other characteristics\n - removes alien clusters, i.e. the ones with relative cluster size below the configured size threshold and with average asymmetry over the configured asymmetry threshold\n - goes back to the loop, feeding the non-zero elements of the alien-removed array to DBSCAN\n 3. If configured, a final step applies Gaussian convolution to the result and uses it as a filter with the configured sigma as threshold\n \n Parameters\n ----------\n data : ndarray\n an array with experiment data\n config : Object\n configuration object providing access to configuration parameters\n data_dir : str\n a directory where the 'alien_analysis' subdirectory will be created to save results of analysis if configured\n\n Returns\n -------\n cuboid : ndarray\n data array with removed aliens\n " try: size_threshold = config.AA1_size_threshold except AttributeError: size_threshold = 0.01 except Exception as e: print('error parsing AA1_size_threshold ', str(e)) try: asym_threshold = config.AA1_asym_threshold except AttributeError: asym_threshold = 1.75 except Exception as e: print('error parsing AA1_asym_threshold ', str(e)) try: min_pts = config.AA1_min_pts except AttributeError: min_pts = 5 except Exception as e: print('error parsing AA1_min_pts ', str(e)) try: eps = config.AA1_eps except AttributeError: eps = 1.1 except Exception as e: print('error parsing AA1_eps ', str(e)) try: threshold = config.AA1_amp_threshold except AttributeError: print('AA1_amp_threshold parameter not configured, not removing aliens') return data except Exception as e: print('error parsing AA1_amp_threshold ', str(e)) try: save_arrs = config.AA1_save_arrs if save_arrs: save_dir = os.path.join(data_dir, 'alien_analysis') if (not os.path.exists(save_dir)): os.makedirs(save_dir) except AttributeError: save_arrs = False except Exception as e: print('error parsing save_arrs ', str(e)) try: expandcleanedsig = config.AA1_expandcleanedsigma except AttributeError: expandcleanedsig = 0.0 except Exception as e: print('error parsing expandcleanedsig ', str(e)) cuboid = crop_center(data) cuboid = np.where((cuboid >= threshold), cuboid, 0) if save_arrs: save_arr(cuboid, save_dir, ('db%3.2f_cuboid%3.2f.tif' % (threshold, eps))) save_arr(cuboid[::(- 1), ::(- 1), ::(- 1)], save_dir, ('db%3.2f_cuboidrev%3.2f.tif' % (threshold, eps))) non_zero = cuboid.nonzero() iter = 0 nclusters = 0 finished = False while (not finished): non_zero = cuboid.nonzero() labels = DBSCAN(eps=eps, metric='euclidean', min_samples=min_pts, n_jobs=(- 1)).fit_predict(np.array(non_zero).transpose().astype(np.float32)) arrs = analyze_clusters(cuboid, labels, non_zero) if save_arrs: save_arrays(arrs, iter, threshold, eps, save_dir) if (nclusters == arrs[0]): finished = True nclusters = arrs[0] if (iter == 0): rel_cluster_size = arrs[2] cluster_avg_asym = arrs[7] cuboid = np.where(np.logical_and((rel_cluster_size < size_threshold), (cluster_avg_asym > asym_threshold)), 0.0, cuboid) iter += 1 if (expandcleanedsig > 0): cuboid = np.where((cuboid > 0), 1.0, 0.0) sig = [expandcleanedsig, expandcleanedsig, 1.0] cuboid = ut.gauss_conv_fft(cuboid, sig) no_thresh_cuboid = crop_center(data) cuboid = np.where((cuboid > 0.1), no_thresh_cuboid, 0.0) return cuboid
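auto_alien1 reads its tuning knobs off attributes of the config object; a throwaway-namespace sketch (assuming the function above is in scope; all values and the input file are illustrative, and only AA1_amp_threshold is required for the function to run):

from types import SimpleNamespace
import numpy as np

config = SimpleNamespace(
    AA1_amp_threshold=2.0,    # mandatory: intensities below this are zeroed first
    AA1_size_threshold=0.01,  # the rest fall back to the defaults in the function
    AA1_asym_threshold=1.75,
    AA1_min_pts=5,
    AA1_eps=1.1,
)
data = np.load('experiment_data.npy')   # hypothetical input file
cleaned = auto_alien1(data, config)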
def remove_blocks(data, config_map): '\n Sets to zero given alien blocks in the data array.\n \n Parameters\n ----------\n data : ndarray\n an array with experiment data\n config : Object\n configuration object providing access to configuration parameters\n\n Returns\n -------\n data : ndarray\n data array with zeroed out aliens\n ' try: aliens = config_map.aliens for alien in aliens: data[alien[0]:alien[3], alien[1]:alien[4], alien[2]:alien[5]] = 0 except AttributeError: print('aliens parameter not configured') except Exception as e: print('did not remove aliens, error in aliens removal ', str(e)) return data
-1,441,866,952,189,308,200
Sets to zero the given alien blocks in the data array. Parameters ---------- data : ndarray an array with experiment data config_map : Object configuration object providing access to configuration parameters Returns ------- data : ndarray data array with zeroed out aliens
scripts/alien_tools.py
remove_blocks
AdvancedPhotonSource/cdisupp
python
def remove_blocks(data, config_map): '\n Sets to zero the given alien blocks in the data array.\n \n Parameters\n ----------\n data : ndarray\n an array with experiment data\n config_map : Object\n configuration object providing access to configuration parameters\n\n Returns\n -------\n data : ndarray\n data array with zeroed out aliens\n ' try: aliens = config_map.aliens for alien in aliens: data[alien[0]:alien[3], alien[1]:alien[4], alien[2]:alien[5]] = 0 except AttributeError: print('aliens parameter not configured') except Exception as e: print('did not remove aliens, error in aliens removal ', str(e)) return data
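Each alien block is a corner pair (x1, y1, z1, x2, y2, z2) zeroed out as half-open slices; a sketch with one hypothetical block (data is assumed to be a 3-D numpy array):

from types import SimpleNamespace

config_map = SimpleNamespace(aliens=[(10, 10, 5, 20, 20, 9)])   # hypothetical block
data = remove_blocks(data, config_map)   # zeroes data[10:20, 10:20, 5:9]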
def filter_aliens(data, config_map): '\n Sets to zero the points in the data array defined by a file.\n \n Parameters\n ----------\n data : ndarray\n an array with experiment data\n config_map : Object\n configuration object providing access to configuration parameters\n\n Returns\n -------\n data : ndarray\n data array with zeroed out aliens\n ' try: alien_file = config_map.alien_file if os.path.isfile(alien_file): mask = np.load(alien_file) for i in range(len(mask.shape)): if (mask.shape[i] != data.shape[i]): print('exiting, mask must be of the same shape as data:', data.shape) return data = np.where((mask == 1), data, 0.0) except AttributeError: print('alien_file parameter not configured') except Exception as e: print('did not remove aliens, error in aliens removal ', str(e)) return data
8,194,168,656,737,036,000
Sets to zero the points in the data array defined by a file. Parameters ---------- data : ndarray an array with experiment data config_map : Object configuration object providing access to configuration parameters Returns ------- data : ndarray data array with zeroed out aliens
scripts/alien_tools.py
filter_aliens
AdvancedPhotonSource/cdisupp
python
def filter_aliens(data, config_map): '\n Sets to zero the points in the data array defined by a file.\n \n Parameters\n ----------\n data : ndarray\n an array with experiment data\n config_map : Object\n configuration object providing access to configuration parameters\n\n Returns\n -------\n data : ndarray\n data array with zeroed out aliens\n ' try: alien_file = config_map.alien_file if os.path.isfile(alien_file): mask = np.load(alien_file) for i in range(len(mask.shape)): if (mask.shape[i] != data.shape[i]): print('exiting, mask must be of the same shape as data:', data.shape) return data = np.where((mask == 1), data, 0.0) except AttributeError: print('alien_file parameter not configured') except Exception as e: print('did not remove aliens, error in aliens removal ', str(e)) return data
def remove_aliens(data, config_map, data_dir=None): "\n Finds which algorithm is configured to remove the aliens and applies it to clean the data.\n \n Parameters\n ----------\n data : ndarray\n an array with experiment data\n config_map : Object\n configuration object providing access to configuration parameters\n data_dir : str\n a directory where the 'alien_analysis' subdirectory will be created to save results of analysis if configured\n Returns\n -------\n data : ndarray\n data array without aliens\n " try: algorithm = config_map.alien_alg if (algorithm == 'block_aliens'): data = remove_blocks(data, config_map) elif (algorithm == 'alien_file'): data = filter_aliens(data, config_map) elif (algorithm == 'AutoAlien1'): data = auto_alien1(data, config_map, data_dir) elif (algorithm != 'none'): print('unsupported alien removal algorithm', algorithm) except AttributeError: pass except Exception as e: print('did not remove aliens, error in aliens removal, error: ', str(e)) return data
6,373,080,184,290,245,000
Finds which algorithm is configured to remove the aliens and applies it to clean the data. Parameters ---------- data : ndarray an array with experiment data config_map : Object configuration object providing access to configuration parameters data_dir : str a directory where the 'alien_analysis' subdirectory will be created to save results of analysis if configured Returns ------- data : ndarray data array without aliens
scripts/alien_tools.py
remove_aliens
AdvancedPhotonSource/cdisupp
python
def remove_aliens(data, config_map, data_dir=None): "\n Finds which algorithm is configured to remove the aliens and applies it to clean the data.\n \n Parameters\n ----------\n data : ndarray\n an array with experiment data\n config_map : Object\n configuration object providing access to configuration parameters\n data_dir : str\n a directory where the 'alien_analysis' subdirectory will be created to save results of analysis if configured\n Returns\n -------\n data : ndarray\n data array without aliens\n " try: algorithm = config_map.alien_alg if (algorithm == 'block_aliens'): data = remove_blocks(data, config_map) elif (algorithm == 'alien_file'): data = filter_aliens(data, config_map) elif (algorithm == 'AutoAlien1'): data = auto_alien1(data, config_map, data_dir) elif (algorithm != 'none'): print('unsupported alien removal algorithm', algorithm) except AttributeError: pass except Exception as e: print('did not remove aliens, error in aliens removal, error: ', str(e)) return data
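The dispatcher keys off config_map.alien_alg ('block_aliens', 'alien_file', 'AutoAlien1', or 'none'); a sketch selecting the block-removal path (data is assumed to be a 3-D numpy array):

from types import SimpleNamespace

config_map = SimpleNamespace(
    alien_alg='block_aliens',
    aliens=[(10, 10, 5, 20, 20, 9)],   # hypothetical block, as in remove_blocks above
)
data = remove_aliens(data, config_map)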
def print_current_status(status): '\n Prints the current status\n :param status:\n :return:\n ' print(status) for i in range((str(status).count('\n') + 1)): sys.stdout.write('\x1b[F')
4,987,133,255,887,730,000
Prints the current status :param status: :return:
nodes/gnss_status_viewer_node.py
print_current_status
naoki-mizuno/gnss_status_viewer
python
def print_current_status(status): '\n Prints the current status\n :param status:\n :return:\n ' print(status) for i in range((str(status).count('\n') + 1)): sys.stdout.write('\x1b[F')
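The trailing '\x1b[F' writes are ANSI cursor-up escapes, so a later call redraws over the previous status; a quick terminal demo (assumes an ANSI-capable terminal and the function above in scope):

import time

for n in range(5):
    print_current_status('fix: 3D\nsatellites: %d' % n)   # two lines, redrawn in place
    time.sleep(1.0)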
def ensemble_negative_log_likelihood(labels, logits): "Negative log-likelihood for ensemble.\n\n For each datapoint (x,y), the ensemble's negative log-likelihood is:\n\n ```\n -log p(y|x) = -log sum_{m=1}^{ensemble_size} exp(log p(y|x,theta_m)) +\n log ensemble_size.\n ```\n\n Args:\n labels: tf.Tensor of shape [...].\n logits: tf.Tensor of shape [ensemble_size, ..., num_classes].\n\n Returns:\n tf.Tensor of shape [...].\n " labels = tf.cast(labels, tf.int32) logits = tf.convert_to_tensor(logits) ensemble_size = float(logits.shape[0]) nll = tf.nn.sparse_softmax_cross_entropy_with_logits(tf.broadcast_to(labels[(tf.newaxis, ...)], tf.shape(logits)[:(- 1)]), logits) return ((- tf.reduce_logsumexp((- nll), axis=0)) + tf.math.log(ensemble_size))
4,068,252,308,176,723,500
Negative log-likelihood for ensemble. For each datapoint (x,y), the ensemble's negative log-likelihood is: ``` -log p(y|x) = -log sum_{m=1}^{ensemble_size} exp(log p(y|x,theta_m)) + log ensemble_size. ``` Args: labels: tf.Tensor of shape [...]. logits: tf.Tensor of shape [ensemble_size, ..., num_classes]. Returns: tf.Tensor of shape [...].
baselines/imagenet/ensemble.py
ensemble_negative_log_likelihood
mhavasi/edward2
python
def ensemble_negative_log_likelihood(labels, logits): "Negative log-likelihood for ensemble.\n\n For each datapoint (x,y), the ensemble's negative log-likelihood is:\n\n ```\n -log p(y|x) = -log sum_{m=1}^{ensemble_size} exp(log p(y|x,theta_m)) +\n log ensemble_size.\n ```\n\n Args:\n labels: tf.Tensor of shape [...].\n logits: tf.Tensor of shape [ensemble_size, ..., num_classes].\n\n Returns:\n tf.Tensor of shape [...].\n " labels = tf.cast(labels, tf.int32) logits = tf.convert_to_tensor(logits) ensemble_size = float(logits.shape[0]) nll = tf.nn.sparse_softmax_cross_entropy_with_logits(tf.broadcast_to(labels[(tf.newaxis, ...)], tf.shape(logits)[:(- 1)]), logits) return ((- tf.reduce_logsumexp((- nll), axis=0)) + tf.math.log(ensemble_size))
def gibbs_cross_entropy(labels, logits): "Average cross entropy for ensemble members (Gibbs cross entropy).\n\n For each datapoint (x,y), the ensemble's Gibbs cross entropy is:\n\n ```\n GCE = - (1/ensemble_size) sum_{m=1}^ensemble_size log p(y|x,theta_m).\n ```\n\n The Gibbs cross entropy approximates the average cross entropy of a single\n model drawn from the (Gibbs) ensemble.\n\n Args:\n labels: tf.Tensor of shape [...].\n logits: tf.Tensor of shape [ensemble_size, ..., num_classes].\n\n Returns:\n tf.Tensor of shape [...].\n " labels = tf.cast(labels, tf.int32) logits = tf.convert_to_tensor(logits) nll = tf.nn.sparse_softmax_cross_entropy_with_logits(tf.broadcast_to(labels[(tf.newaxis, ...)], tf.shape(logits)[:(- 1)]), logits) return tf.reduce_mean(nll, axis=0)
5,129,305,994,803,191,000
Average cross entropy for ensemble members (Gibbs cross entropy). For each datapoint (x,y), the ensemble's Gibbs cross entropy is: ``` GCE = - (1/ensemble_size) sum_{m=1}^ensemble_size log p(y|x,theta_m). ``` The Gibbs cross entropy approximates the average cross entropy of a single model drawn from the (Gibbs) ensemble. Args: labels: tf.Tensor of shape [...]. logits: tf.Tensor of shape [ensemble_size, ..., num_classes]. Returns: tf.Tensor of shape [...].
baselines/imagenet/ensemble.py
gibbs_cross_entropy
mhavasi/edward2
python
def gibbs_cross_entropy(labels, logits): "Average cross entropy for ensemble members (Gibbs cross entropy).\n\n For each datapoint (x,y), the ensemble's Gibbs cross entropy is:\n\n ```\n GCE = - (1/ensemble_size) sum_{m=1}^ensemble_size log p(y|x,theta_m).\n ```\n\n The Gibbs cross entropy approximates the average cross entropy of a single\n model drawn from the (Gibbs) ensemble.\n\n Args:\n labels: tf.Tensor of shape [...].\n logits: tf.Tensor of shape [ensemble_size, ..., num_classes].\n\n Returns:\n tf.Tensor of shape [...].\n " labels = tf.cast(labels, tf.int32) logits = tf.convert_to_tensor(logits) nll = tf.nn.sparse_softmax_cross_entropy_with_logits(tf.broadcast_to(labels[(tf.newaxis, ...)], tf.shape(logits)[:(- 1)]), logits) return tf.reduce_mean(nll, axis=0)
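A tiny numeric check of the two losses above; by Jensen's inequality the ensemble NLL never exceeds the Gibbs cross entropy (shapes and values are illustrative):

import tensorflow as tf

logits = tf.constant([[[2.0, 0.0], [0.0, 1.0]],    # member 0: batch of 2, 2 classes
                      [[1.0, 0.0], [0.0, 3.0]]])   # member 1
labels = tf.constant([0, 1])
nll = ensemble_negative_log_likelihood(labels, logits)   # shape [2]
gce = gibbs_cross_entropy(labels, logits)                # shape [2]
assert bool(tf.reduce_all(nll <= gce + 1e-06))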
@pytest.fixture(scope='session') def app(app_settings): ' WSGI application level functional testing. ' return main({}, **app_settings)
-9,000,578,610,465,128,000
WSGI application level functional testing.
src/encoded/tests/conftest.py
app
dbmi-bgm/cgap-portal
python
@pytest.fixture(scope='session') def app(app_settings): ' ' return main({}, **app_settings)
@pytest.fixture(scope='session') def es_app(es_app_settings, **kwargs): '\n App that uses both Postgres and ES - pass this as "app" argument to TestApp.\n Pass all kwargs onto create_mapping\n ' app = main({}, **es_app_settings) create_mapping.run(app, **kwargs) return app
-7,586,295,019,175,009,000
App that uses both Postgres and ES - pass this as "app" argument to TestApp. Pass all kwargs onto create_mapping
src/encoded/tests/conftest.py
es_app
dbmi-bgm/cgap-portal
python
@pytest.fixture(scope='session') def es_app(es_app_settings, **kwargs): '\n App that uses both Postgres and ES - pass this as "app" argument to TestApp.\n Pass all kwargs onto create_mapping\n ' app = main({}, **es_app_settings) create_mapping.run(app, **kwargs) return app
@pytest.fixture def anontestapp(app): 'TestApp for anonymous user (i.e., no user specified), accepting JSON data.' environ = {'HTTP_ACCEPT': 'application/json'} return webtest.TestApp(app, environ)
7,349,053,589,448,217,000
TestApp for anonymous user (i.e., no user specified), accepting JSON data.
src/encoded/tests/conftest.py
anontestapp
dbmi-bgm/cgap-portal
python
@pytest.fixture def anontestapp(app): environ = {'HTTP_ACCEPT': 'application/json'} return webtest.TestApp(app, environ)
@pytest.fixture def anonhtmltestapp(app): 'TestApp for anonymous (not logged in) user, accepting text/html content.' environ = {'HTTP_ACCEPT': 'text/html'} test_app = webtest.TestApp(app, environ) return test_app
8,208,020,015,496,335,000
TestApp for anonymous (not logged in) user, accepting text/html content.
src/encoded/tests/conftest.py
anonhtmltestapp
dbmi-bgm/cgap-portal
python
@pytest.fixture def anonhtmltestapp(app): environ = {'HTTP_ACCEPT': 'text/html'} test_app = webtest.TestApp(app, environ) return test_app
@pytest.fixture def anon_es_testapp(es_app): ' TestApp simulating a bare Request entering the application (with ES enabled) ' environ = {'HTTP_ACCEPT': 'application/json'} return webtest.TestApp(es_app, environ)
-720,368,871,870,829,800
TestApp simulating a bare Request entering the application (with ES enabled)
src/encoded/tests/conftest.py
anon_es_testapp
dbmi-bgm/cgap-portal
python
@pytest.fixture def anon_es_testapp(es_app): ' ' environ = {'HTTP_ACCEPT': 'application/json'} return webtest.TestApp(es_app, environ)
@pytest.fixture def anon_html_es_testapp(es_app): 'TestApp with ES + Postgres for anonymous (not logged in) user, accepting text/html content.' environ = {'HTTP_ACCEPT': 'text/html'} return webtest.TestApp(es_app, environ)
-607,309,052,424,212,100
TestApp with ES + Postgres for anonymous (not logged in) user, accepting text/html content.
src/encoded/tests/conftest.py
anon_html_es_testapp
dbmi-bgm/cgap-portal
python
@pytest.fixture def anon_html_es_testapp(es_app): environ = {'HTTP_ACCEPT': 'text/html'} return webtest.TestApp(es_app, environ)
@pytest.fixture(scope='session') def testapp(app): 'TestApp for username TEST, accepting JSON data.' environ = {'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'TEST'} return webtest.TestApp(app, environ)
-9,211,947,515,465,539,000
TestApp for username TEST, accepting JSON data.
src/encoded/tests/conftest.py
testapp
dbmi-bgm/cgap-portal
python
@pytest.fixture(scope='session') def testapp(app): environ = {'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'TEST'} return webtest.TestApp(app, environ)
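A hedged sketch of how a test consumes these fixtures; the endpoint is hypothetical:

def test_health_page(testapp):
    # TEST user with a JSON Accept header, per the fixture above
    res = testapp.get('/health', status=200)   # '/health' is a hypothetical endpoint
    assert res.content_type == 'application/json'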
@pytest.fixture def htmltestapp(app): 'TestApp for TEST user, accepting text/html content.' environ = {'HTTP_ACCEPT': 'text/html', 'REMOTE_USER': 'TEST'} test_app = webtest.TestApp(app, environ) return test_app
6,502,149,716,404,919,000
TestApp for TEST user, accepting text/html content.
src/encoded/tests/conftest.py
htmltestapp
dbmi-bgm/cgap-portal
python
@pytest.fixture def htmltestapp(app): environ = {'HTTP_ACCEPT': 'text/html', 'REMOTE_USER': 'TEST'} test_app = webtest.TestApp(app, environ) return test_app
@pytest.fixture(scope='session') def es_testapp(es_app): ' TestApp with ES + Postgres. Must be imported where it is needed. ' environ = {'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'TEST'} return webtest.TestApp(es_app, environ)
-4,503,890,784,941,239,000
TestApp with ES + Postgres. Must be imported where it is needed.
src/encoded/tests/conftest.py
es_testapp
dbmi-bgm/cgap-portal
python
@pytest.fixture(scope='session') def es_testapp(es_app): ' ' environ = {'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'TEST'} return webtest.TestApp(es_app, environ)
@pytest.fixture def html_es_testapp(es_app): 'TestApp with ES + Postgres for TEST user, accepting text/html content.' environ = {'HTTP_ACCEPT': 'text/html', 'REMOTE_USER': 'TEST'} return webtest.TestApp(es_app, environ)
-6,523,108,449,352,916,000
TestApp with ES + Postgres for TEST user, accepting text/html content.
src/encoded/tests/conftest.py
html_es_testapp
dbmi-bgm/cgap-portal
python
@pytest.fixture def html_es_testapp(es_app): environ = {'HTTP_ACCEPT': 'text/html', 'REMOTE_USER': 'TEST'} return webtest.TestApp(es_app, environ)
@pytest.fixture def authenticated_testapp(app): 'TestApp for an authenticated, non-admin user (TEST_AUTHENTICATED), accepting JSON data.' environ = {'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'TEST_AUTHENTICATED'} return webtest.TestApp(app, environ)
3,389,445,688,238,822,000
TestApp for an authenticated, non-admin user (TEST_AUTHENTICATED), accepting JSON data.
src/encoded/tests/conftest.py
authenticated_testapp
dbmi-bgm/cgap-portal
python
@pytest.fixture def authenticated_testapp(app): environ = {'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'TEST_AUTHENTICATED'} return webtest.TestApp(app, environ)
@pytest.fixture def authenticated_es_testapp(es_app): ' TestApp for authenticated non-admin user with ES ' environ = {'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'TEST_AUTHENTICATED'} return webtest.TestApp(es_app, environ)
2,806,828,281,893,692,000
TestApp for authenticated non-admin user with ES
src/encoded/tests/conftest.py
authenticated_es_testapp
dbmi-bgm/cgap-portal
python
@pytest.fixture def authenticated_es_testapp(es_app): ' ' environ = {'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'TEST_AUTHENTICATED'} return webtest.TestApp(es_app, environ)
@pytest.fixture def submitter_testapp(app): 'TestApp for a non-admin user (TEST_SUBMITTER), accepting JSON data.' environ = {'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'TEST_SUBMITTER'} return webtest.TestApp(app, environ)
3,713,334,603,033,996,300
TestApp for a non-admin user (TEST_SUBMITTER), accepting JSON data.
src/encoded/tests/conftest.py
submitter_testapp
dbmi-bgm/cgap-portal
python
@pytest.fixture def submitter_testapp(app): environ = {'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'TEST_SUBMITTER'} return webtest.TestApp(app, environ)
@pytest.fixture def indexer_testapp(es_app): ' Indexer testapp, meant for manually triggering indexing runs by posting to /index.\n Always uses the ES app (obviously, but not so obvious previously) ' environ = {'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'INDEXER'} return webtest.TestApp(es_app, environ)
-5,551,315,579,333,300,000
Indexer testapp, meant for manually triggering indexing runs by posting to /index. Always uses the ES app (obviously, but not so obvious previously)
src/encoded/tests/conftest.py
indexer_testapp
dbmi-bgm/cgap-portal
python
@pytest.fixture def indexer_testapp(es_app): ' Indexer testapp, meant for manually triggering indexing runs by posting to /index.\n Always uses the ES app (obviously, but not so obvious previously) ' environ = {'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'INDEXER'} return webtest.TestApp(es_app, environ)
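A common pattern is to post an item with the ES app and then trigger one indexing pass through the indexer fixture; the collection name and the /index payload shape are assumptions:

def test_item_gets_indexed(es_testapp, indexer_testapp):
    es_testapp.post_json('/some-collection', {'title': 'x'}, status=201)  # hypothetical item type
    indexer_testapp.post_json('/index', {'record': True})                 # trigger an indexing run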
@pytest.fixture def embed_testapp(app): 'TestApp for user EMBED, accepting JSON data.' environ = {'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'EMBED'} return webtest.TestApp(app, environ)
4,015,416,991,047,454,000
TestApp for user EMBED, accepting JSON data.
src/encoded/tests/conftest.py
embed_testapp
dbmi-bgm/cgap-portal
python
@pytest.fixture def embed_testapp(app): environ = {'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'EMBED'} return webtest.TestApp(app, environ)
@pytest.fixture def wsgi_app(wsgi_server): 'TestApp for WSGI server.' return webtest.TestApp(wsgi_server)
-7,219,069,910,056,173,000
TestApp for WSGI server.
src/encoded/tests/conftest.py
wsgi_app
dbmi-bgm/cgap-portal
python
@pytest.fixture def wsgi_app(wsgi_server): return webtest.TestApp(wsgi_server)
@pytest.fixture(scope='session') def workbook(es_app): " Loads a bunch of data (tests/data/workbook-inserts) into the system on first run\n (session scope doesn't work). " WorkbookCache.initialize_if_needed(es_app)
-6,721,380,994,656,458,000
Loads a bunch of data (tests/data/workbook-inserts) into the system on first run (session scope doesn't work).
src/encoded/tests/conftest.py
workbook
dbmi-bgm/cgap-portal
python
@pytest.fixture(scope='session') def workbook(es_app): " Loads a bunch of data (tests/data/workbook-inserts) into the system on first run\n (session scope doesn't work). " WorkbookCache.initialize_if_needed(es_app)
@distributed_trace def get_method_local_valid(self, **kwargs): "Get method with api-version modeled in the method. pass in api-version = '2.0' to succeed.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n " cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_get_method_local_valid_request(template_url=self.get_method_local_valid.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})
-2,154,966,697,742,777,000
Get method with api-version modeled in the method. pass in api-version = '2.0' to succeed. :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError
test/azure/legacy/Expected/AcceptanceTests/AzureSpecials/azurespecialproperties/operations/_api_version_local_operations.py
get_method_local_valid
Azure/autorest.python
python
@distributed_trace def get_method_local_valid(self, **kwargs): "Get method with api-version modeled in the method. pass in api-version = '2.0' to succeed.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n " cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_get_method_local_valid_request(template_url=self.get_method_local_valid.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})
@distributed_trace def get_method_local_null(self, api_version=None, **kwargs): 'Get method with api-version modeled in the method. pass in api-version = null to succeed.\n\n :param api_version: This should appear as a method parameter, use value null, this should\n result in no serialized parameter.\n :type api_version: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_get_method_local_null_request(api_version=api_version, template_url=self.get_method_local_null.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})
-1,328,737,055,622,945,000
Get method with api-version modeled in the method. pass in api-version = null to succeed. :param api_version: This should appear as a method parameter, use value null, this should result in no serialized parameter. :type api_version: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError
test/azure/legacy/Expected/AcceptanceTests/AzureSpecials/azurespecialproperties/operations/_api_version_local_operations.py
get_method_local_null
Azure/autorest.python
python
@distributed_trace def get_method_local_null(self, api_version=None, **kwargs): 'Get method with api-version modeled in the method. pass in api-version = null to succeed.\n\n :param api_version: This should appear as a method parameter, use value null, this should\n result in no serialized parameter.\n :type api_version: str\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_get_method_local_null_request(api_version=api_version, template_url=self.get_method_local_null.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})
@distributed_trace def get_path_local_valid(self, **kwargs): "Get method with api-version modeled in the method. pass in api-version = '2.0' to succeed.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n " cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_get_path_local_valid_request(template_url=self.get_path_local_valid.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})
-1,937,686,304,681,717,200
Get method with api-version modeled in the method. pass in api-version = '2.0' to succeed. :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError
test/azure/legacy/Expected/AcceptanceTests/AzureSpecials/azurespecialproperties/operations/_api_version_local_operations.py
get_path_local_valid
Azure/autorest.python
python
@distributed_trace def get_path_local_valid(self, **kwargs): "Get method with api-version modeled in the method. pass in api-version = '2.0' to succeed.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n " cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_get_path_local_valid_request(template_url=self.get_path_local_valid.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})
@distributed_trace def get_swagger_local_valid(self, **kwargs): "Get method with api-version modeled in the method. pass in api-version = '2.0' to succeed.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n " cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_get_swagger_local_valid_request(template_url=self.get_swagger_local_valid.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})
889,666,015,888,593,800
Get method with api-version modeled in the method. pass in api-version = '2.0' to succeed. :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError
test/azure/legacy/Expected/AcceptanceTests/AzureSpecials/azurespecialproperties/operations/_api_version_local_operations.py
get_swagger_local_valid
Azure/autorest.python
python
@distributed_trace def get_swagger_local_valid(self, **kwargs): cls = kwargs.pop('cls', None) error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) request = build_get_swagger_local_valid_request(template_url=self.get_swagger_local_valid.metadata['url']) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {})
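These operations also pop an optional error_map from kwargs and merge it over the default {401, 404, 409} mapping before calling map_error, so callers can route extra status codes to azure-core exception types. A hedged sketch:

from azure.core.exceptions import HttpResponseError

# Route 429 responses through map_error as well; the default 401/404/409
# entries remain because the operation merges rather than replaces the map.
custom_map = {429: HttpResponseError}

# Hypothetical call -- client construction is not shown in this record:
# client.api_version_local.get_swagger_local_valid(error_map=custom_map)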
@app.template_filter() def friendly_time(dt, past_='ago', future_='from now', default='just now'): '\n Returns string representing "time since"\n or "time until" e.g.\n 3 days ago, 5 hours from now etc.\n ' if (dt is None): return '' if isinstance(dt, basestring): dt = iso8601.parse_date(dt) if dt.tzinfo: dt = dt.astimezone(pytz.utc).replace(tzinfo=None) now = datetime.datetime.utcnow() if (now > dt): diff = (now - dt) dt_is_past = True else: diff = (dt - now) dt_is_past = False periods = (((diff.days / 365), 'year', 'years'), ((diff.days / 30), 'month', 'months'), ((diff.days / 7), 'week', 'weeks'), (diff.days, 'day', 'days'), ((diff.seconds / 3600), 'hour', 'hours'), ((diff.seconds / 60), 'minute', 'minutes'), (diff.seconds, 'second', 'seconds')) for (period, singular, plural) in periods: if period: return ('%d %s %s' % (period, (singular if (period == 1) else plural), (past_ if dt_is_past else future_))) return default
5,157,354,521,017,599,000
Returns string representing "time since" or "time until" e.g. 3 days ago, 5 hours from now etc.
app.py
friendly_time
codeforamerica/srtracker
python
@app.template_filter() def friendly_time(dt, past_='ago', future_='from now', default='just now'): if (dt is None): return '' if isinstance(dt, basestring): dt = iso8601.parse_date(dt) if dt.tzinfo: dt = dt.astimezone(pytz.utc).replace(tzinfo=None) now = datetime.datetime.utcnow() if (now > dt): diff = (now - dt) dt_is_past = True else: diff = (dt - now) dt_is_past = False periods = (((diff.days / 365), 'year', 'years'), ((diff.days / 30), 'month', 'months'), ((diff.days / 7), 'week', 'weeks'), (diff.days, 'day', 'days'), ((diff.seconds / 3600), 'hour', 'hours'), ((diff.seconds / 60), 'minute', 'minutes'), (diff.seconds, 'second', 'seconds')) for (period, singular, plural) in periods: if period: return ('%d %s %s' % (period, (singular if (period == 1) else plural), (past_ if dt_is_past else future_))) return default
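The period cascade in friendly_time picks the first non-zero unit, largest first. The record's code is Python 2 (basestring, integer division with /); the self-contained sketch below reproduces just the cascade in Python 3 with floor division, leaving out the Flask/iso8601/pytz plumbing:

import datetime

def _humanize(diff, suffix):
    # Same selection logic as friendly_time: the first non-zero period wins.
    periods = ((diff.days // 365, 'year', 'years'),
               (diff.days // 30, 'month', 'months'),
               (diff.days // 7, 'week', 'weeks'),
               (diff.days, 'day', 'days'),
               (diff.seconds // 3600, 'hour', 'hours'),
               (diff.seconds // 60, 'minute', 'minutes'),
               (diff.seconds, 'second', 'seconds'))
    for period, singular, plural in periods:
        if period:
            return '%d %s %s' % (period, singular if period == 1 else plural, suffix)
    return 'just now'

print(_humanize(datetime.timedelta(days=3), 'ago'))        # 3 days ago
print(_humanize(datetime.timedelta(hours=5), 'from now'))  # 5 hours from now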
@app.template_filter() def title_address(address): 'Slightly improved title() method for address strings\n Makes sure state abbreviations are upper-case.' titled = address.title() titled = state_pattern.sub((lambda match: (match.group(1).upper() + (match.group(2) or ''))), titled) return titled
5,237,181,232,790,722,000
Slightly improved title() method for address strings. Makes sure state abbreviations are upper-case.
app.py
title_address
codeforamerica/srtracker
python
@app.template_filter() def title_address(address): titled = address.title() titled = state_pattern.sub((lambda match: (match.group(1).upper() + (match.group(2) or ''))), titled) return titled
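title_address depends on a module-level state_pattern regex that this record does not include. A plausible stand-in (the abbreviation list is a truncated assumption, not the srtracker original) shows why group(1) is uppercased and why the (match.group(2) or '') guard is needed for end-of-string matches:

import re

# Assumed shape of state_pattern: a title-cased two-letter state code
# captured in group(1), with the following non-word character (or end of
# string) captured in group(2).
state_pattern = re.compile(r'\b(Al|Az|Ca|Il|Ny|Tx|Wa)(\W|$)')

print(state_pattern.sub(
    lambda m: m.group(1).upper() + (m.group(2) or ''),
    '123 main st, chicago, il 60602'.title()))
# 123 Main St, Chicago, IL 60602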
def render_app_template(template, **kwargs): 'Add some goodies to all templates.' if ('config' not in kwargs): kwargs['config'] = app.config if ('__version__' not in kwargs): kwargs['__version__'] = __version__ return render_template(template, **kwargs)
7,080,468,876,659,081,000
Add some goodies to all templates.
app.py
render_app_template
codeforamerica/srtracker
python
def render_app_template(template, **kwargs): if ('config' not in kwargs): kwargs['config'] = app.config if ('__version__' not in kwargs): kwargs['__version__'] = __version__ return render_template(template, **kwargs)
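render_app_template is a thin wrapper over Flask's render_template that injects two defaults. A minimal usage sketch from a view function; the route and template name are illustrative, not from the srtracker codebase:

# Hypothetical view -- only render_app_template itself comes from the record above.
@app.route('/about')
def about():
    # 'config' and '__version__' are added automatically when not supplied.
    return render_app_template('about.html', page_title='About')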
def fixup_sr(sr, request_id=None): "\n Fix up an SR to try and ensure some basic info.\n (In Chicago's API, any field can be missing, even if it's required.)\n " remove_blacklisted_fields(sr) if ('service_request_id' not in sr): sr['service_request_id'] = (request_id or sr.get('token', 'UNKNOWN')) if ('status' not in sr): sr['status'] = 'open' if ('service_name' not in sr): sr['service_name'] = 'Miscellaneous Services' return sr
-3,728,430,768,534,553,000
Fix up an SR to try and ensure some basic info. (In Chicago's API, any field can be missing, even if it's required.)
app.py
fixup_sr
codeforamerica/srtracker
python
def fixup_sr(sr, request_id=None): remove_blacklisted_fields(sr) if ('service_request_id' not in sr): sr['service_request_id'] = (request_id or sr.get('token', 'UNKNOWN')) if ('status' not in sr): sr['status'] = 'open' if ('service_name' not in sr): sr['service_name'] = 'Miscellaneous Services' return sr
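A quick illustration of fixup_sr's defaulting on a sparse Open311 service request; remove_blacklisted_fields is defined elsewhere in app.py, so this sketch stubs it as a no-op:

def remove_blacklisted_fields(sr):
    pass  # stub for the sketch; the real function strips configured fields in place

sparse = {'token': 'abc123'}
print(fixup_sr(sparse))
# {'token': 'abc123', 'service_request_id': 'abc123',
#  'status': 'open', 'service_name': 'Miscellaneous Services'}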
def _css_select(soup, css_selector): ' Returns the content of the element pointed by the CSS selector,\n or an empty string if not found ' selection = soup.select(css_selector) if (len(selection) > 0): if hasattr(selection[0], 'text'): retour = selection[0].text.strip() else: retour = '' else: retour = '' return retour
6,685,540,123,375,371,000
Returns the content of the element pointed by the CSS selector, or an empty string if not found
precisionmapper/__init__.py
_css_select
tducret/precisionmapper-python
python
def _css_select(soup, css_selector): selection = soup.select(css_selector) if (len(selection) > 0): if hasattr(selection[0], 'text'): retour = selection[0].text.strip() else: retour = '' else: retour = '' return retour
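_css_select guards against missing selectors and text-less nodes. A runnable sketch, assuming the helper above is in scope (requires beautifulsoup4; the sample HTML is invented for illustration):

from bs4 import BeautifulSoup

html = '<div class="survey"><span class="name">Field A</span></div>'
soup = BeautifulSoup(html, 'html.parser')

print(_css_select(soup, 'span.name'))   # Field A
print(_css_select(soup, 'span.other'))  # empty string: selector not found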