# repo: bartosh/zipline
def __init__(self, *failures):
    self.failures = failures
def subtest(iterator, *_names):
"""
Construct a subtest in a unittest.
Consider using ``zipline.testing.parameter_space`` when subtests
are constructed over a single input or over the cross-product of multiple
inputs.
``subtest`` works by decorating a function as a subtest. The decorated
function will be run by iterating over the ``iterator`` and *unpacking the
values into the function. If any of the runs fail, the result will be put
into a set and the rest of the tests will be run. Finally, if any failed,
all of the results will be dumped as one failure.
Parameters
----------
iterator : iterable[iterable]
The iterator of arguments to pass to the function.
    *_names : iterable[str]
The names to use for each element of ``iterator``. These will be used
to print the scope when a test fails. If not provided, it will use the
integer index of the value as the name.
Examples
--------
::
class MyTest(TestCase):
def test_thing(self):
# Example usage inside another test.
@subtest(([n] for n in range(100000)), 'n')
def subtest(n):
self.assertEqual(n % 2, 0, 'n was not even')
subtest()
@subtest(([n] for n in range(100000)), 'n')
def test_decorated_function(self, n):
# Example usage to parameterize an entire function.
self.assertEqual(n % 2, 1, 'n was not odd')
Notes
-----
We use this when we:
* Will never want to run each parameter individually.
* Have a large parameter space we are testing
(see tests/utils/test_events.py).
``nose_parameterized.expand`` will create a test for each parameter
    combination, which bloats the test output and makes the Travis pages slow.
We cannot use ``unittest2.TestCase.subTest`` because nose, pytest, and
nose2 do not support ``addSubTest``.
See Also
--------
zipline.testing.parameter_space
"""
def dec(f):
@wraps(f)
def wrapped(*args, **kwargs):
names = _names
failures = []
for scope in iterator:
scope = tuple(scope)
try:
f(*args + scope, **kwargs)
except Exception as e:
if not names:
names = count()
failures.append((dict(zip(names, scope)), e))
if failures:
raise SubTestFailures(*failures)
return wrapped
    return dec
def get_value(self, col, sid, dt):
    return 100
def create_mock_adjustments(tempdir, days, splits=None, dividends=None,
mergers=None):
path = tempdir.getpath("test_adjustments.db")
SQLiteAdjustmentWriter(path, MockDailyBarReader(), days).write(
*create_mock_adjustment_data(splits, dividends, mergers)
)
    return path
def powerset(values):
"""
Return the power set (i.e., the set of all subsets) of entries in `values`.
"""
    return concat(combinations(values, i) for i in range(len(values) + 1))
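
# A quick demonstration of ``powerset``, assuming ``concat`` is ``toolz.concat``
# and ``combinations`` is ``itertools.combinations`` (this excerpt does not
# show the module's imports).
from itertools import combinations
from toolz import concat

assert list(powerset('ab')) == [(), ('a',), ('b',), ('a', 'b')]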
def gen_calendars(start, stop, critical_dates):
"""
Generate calendars to use as inputs.
"""
all_dates = pd.date_range(start, stop, tz='utc')
for to_drop in map(list, powerset(critical_dates)):
# Have to yield tuples.
yield (all_dates.drop(to_drop),)
# Also test with the trading calendar.
trading_days = get_calendar("NYSE").all_days
    yield (trading_days[trading_days.slice_indexer(start, stop)],)
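
# A hedged usage sketch pairing ``gen_calendars`` with ``subtest`` (defined
# above): each yielded 1-tuple supplies the ``calendar`` argument for one run
# of the decorated function. The dates here are illustrative only.
import pandas as pd

critical = pd.to_datetime(['2014-01-05', '2014-01-06'], utc=True)

@subtest(gen_calendars('2014-01-01', '2014-01-31', critical), 'calendar')
def check_calendar(calendar):
    assert len(calendar) > 0

check_calendar()  # runs once per generated calendar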
def temp_pipeline_engine(calendar, sids, random_seed, symbols=None):
"""
    A context manager that yields a SimplePipelineEngine holding a reference to
an AssetFinder generated via tmp_asset_finder.
Parameters
----------
calendar : pd.DatetimeIndex
Calendar to pass to the constructed PipelineEngine.
sids : iterable[int]
Sids to use for the temp asset finder.
random_seed : int
Integer used to seed instances of SeededRandomLoader.
symbols : iterable[str], optional
Symbols for constructed assets. Forwarded to make_simple_equity_info.
"""
equity_info = make_simple_equity_info(
sids=sids,
start_date=calendar[0],
end_date=calendar[-1],
symbols=symbols,
)
loader = make_seeded_random_loader(random_seed, calendar, sids)
def get_loader(column):
return loader
with tmp_asset_finder(equities=equity_info) as finder:
        yield SimplePipelineEngine(get_loader, calendar, finder)
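
# A hedged usage sketch, assuming ``temp_pipeline_engine`` is wrapped with
# ``contextlib.contextmanager`` elsewhere in the module (implied by ``yield``).
import pandas as pd

cal = pd.date_range('2014-01-02', '2014-06-30', tz='utc')
with temp_pipeline_engine(cal, sids=[1, 2, 3], random_seed=42) as engine:
    pass  # run pipelines against ``engine`` here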
def decorator(f):
argspec = getargspec(f)
if argspec.varargs:
raise AssertionError("parameter_space() doesn't support *args")
if argspec.keywords:
raise AssertionError("parameter_space() doesn't support **kwargs")
if argspec.defaults:
raise AssertionError("parameter_space() doesn't support defaults.")
# Skip over implicit self.
argnames = argspec.args
if argnames[0] == 'self':
argnames = argnames[1:]
extra = set(params) - set(argnames)
if extra:
raise AssertionError(
"Keywords %s supplied to parameter_space() are "
"not in function signature." % extra
)
unspecified = set(argnames) - set(params)
if unspecified:
raise AssertionError(
"Function arguments %s were not "
"supplied to parameter_space()." % extra
)
def make_param_sets():
return product(*(params[name] for name in argnames))
if __fail_fast:
@wraps(f)
def wrapped(self):
for args in make_param_sets():
f(self, *args)
return wrapped
else:
@wraps(f)
def wrapped(*args, **kwargs):
subtest(make_param_sets(), *argnames)(f)(*args, **kwargs)
        return wrapped
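
# A hedged usage sketch of the decorator this builds (exposed as
# ``zipline.testing.parameter_space`` per the See Also above): the test body
# runs once per element of the cross product, here 3 * 2 = 6 combinations.
# The parameter names are illustrative.
from unittest import TestCase

class ParamSpaceExample(TestCase):
    @parameter_space(n=[1, 2, 3], flag=[True, False])
    def test_cross_product(self, n, flag):
        self.assertIsInstance(n, int)
        self.assertIsInstance(flag, bool)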
def create_empty_dividends_frame():
return pd.DataFrame(
np.array(
[],
dtype=[
('ex_date', 'datetime64[ns]'),
('pay_date', 'datetime64[ns]'),
('record_date', 'datetime64[ns]'),
('declared_date', 'datetime64[ns]'),
('amount', 'float64'),
('sid', 'int32'),
],
),
index=pd.DatetimeIndex([], tz='UTC'),
    )
def make_alternating_boolean_array(shape, first_value=True):
"""
Create a 2D numpy array with the given shape containing alternating values
of False, True, False, True,... along each row and each column.
Examples
--------
>>> make_alternating_boolean_array((4,4))
array([[ True, False, True, False],
[False, True, False, True],
[ True, False, True, False],
[False, True, False, True]], dtype=bool)
>>> make_alternating_boolean_array((4,3), first_value=False)
array([[False, True, False],
[ True, False, True],
[False, True, False],
[ True, False, True]], dtype=bool)
"""
if len(shape) != 2:
raise ValueError(
'Shape must be 2-dimensional. Given shape was {}'.format(shape)
)
    alternating = np.empty(shape, dtype=bool)
    for row in alternating:
        row[::2] = first_value
        row[1::2] = not first_value
        first_value = not first_value
    return alternating
def permute_rows(seed, array):
"""
Shuffle each row in ``array`` based on permutations generated by ``seed``.
Parameters
----------
seed : int
Seed for numpy.RandomState
array : np.ndarray[ndim=2]
Array over which to apply permutations.
"""
rand = np.random.RandomState(seed)
    return np.apply_along_axis(rand.permutation, 1, array)
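
# A small demonstration: each row is shuffled independently but reproducibly
# for a fixed seed, so row contents are preserved.
import numpy as np

arr = np.arange(12).reshape(3, 4)
out = permute_rows(0, arr)
assert out.shape == arr.shape
assert all(sorted(r) == sorted(o) for r, o in zip(out.tolist(), arr.tolist()))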
def make_test_handler(testcase, *args, **kwargs):
"""
Returns a TestHandler which will be used by the given testcase. This
handler can be used to test log messages.
Parameters
----------
testcase: unittest.TestCase
The test class in which the log handler will be used.
*args, **kwargs
Forwarded to the new TestHandler object.
Returns
-------
handler: logbook.TestHandler
The handler to use for the test case.
"""
handler = TestHandler(*args, **kwargs)
testcase.addCleanup(handler.close)
    return handler
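
# A hedged usage sketch: ``logbook.TestHandler`` captures records so the test
# can assert on them, and the cleanup registered above closes it automatically.
import logbook
from unittest import TestCase

class LogExample(TestCase):
    def test_warns(self):
        handler = make_test_handler(self)
        with handler.applicationbound():
            logbook.Logger('demo').warn('something looks off')
        self.assertTrue(handler.has_warning('something looks off'))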
def read_compressed(path):
"""
    Read the contents of a compressed (gzipped) file at `path`.
"""
with gzip.open(path, 'rb') as f:
        return f.read()
def test_resource_path(*path_parts):
    return os.path.join(zipline_git_root, 'tests', 'resources', *path_parts)
def patch_os_environment(remove=None, **values):
"""
Context manager for patching the operating system environment.
"""
old_values = {}
remove = remove or []
for key in remove:
old_values[key] = os.environ.pop(key)
    for key, value in values.items():
old_values[key] = os.getenv(key)
os.environ[key] = value
try:
yield
finally:
        for old_key, old_value in old_values.items():
            if old_value is None:
                # Value was not present when we entered, so delete it if it's
                # still present.
                try:
                    del os.environ[old_key]
                except KeyError:
                    pass
else:
# Restore the old value.
                os.environ[old_key] = old_value
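
# A hedged usage sketch, assuming the function is wrapped with
# ``contextlib.contextmanager`` elsewhere (implied by the ``yield``); the
# variable name is illustrative.
import os

with patch_os_environment(ZIPLINE_TEST_VAR='1'):
    assert os.environ['ZIPLINE_TEST_VAR'] == '1'
assert os.getenv('ZIPLINE_TEST_VAR') is None  # restored on exit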
def _reader_cls(self):
    raise NotImplementedError('_reader')
def _write(self, env, days, path, data):
    raise NotImplementedError('_write')
def __enter__(self):
tmpdir = super(_TmpBarReader, self).__enter__()
env = self._env
try:
self._write(
env,
self._days,
tmpdir.path,
self._data,
)
return self._reader_cls(tmpdir.path)
except:
self.__exit__(None, None, None)
        raise
def _write(env, days, path, data):
    BcolzDailyBarWriter(path, days).write(data)
def patch_read_csv(url_map, module=pd, strict=False):
"""Patch pandas.read_csv to map lookups from url to another.
Parameters
----------
url_map : mapping[str or file-like object -> str or file-like object]
The mapping to use to redirect read_csv calls.
module : module, optional
The module to patch ``read_csv`` on. By default this is ``pandas``.
This should be set to another module if ``read_csv`` is early-bound
like ``from pandas import read_csv`` instead of late-bound like:
``import pandas as pd; pd.read_csv``.
strict : bool, optional
If true, then this will assert that ``read_csv`` is only called with
elements in the ``url_map``.
"""
read_csv = pd.read_csv
def patched_read_csv(filepath_or_buffer, *args, **kwargs):
if filepath_or_buffer in url_map:
return read_csv(url_map[filepath_or_buffer], *args, **kwargs)
elif not strict:
return read_csv(filepath_or_buffer, *args, **kwargs)
else:
raise AssertionError(
                'attempted to call read_csv on %r which is not in the url map' %
filepath_or_buffer,
)
with patch.object(module, 'read_csv', patched_read_csv):
        yield
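
# A hedged usage sketch, again assuming a ``contextmanager`` wrapper; the URL
# and local path are illustrative. With ``strict=True``, any ``read_csv`` call
# outside the map raises ``AssertionError``.
import pandas as pd

mapping = {'https://example.com/data.csv': 'tests/resources/data.csv'}
with patch_read_csv(mapping, strict=True):
    df = pd.read_csv('https://example.com/data.csv')  # reads the local file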
def ensure_doctest(f, name=None):
"""Ensure that an object gets doctested. This is useful for instances
of objects like curry or partial which are not discovered by default.
Parameters
----------
f : any
The thing to doctest.
name : str, optional
The name to use in the doctest function mapping. If this is None,
Then ``f.__name__`` will be used.
Returns
-------
f : any
``f`` unchanged.
"""
_getframe(2).f_globals.setdefault('__test__', {})[
f.__name__ if name is None else name
] = f
    return f
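
# Background sketch: ``doctest`` collects entries from a module-level
# ``__test__`` mapping, which is exactly what ``ensure_doctest`` writes into
# the caller's globals. The helper below is illustrative.
import doctest

def _make_add():
    def add(a, b):
        """
        >>> add(1, 2)
        3
        """
        return a + b
    return add

__test__ = {'add': _make_add()}  # what ensure_doctest does for you

if __name__ == '__main__':
    doctest.testmod()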
def __init__(self, data_frequency):
super(RecordBatchBlotter, self).__init__(data_frequency)
    self.order_batch_called = []
def compute(self, today, assets, out):
    out[:] = assets
def compute(self, today, assets, out):
    out[:] = assets + today.day

# repo: google/ctfscoreboard
def send(message, subject, to, to_name=None, sender=None, sender_name=None):
"""Send an email."""
sender = sender or app.config.get('MAIL_FROM')
sender_name = sender_name or app.config.get('MAIL_FROM_NAME') or ''
mail_provider = app.config.get('MAIL_PROVIDER')
if mail_provider is None:
app.logger.error('No MAIL_PROVIDER configured!')
raise MailFailure('No MAIL_PROVIDER configured!')
elif mail_provider == 'smtp':
_send_smtp(message, subject, to, to_name, sender, sender_name)
elif mail_provider == 'mailjet':
_send_mailjet(message, subject, to, to_name, sender, sender_name)
else:
app.logger.error('Invalid MAIL_PROVIDER configured!')
        raise MailFailure('Invalid MAIL_PROVIDER configured!')

# repo: ray-project/ray
def test_excluded_keys_results_preprocessor():
results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}]
expected = [{"b": 2}, {"b": 4}]
preprocessor = ExcludedKeysResultsPreprocessor("a")
preprocessed_results = preprocessor.preprocess(results)
    assert preprocessed_results == expected
def test_sequential_results_preprocessor():
results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
expected = [{"b": 2}, {"b": 6}]
preprocessor_1 = ExcludedKeysResultsPreprocessor("a")
# [{"b": 2}, {"b": 4}, {"b": 6}, {"b": 8}]
preprocessor_2 = IndexedResultsPreprocessor([0, 2])
preprocessor = SequentialResultsPreprocessor([preprocessor_1, preprocessor_2])
preprocessed_results = preprocessor.preprocess(results)
    assert preprocessed_results == expected
def test_max_results_preprocessor():
from copy import deepcopy
import numpy as np
results = [{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}, {"a": 7, "b": 8}]
expected = deepcopy(results)
for res in expected:
res.update(
{
"max(a)": np.max([result["a"] for result in results]),
"max(b)": np.max([result["b"] for result in results]),
}
)
preprocessor = MaxResultsPreprocessor(["a", "b"])
preprocessed_results = preprocessor.preprocess(results)
    assert preprocessed_results == expected
def test_warning_in_aggregate_results_preprocessors(
    caplog, results_preprocessor, expected_value
def test_warning_in_weighted_average_results_preprocessors(caplog):
import logging
from copy import deepcopy
caplog.at_level(logging.WARNING)
results1 = [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]
results2 = [{"b": 1}, {"b": 2}, {"b": 3}, {"b": 4}]
results3 = [
{"a": 1, "c": 3},
{"a": 2, "c": "invalid"},
{"a": "invalid", "c": 1},
{"a": 4, "c": "invalid"},
]
results4 = [
{"a": 1, "c": "invalid"},
{"a": 2, "c": "invalid"},
{"a": 3, "c": "invalid"},
{"a": 4, "c": "invalid"},
]
# test case 1: weight key `b` is not reported from all workers
results_preprocessor1 = WeightedAverageResultsPreprocessor(["a"], "b")
expected1 = deepcopy(results1)
for res in expected1:
res.update({"weight_avg_b(a)": 2.5})
assert results_preprocessor1.preprocess(results1) == expected1
assert (
"Averaging weight `b` is not reported by all workers in `train.report()`."
in caplog.text
)
assert "Use equal weight instead." in caplog.text
# test case 2: metric key `a` (to be averaged) is not reported from all workers
results_preprocessor1.preprocess(results2)
assert "`a` is not reported from workers, so it is ignored." in caplog.text
# test case 3: both metric and weight keys have invalid data type
results_preprocessor2 = WeightedAverageResultsPreprocessor(["a"], "c")
expected3 = deepcopy(results3)
for res in expected3:
res.update({"weight_avg_c(a)": 1.0})
assert results_preprocessor2.preprocess(results3) == expected3
# test case 4: all weight values are invalid
expected4 = deepcopy(results4)
for res in expected4:
res.update({"weight_avg_c(a)": 2.5})
assert results_preprocessor2.preprocess(results4) == expected4
assert "Averaging weight `c` value type is not valid." in caplog.text
for record in caplog.records:
        assert record.levelname == "WARNING"

# repo: isb-cgc/ISB-CGC-Webapp
def get_number_of_unique_samples(track):
sample_ids = set()
for mutation in track['mutations']:
sample_ids.add(mutation[SAMPLE_ID_FIELD_NAME])
    return len(sample_ids)
def clean_track_mutations(mutations_array):
retval = []
for mutation in mutations_array:
cleaned = deepcopy(mutation)
cleaned[COORDINATE_FIELD_NAME] = int(mutation[COORDINATE_FIELD_NAME])
retval.append(cleaned)
    return retval
def get_track_statistics_by_track_type(track, cohort_info_map):
track_id = track[TRACK_ID_FIELD]
result = {
'samples': {
'numberOf': get_number_of_unique_samples(track),
'mutated_positions': get_number_of_mutated_positions(track)
}
}
if track['type'] == 'tumor':
cohort_info = cohort_info_map[track_id]
result['cohort_size'] = cohort_info['size']
else:
# Do not assign cohort size for the 'COMBINED' track.
result['cohort_size'] = None
    return result
def get_table_row_id(tumor_type):
return "seqpeek_row_{0}".format(tumor_type) | isb-cgc/ISB-CGC-Webapp | [
12,
9,
12,
7,
1443114166
] |
def build_summary_track(tracks):
    combined = []
    for track in tracks:
        combined.extend(track["mutations"])
    return {
        'mutations': combined,
'label': 'COMBINED',
'tumor': 'none-combined',
'type': 'summary'
    }
def get_track_label(track, cohort_info_array):
# The IDs in cohort_info_array are integers, whereas the track IDs are strings.
cohort_map = {str(item['id']): item['name'] for item in cohort_info_array}
    return cohort_map[track[TRACK_ID_FIELD]]
def __init__(self, cohort_info, data):
self.cohort_info = cohort_info
    self.data = data
def from_dict(cls, param):
    return cls(param['cohort_set'], param['items'])
def find_uniprot_id(mutations):
uniprot_id = None
for m in mutations:
if PROTEIN_ID_FIELD in m:
uniprot_id = m[PROTEIN_ID_FIELD]
break
    return uniprot_id
def get_genes_tumors_lists_remote():
context = {
'symbol_list': [],
'track_id_list': []
}
    return context
def get_track_id_list(param):
    return list(map(str, param))

# repo: googleapis/python-aiplatform
def sample_delete_study():
# Create a client
client = aiplatform_v1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteStudyRequest(
name="name_value",
)
# Make the request
    client.delete_study(request=request)

# repo: 5g-empower/empower-runtime
def get(self, *args, **kwargs):
"""Lists all the alerts.
Args:
[0], the alert id (optional)
Example URLs:
GET /api/v1/alerts
GET /api/v1/alerts/52313ecb-9d00-4b7d-b873-b55d3d9ada26
"""
return self.service.alerts \
        if not args else self.service.alerts[uuid.UUID(args[0])]
def post(self, *args, **kwargs):
"""Create a new alert.
Args:
[0], the alert id (optional)
Request:
version: protocol version (1.0)
alert: the alert
"""
alert_id = uuid.UUID(args[0]) if args else uuid.uuid4()
if 'alert' in kwargs:
alert = self.service.create(uuid=alert_id, alert=kwargs['alert'])
else:
alert = self.service.create(uuid=alert_id)
self.set_header("Location", "/api/v1/alerts/%s" % alert.uuid) | 5g-empower/empower-runtime | [
46,
47,
46,
3,
1441462064
] |
def fn(self):
n_shots = _abbrev_n_shots(n_shots=self.n_shots)
qubit = _abbrev_grid_qubit(self.qubit)
return (f'{self.dataset_id}/'
f'{self.device_name}/'
f'q-{qubit}/'
            f'ry_scan_{self.resolution_factor}_{n_shots}')
def _abbrev_n_shots(n_shots: int) -> str:
"""Shorter n_shots component of a filename"""
if n_shots % 1000 == 0:
return f'{n_shots // 1000}k'
    return str(n_shots)
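
# Quick checks of the abbreviation rule above.
assert _abbrev_n_shots(4000) == '4k'
assert _abbrev_n_shots(1500) == '1500'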
def run_readout_scan(task: ReadoutScanTask,
base_dir=None):
"""Execute a :py:class:`ReadoutScanTask` task."""
if base_dir is None:
        base_dir = DEFAULT_BASE_DIR

# repo: sassoftware/conary
def __init__(self, db):
self.db = db
    self.cache = {}
def __init__(self, *args, **keywords):
self.extraFilters = []
    policy.Policy.__init__(self, *args, **keywords)
def updateArgs(self, *args, **keywords):
"""
Call as::
C{I{ClassName}(I{info}, I{filterexp})}
or::
C{I{ClassName}(I{info}, exceptions=I{filterexp})}
where C{I{filterexp}} is either a regular expression or a
tuple of C{(regexp[, setmodes[, unsetmodes]])}
"""
if args:
args = list(args)
info = args.pop(0)
if args:
if not self.included:
self.included = {}
if info not in self.included:
self.included[info] = []
self.included[info].extend(args)
elif 'exceptions' in keywords:
# not the usual exception handling, this is an exception
if not self.excluded:
self.excluded = {}
if info not in self.excluded:
self.excluded[info] = []
self.excluded[info].append(keywords.pop('exceptions'))
else:
        raise TypeError('no paths provided')
    policy.Policy.updateArgs(self, **keywords)
def doFile(self, path):
fullpath = self.recipe.macros.destdir+path
if not util.isregular(fullpath) and not os.path.islink(fullpath):
return
    self.runInfo(path)
def doFile(self, filename):
m = self.recipe.magic[filename]
if m and m.name == "ELF":
# an ELF file cannot be a config file, some programs put
# ELF files under /etc (X, for example), and tag handlers
# can be ELF or shell scripts; we just want tag handlers
# to be config files if they are shell scripts.
# Just in case it was not intentional, warn...
if self.macros.sysconfdir in filename:
self.info('ELF file %s found in config directory', filename)
return
fullpath = self.macros.destdir + filename
if os.path.isfile(fullpath) and util.isregular(fullpath):
if self._fileIsBinary(filename, fullpath):
self.error("binary file '%s' is marked as config" % \
filename)
        self._markConfig(filename, fullpath)
def _addTrailingNewline(self, filename, fullpath):
# FIXME: This exists only for stability; there is no longer
# any need to add trailing newlines to config files. This
# also violates the rule that no files are modified after
# destdir modification has been completed.
self.warn("adding trailing newline to config file '%s'" % \
filename)
mode = os.lstat(fullpath)[stat.ST_MODE]
oldmode = None
if mode & 0600 != 0600:
# need to be able to read and write the file to fix it
oldmode = mode
os.chmod(fullpath, mode|0600)
f = open(fullpath, 'a')
f.seek(0, 2)
f.write('\n')
f.close()
if oldmode is not None:
        os.chmod(fullpath, oldmode)
def __init__(self, *args, **keywords):
"""
@keyword catchall: The component name which gets all otherwise
unassigned files. Default: C{runtime}
"""
_filterSpec.__init__(self, *args, **keywords)
self.configFilters = []
    self.derivedFilters = []
def doProcess(self, recipe):
compFilters = []
self.macros = recipe.macros
self.rootdir = self.rootdir % recipe.macros
self.loadFilterDirs()
# The extras need to come before base in order to override decisions
# in the base subfilters; invariants come first for those very few
# specs that absolutely should not be overridden in recipes.
for filteritem in itertools.chain(self.invariantFilters,
self.extraFilters,
self.derivedFilters,
self.configFilters,
self.baseFilters):
if not isinstance(filteritem, (filter.Filter, filter.PathSet)):
name = filteritem[0] % self.macros
assert(name != 'source')
args, kwargs = self.filterExpArgs(filteritem[1:], name=name)
filteritem = filter.Filter(*args, **kwargs)
compFilters.append(filteritem)
# by default, everything that hasn't matched a filter pattern yet
# goes in the catchall component ('runtime' by default)
compFilters.append(filter.Filter('.*', self.macros, name=self.catchall))
# pass these down to PackageSpec for building the package
    recipe.PackageSpec(compFilters=compFilters)
def warnMissing(missing):
    self.error('%s depends on missing %s', filterName, missing)
def loadFilter(self, filterType, map, filename, fullpath):
# do not load shared libraries
desc = [x for x in imp.get_suffixes() if x[0] == '.py'][0]
f = file(fullpath)
modname = filename[:-3]
m = imp.load_module(modname, f, fullpath, desc)
f.close()
if not 'filters' in m.__dict__:
self.warn('%s missing "filters"; not a valid component'
' specification file', fullpath)
return
filters = m.__dict__['filters']
if filters and len(filters) > 1 and type(filters[1]) not in (list,
tuple):
self.error('invalid expression in %s: filters specification'
" must be ('name', ('expression', ...))", fullpath)
follows = ()
if 'follows' in m.__dict__:
follows = m.__dict__['follows']
precedes = ()
if 'precedes' in m.__dict__:
precedes = m.__dict__['precedes']
    map[modname] = (filters, follows, precedes)
def __init__(self, *args, **keywords):
"""
@keyword compFilters: reserved for C{ComponentSpec} to pass information
needed by C{PackageSpec}.
"""
_filterSpec.__init__(self, *args, **keywords)
self.configFiles = []
    self.derivedFilters = []
def preProcess(self):
self.pkgFilters = []
recipe = self.recipe
self.destdir = recipe.macros.destdir
if self.exceptions:
self.warn('PackageSpec does not honor exceptions')
self.exceptions = None
if self.inclusions:
# would have an effect only with exceptions listed, so no warning...
self.inclusions = None
# userinfo and groupinfo are invariant filters, so they must come first
for infoType in ('user', 'group'):
infoDir = '%%(%sinfodir)s' % infoType % self.macros
realDir = util.joinPaths(self.destdir, infoDir)
if not os.path.isdir(realDir):
continue
for infoPkgName in os.listdir(realDir):
pkgPath = util.joinPaths(infoDir, infoPkgName)
self.pkgFilters.append( \
filter.Filter(pkgPath, self.macros,
name = 'info-%s' % infoPkgName))
# extras need to come before derived so that derived packages
# can change the package to which a file is assigned
for filteritem in itertools.chain(self.extraFilters,
self.derivedFilters):
if not isinstance(filteritem, (filter.Filter, filter.PathSet)):
name = filteritem[0] % self.macros
if not trove.troveNameIsValid(name):
self.error('%s is not a valid package name', name)
args, kwargs = self.filterExpArgs(filteritem[1:], name=name)
self.pkgFilters.append(filter.Filter(*args, **kwargs))
else:
self.pkgFilters.append(filteritem)
# by default, everything that hasn't matched a pattern in the
# main package filter goes in the package named recipe.name
self.pkgFilters.append(filter.Filter('.*', self.macros, name=recipe.name))
# OK, all the filters exist, build an autopackage object that
# knows about them
recipe.autopkg = buildpackage.AutoBuildPackage(
self.pkgFilters, self.compFilters, recipe)
    self.autopkg = recipe.autopkg
def doFile(self, path):
# all policy classes after this require that the initial tree is built
if not self.recipe._getCapsulePathsForFile(path):
realPath = self.destdir + path
        self.autopkg.addFile(path, realPath)
def postInit(self, *args, **kwargs):
self.recipe.Config(exceptions = self.invariantinclusions,
                       allowUnusedFilters = True)
def doFile(self, filename):
fullpath = self.macros.destdir + filename
recipe = self.recipe
if os.path.isfile(fullpath) and util.isregular(fullpath):
self.info(filename)
f = recipe.autopkg.pathMap[filename]
f.flags.isInitialContents(True)
if f.flags.isConfig():
self.error(
'%s is marked as both a configuration file and'
                ' an initial contents file', filename)
def doFile(self, filename):
fullpath = self.macros.destdir + filename
if os.path.isfile(fullpath) and util.isregular(fullpath):
recipe = self.recipe
f = recipe.autopkg.pathMap[filename]
f.flags.isTransient(True)
if f.flags.isConfig() or f.flags.isInitialContents():
self.error(
'%s is marked as both a transient file and'
                ' a configuration or initial contents file', filename)
def doFile(self, path):
if self.recipe._getCapsulePathsForFile(path):
return
fullpath = self.macros.destdir + path
if os.path.isfile(fullpath) and util.isregular(fullpath):
self.info('conary tag file: %s', path)
        self.recipe.autopkg.pathMap[path].tags.set("tagdescription")
def doFile(self, path):
if self.recipe._getCapsulePathsForFile(path):
return
fullpath = self.macros.destdir + path
if os.path.isfile(fullpath) and util.isregular(fullpath):
self.info('conary tag handler: %s', path)
        self.recipe.autopkg.pathMap[path].tags.set("taghandler")
def doProcess(self, recipe):
self.tagList = []
self.buildReqsComputedForTags = set()
self.suggestBuildRequires = set()
# read the system and %(destdir)s tag databases
for directory in (recipe.macros.destdir+'/etc/conary/tags/',
'/etc/conary/tags/'):
if os.path.isdir(directory):
for filename in os.listdir(directory):
path = util.joinPaths(directory, filename)
self.tagList.append(tags.TagFile(path, recipe.macros, True))
self.fullReqs = self.recipe._getTransitiveBuildRequiresNames()
    _addInfo.doProcess(self, recipe)
def runInfo(self, path):
if self.recipe._getCapsulePathsForFile(path):
# capsules do not participate in the tag protocol
return
excludedTags = {}
for tag in self.included:
for filt in self.included[tag]:
if filt.match(path):
isExcluded = False
if tag in self.excluded:
for filt in self.excluded[tag]:
if filt.match(path):
s = excludedTags.setdefault(tag, set())
s.add(path)
isExcluded = True
break
if not isExcluded:
self.markTag(tag, tag, path)
for tag in self.tagList:
if tag.match(path):
if tag.name:
name = tag.name
else:
name = tag.tag
isExcluded = False
if tag.tag in self.excluded:
for filt in self.excluded[tag.tag]:
# exception handling is per-tag, so handled specially
if filt.match(path):
s = excludedTags.setdefault(name, set())
s.add(path)
isExcluded = True
break
if not isExcluded:
self.markTag(name, tag.tag, path, tag)
if excludedTags:
for tag in excludedTags:
self.info('ignoring tag match for %s: %s',
                      tag, ', '.join(sorted(excludedTags[tag])))
def __init__(self, *args, **kwargs):
policy.Policy.__init__(self, *args, **kwargs)
self.ipropFilters = []
self.ipropPaths = [ r'%(prefix)s/lib/iconfig/properties/.*\.iprop' ]
self.contents = []
self.paths = []
self.fileFilters = []
    self.propMap = {}
def doProcess(self, recipe):
for filterSpec, iprop in self.paths:
self.fileFilters.append((
filter.Filter(filterSpec, recipe.macros),
iprop,
))
for ipropPath in self.ipropPaths:
self.ipropFilters.append(
filter.Filter(ipropPath, recipe.macros))
    policy.Policy.doProcess(self, recipe)
def doFile(self, path):
if path not in self.recipe.autopkg.pathMap:
return
for fltr, iprop in self.fileFilters:
if fltr.match(path):
main, comp = self._getComponent(path)
self._parsePropertyData(iprop, main, comp)
# Make sure any remaining files are actually in the root.
fullpath = self.recipe.macros.destdir + path
if not os.path.isfile(fullpath) or not util.isregular(fullpath):
return
    # Check to see if this is an iprop file location that we know about.
for fltr in self.ipropFilters:
if fltr.match(path):
break
else:
return
main, comp = self._getComponent(path)
xml = open(fullpath).read()
    self._parsePropertyData(xml, main, comp)
def _parsePropertyData(self, xml, pkgName, compName):
pkgSet = self.propMap.setdefault(xml, set())
if (pkgName, compName) in pkgSet:
return
pkgSet.add((pkgName, compName))
self.recipe._addProperty(trove._PROPERTY_TYPE_SMARTFORM, pkgName,
                             compName, xml)
def __init__(self, *args, **keywords):
self.devices = []
    policy.Policy.__init__(self, *args, **keywords)
def do(self):
for device, kwargs in self.devices:
r = self.recipe
filename = device[0]
owner = device[4]
group = device[5]
r.Ownership(owner, group, filename)
device[0] = device[0] % r.macros
        r.autopkg.addDevice(*device, **kwargs)
def __init__(self, *args, **keywords):
self.sidbits = {}
self.userbits = {}
    policy.Policy.__init__(self, *args, **keywords)
def doFile(self, path):
# Don't set modes on capsule files
if self.recipe._getCapsulePathsForFile(path):
return
# Skip files that aren't part of the package
if path not in self.recipe.autopkg.pathMap:
return
newmode = oldmode = self.recipe.autopkg.pathMap[path].inode.perms()
if path in self.userbits:
newmode = (newmode & 077077) | self.userbits[path]
if path in self.sidbits and self.sidbits[path]:
newmode |= self.sidbits[path]
self.info('suid/sgid: %s mode 0%o', path, newmode & 07777)
if newmode != oldmode:
        self.recipe.autopkg.pathMap[path].inode.perms.set(newmode)
def do(self):
for component in self.recipe.autopkg.getComponents():
for path in sorted(component.hardlinkMap.keys()):
if self.recipe.autopkg.pathMap[path].flags.isConfig():
self.error("Config file %s has illegal hard links", path)
for path in component.badhardlinks:
self.error("Special file %s has illegal hard links", path) | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def __init__(self, *args, **keywords):
policy.Policy.__init__(self, *args, **keywords)
    self.excepts = set()
def do(self):
if self.recipe.getType() == recipe.RECIPE_TYPE_CAPSULE:
return
filters = [(x, filter.Filter(x, self.macros)) for x in self.excepts]
for component in self.recipe.autopkg.getComponents():
for inode in component.linkGroups:
# ensure all in same directory, except for directories
# matching regexps that have been passed in
allPaths = [x for x in component.linkGroups[inode]]
for path in allPaths[:]:
for regexp, f in filters:
if f.match(path):
self.unusedFilters['exceptions'].discard(regexp)
allPaths.remove(path)
dirSet = set(os.path.dirname(x) + '/' for x in allPaths)
if len(dirSet) > 1:
self.error('files %s are hard links across directories %s',
', '.join(sorted(component.linkGroups[inode])),
', '.join(sorted(list(dirSet))))
self.error('If these directories cannot reasonably be'
' on different filesystems, disable this'
' warning by calling'
" r.LinkCount(exceptions=('%s')) or"
" equivalent"
% "', '".join(sorted(list(dirSet)))) | sassoftware/conary | [
47,
9,
47,
4,
1396904066
] |
def doFile(self, path):
# temporarily do nothing for capsules, we might do something later
if self.recipe._getCapsulePathsForFile(path):
return
fullpath = self.recipe.macros.destdir + os.sep + path
s = os.lstat(fullpath)
mode = s[stat.ST_MODE]
if mode & 0777 != 0755:
self.info('excluding directory %s with mode %o', path, mode&0777)
elif not os.listdir(fullpath):
d = self.recipe.autopkg.pathMap[path]
if d.inode.owner.freeze() != 'root':
self.info('not excluding empty directory %s'
' because of non-root owner', path)
return
elif d.inode.group.freeze() != 'root':
self.info('not excluding empty directory %s'
' because of non-root group', path)
return
self.info('excluding empty directory %s', path)
# if its empty and we're not packaging it, there's no need for it
# to continue to exist on the filesystem to potentially confuse
# other policy actions... see CNP-18
os.rmdir(fullpath)
        self.recipe.autopkg.delFile(path)
def doProcess(self, recipe):
if not self.inclusions:
self.inclusions = []
if not self.exceptions:
self.exceptions = []
recipe.setByDefaultOn(frozenset(self.inclusions))
recipe.setByDefaultOff(frozenset(self.exceptions +
                                     self.invariantexceptions))
def setUserGroupDep(self, path, info, depClass):
componentMap = self.recipe.autopkg.componentMap
if path not in componentMap:
return
pkg = componentMap[path]
f = pkg.getFile(path)
if path not in pkg.requiresMap:
pkg.requiresMap[path] = deps.DependencySet()
    pkg.requiresMap[path].addDep(depClass, deps.Dependency(info, []))
def __init__(self, *args, **keywords):
self.filespecs = []
self.systemusers = ('root',)
self.systemgroups = ('root',)
    policy.Policy.__init__(self, *args, **keywords)
def doProcess(self, recipe):
# we must NEVER take ownership from the filesystem
assert(not self.exceptions)
self.rootdir = self.rootdir % recipe.macros
self.fileFilters = []
for (filespec, user, group) in self.filespecs:
self.fileFilters.append(
(filter.Filter(filespec, recipe.macros),
user %recipe.macros,
group %recipe.macros))
del self.filespecs
    policy.Policy.doProcess(self, recipe)
def __init__(self, *args, **keywords):
self.filespecs = []
    policy.Policy.__init__(self, *args, **keywords)
def doProcess(self, recipe):
self.rootdir = self.rootdir % recipe.macros
self.fileFilters = []
for (filespec, item) in self.filespecs:
self.fileFilters.append(
(filter.Filter(filespec, recipe.macros), item))
del self.filespecs
    policy.Policy.doProcess(self, recipe)
def _markItem(self, path, item):
# pure virtual
    assert(False)
def _markItem(self, path, user):
if not self.recipe._getCapsulePathsForFile(path):
self.info('user %s: %s' % (user, path))
        self.setUserGroupDep(path, user, deps.UserInfoDependencies)
def _markItem(self, path, group):
if not self.recipe._getCapsulePathsForFile(path):
self.info('group %s: %s' % (group, path))
        self.setUserGroupDep(path, group, deps.GroupInfoDependencies)
def __init__(self, *args, **keywords):
self.depMap = {
# component: components that require it if they both exist
'data': frozenset(('lib', 'runtime', 'devellib', 'cil', 'java',
'perl', 'python', 'ruby')),
'devellib': frozenset(('devel',)),
'lib': frozenset(('devel', 'devellib', 'runtime')),
'config': frozenset(('runtime', 'lib', 'devellib', 'devel')),
}
self.overridesMap = {}
    policy.Policy.__init__(self, *args, **keywords)
def do(self):
flags = []
if self.recipe.isCrossCompileTool():
flags.append((_getTargetDepFlag(self.macros), deps.FLAG_SENSE_REQUIRED))
components = self.recipe.autopkg.components
for packageName in [x.name for x in self.recipe.autopkg.packageMap]:
if packageName in self.overridesMap:
d = self.overridesMap[packageName]
else:
d = self.depMap
for requiredComponent in d:
for requiringComponent in d[requiredComponent]:
reqName = ':'.join((packageName, requiredComponent))
wantName = ':'.join((packageName, requiringComponent))
if (reqName in components and wantName in components and
components[reqName] and components[wantName]):
if (d == self.depMap and
reqName in self.recipe._componentReqs and
wantName in self.recipe._componentReqs):
# this is an automatically generated dependency
# which was not in the parent of a derived
                        # package; don't add it here either
continue
# Note: this does not add dependencies to files;
# these dependencies are insufficiently specific
# to attach to files.
ds = deps.DependencySet()
depClass = deps.TroveDependencies
ds.addDep(depClass, deps.Dependency(reqName, flags))
p = components[wantName]
                    p.requires.union(ds)
def __init__(self, *args, **keywords):
self.flags = set()
self.excepts = set()
    policy.Policy.__init__(self, *args, **keywords)
def do(self):
self.excepts = set(re.compile(x) for x in self.excepts)
self.flags = set(x for x in self.flags
if not [y.match(x) for y in self.excepts])
if self.flags:
flags = [ (x % self.macros, deps.FLAG_SENSE_REQUIRED)
for x in self.flags ]
else:
flags = []
if self.recipe.isCrossCompileTool():
flags.append(('target-%s' % self.macros.target,
deps.FLAG_SENSE_REQUIRED))
for component in self.recipe.autopkg.components.values():
component.provides.addDep(deps.TroveDependencies,
                                  deps.Dependency(component.name, flags))
def __init__(self, *args, **kwargs):
# bootstrap keeping only one copy of these around
self.bootstrapPythonFlags = None
self.bootstrapSysPath = []
self.bootstrapPerlIncPath = []
self.bootstrapRubyLibs = []
self.cachedProviders = {}
self.pythonFlagNamespace = None
self.removeFlagsByDependencyClass = None # pre-transform
    self.removeFlagsByDependencyClassMap = {}
def preProcess(self):
self.CILPolicyRE = re.compile(r'.*mono/.*/policy.*/policy.*\.config$')
self.legalCharsRE = re.compile('[.0-9A-Za-z_+-/]')
self.pythonInterpRE = re.compile(r'\.[a-z]+-\d\dm?')
# interpolate macros, using canonical path form with no trailing /
self.sonameSubtrees = set(os.path.normpath(x % self.macros)
for x in self.sonameSubtrees)
self.pythonFlagCache = {}
self.pythonTroveFlagCache = {}
    self.pythonVersionCache = {}
def _isELF(self, m, contents=None):
"Test whether is ELF file and optionally has certain contents"
# Note: for provides, check for 'abi' not 'provides' because we
# can provide the filename even if there is no provides list
# as long as a DT_NEEDED entry has been present to set the abi
    return m and m.name == 'ELF' and self._hasContents(m, contents)
def _isPythonModuleCandidate(self, path):
    return path.endswith('.so') or self._isPython(path)
def _getPythonVersion(self, pythonPath, destdir, libdir):
if pythonPath not in self.pythonVersionCache:
try:
stdout = self._runPythonScript(pythonPath, destdir, libdir,
["import sys", "print('%d.%d' % sys.version_info[:2])"])
self.pythonVersionCache[pythonPath] = stdout.strip()
except (OSError, RuntimeError):
self.warn("Unable to determine Python version directly; "
"guessing based on path.")
self.pythonVersionCache[pythonPath] = self._getPythonVersionFromPath(pythonPath, destdir)
    return self.pythonVersionCache[pythonPath]
def _warnPythonPathNotInDB(self, pathName):
self.warn('%s found on system but not provided by'
' system database; python requirements'
' may be generated incorrectly as a result', pathName)
    return set([])
def _getPythonFlags(self, pathName, bootstrapPythonFlags=None):
if pathName in self.pythonFlagCache:
return self.pythonFlagCache[pathName]
if bootstrapPythonFlags:
self.pythonFlagCache[pathName] = bootstrapPythonFlags
return self.pythonFlagCache[pathName]
db = self._getDb()
foundPath = False
# FIXME: This should be iterFilesByPath when implemented (CNY-1833)
# For now, cache all the python deps in all the files in the
# trove(s) so that we iterate over each trove only once
containingTroveList = db.iterTrovesByPath(pathName)
for containerTrove in containingTroveList:
for pathid, p, fileid, v in containerTrove.iterFileList():
if pathName == p:
foundPath = True
pythonFlags = set()
f = files.ThawFile(db.getFileStream(fileid), pathid)
for dep in f.provides().iterDepsByClass(
deps.PythonDependencies):
flagNames = [x[0] for x in dep.getFlags()[0]]
pythonFlags.update(flagNames)
self.pythonFlagCache[p] = pythonFlags
if not foundPath:
self.pythonFlagCache[pathName] = self._warnPythonPathNotInDB(
pathName)
    return self.pythonFlagCache[pathName]
def _stringIsPythonVersion(self, s):
    return not set(s).difference(set('.0123456789'))
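
# Quick checks of the rule above; ``self`` is unused, so ``None`` stands in
# for it in these illustrative calls.
assert _stringIsPythonVersion(None, '2.7') is True
assert _stringIsPythonVersion(None, '2.7b1') is False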