body_hash (string, 64 chars) | body (string, 23-109k chars) | docstring (string, 1-57k chars) | path (string, 4-198 chars) | name (string, 1-115 chars) | repository_name (string, 7-111 chars) | repository_stars (float64, 0-191k) | lang (stringclasses, 1 value) | body_without_docstring (string, 14-108k chars) | unified (string, 45-133k chars) |
---|---|---|---|---|---|---|---|---|---|
d95e295f58693a3bc1898beac9ed474c88b9104195d968e46f0e74af48f35632
|
def test_bool_for_numeric(self):
'Test that validator does not allow bool data where numeric is specified.'
self.set_up_spec('numeric')
value = np.bool(1)
bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)])
results = self.vmap.validate(bar_builder)
result_strings = set([str(s) for s in results])
expected_errors = {"Bar/attr1 (my_bar.attr1): incorrect type - expected 'numeric', got 'bool'", "Bar/data (my_bar/data): incorrect type - expected 'numeric', got 'bool'"}
self.assertEqual(result_strings, expected_errors)
|
Test that validator does not allow bool data where numeric is specified.
|
tests/unit/validator_tests/test_validate.py
|
test_bool_for_numeric
|
hrnciar/hdmf
| 0 |
python
|
def test_bool_for_numeric(self):
self.set_up_spec('numeric')
value = np.bool(1)
bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)])
results = self.vmap.validate(bar_builder)
result_strings = set([str(s) for s in results])
expected_errors = {"Bar/attr1 (my_bar.attr1): incorrect type - expected 'numeric', got 'bool'", "Bar/data (my_bar/data): incorrect type - expected 'numeric', got 'bool'"}
self.assertEqual(result_strings, expected_errors)
|
def test_bool_for_numeric(self):
self.set_up_spec('numeric')
value = np.bool(1)
bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)])
results = self.vmap.validate(bar_builder)
result_strings = set([str(s) for s in results])
expected_errors = {"Bar/attr1 (my_bar.attr1): incorrect type - expected 'numeric', got 'bool'", "Bar/data (my_bar/data): incorrect type - expected 'numeric', got 'bool'"}
self.assertEqual(result_strings, expected_errors)<|docstring|>Test that validator does not allow bool data where numeric is specified.<|endoftext|>
|
77798ebd3d6cb5195ecf4a5081d5cc127eba1d7c83a663968e16f3fb0b82c0c6
|
def test_np_bool_for_bool(self):
'Test that validator allows np.bool_ data where bool is specified.'
self.set_up_spec('bool')
value = np.bool_(True)
bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)])
results = self.vmap.validate(bar_builder)
self.assertEqual(len(results), 0)
|
Test that validator allows np.bool_ data where bool is specified.
|
tests/unit/validator_tests/test_validate.py
|
test_np_bool_for_bool
|
hrnciar/hdmf
| 0 |
python
|
def test_np_bool_for_bool(self):
self.set_up_spec('bool')
value = np.bool_(True)
bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)])
results = self.vmap.validate(bar_builder)
self.assertEqual(len(results), 0)
|
def test_np_bool_for_bool(self):
self.set_up_spec('bool')
value = np.bool_(True)
bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)])
results = self.vmap.validate(bar_builder)
self.assertEqual(len(results), 0)<|docstring|>Test that validator allows np.bool_ data where bool is specified.<|endoftext|>
|
20a45a43d761af8f1fe0daa73b37dd88d93dd168a4d0008348ab0253df804bb7
|
def test_scalar(self):
'Test that validator does not allow a scalar where an array is specified.'
self.set_up_spec('text')
value = 'a string'
bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)])
results = self.vmap.validate(bar_builder)
result_strings = set([str(s) for s in results])
expected_errors = {"Bar/attr1 (my_bar.attr1): incorrect shape - expected an array of shape '(None,)', got non-array data 'a string'", "Bar/data (my_bar/data): incorrect shape - expected an array of shape '(None,)', got non-array data 'a string'"}
self.assertEqual(result_strings, expected_errors)
|
Test that validator does not allow a scalar where an array is specified.
|
tests/unit/validator_tests/test_validate.py
|
test_scalar
|
hrnciar/hdmf
| 0 |
python
|
def test_scalar(self):
self.set_up_spec('text')
value = 'a string'
bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)])
results = self.vmap.validate(bar_builder)
result_strings = set([str(s) for s in results])
expected_errors = {"Bar/attr1 (my_bar.attr1): incorrect shape - expected an array of shape '(None,)', got non-array data 'a string'", "Bar/data (my_bar/data): incorrect shape - expected an array of shape '(None,)', got non-array data 'a string'"}
self.assertEqual(result_strings, expected_errors)
|
def test_scalar(self):
self.set_up_spec('text')
value = 'a string'
bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)])
results = self.vmap.validate(bar_builder)
result_strings = set([str(s) for s in results])
expected_errors = {"Bar/attr1 (my_bar.attr1): incorrect shape - expected an array of shape '(None,)', got non-array data 'a string'", "Bar/data (my_bar/data): incorrect shape - expected an array of shape '(None,)', got non-array data 'a string'"}
self.assertEqual(result_strings, expected_errors)<|docstring|>Test that validator does not allow a scalar where an array is specified.<|endoftext|>
|
9e06a931526eca1bb623a4a1d19168ec0733f8f053107b3847cacd248a702c04
|
def test_empty_list(self):
'Test that validator allows an empty list where an array is specified.'
self.set_up_spec('text')
value = []
bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)])
results = self.vmap.validate(bar_builder)
self.assertEqual(len(results), 0)
|
Test that validator allows an empty list where an array is specified.
|
tests/unit/validator_tests/test_validate.py
|
test_empty_list
|
hrnciar/hdmf
| 0 |
python
|
def test_empty_list(self):
self.set_up_spec('text')
value = []
bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)])
results = self.vmap.validate(bar_builder)
self.assertEqual(len(results), 0)
|
def test_empty_list(self):
self.set_up_spec('text')
value = []
bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)])
results = self.vmap.validate(bar_builder)
self.assertEqual(len(results), 0)<|docstring|>Test that validator allows an empty list where an array is specified.<|endoftext|>
|
d03ba0853f332d0a3830df51e85332afbd1ebd1cae826765a178d2db249580a2
|
def test_empty_nparray(self):
'Test that validator allows an empty numpy array where an array is specified.'
self.set_up_spec('text')
value = np.array([])
bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)])
results = self.vmap.validate(bar_builder)
self.assertEqual(len(results), 0)
|
Test that validator allows an empty numpy array where an array is specified.
|
tests/unit/validator_tests/test_validate.py
|
test_empty_nparray
|
hrnciar/hdmf
| 0 |
python
|
def test_empty_nparray(self):
self.set_up_spec('text')
value = np.array([])
bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)])
results = self.vmap.validate(bar_builder)
self.assertEqual(len(results), 0)
|
def test_empty_nparray(self):
self.set_up_spec('text')
value = np.array([])
bar_builder = GroupBuilder('my_bar', attributes={'data_type': 'Bar', 'attr1': value}, datasets=[DatasetBuilder('data', value)])
results = self.vmap.validate(bar_builder)
self.assertEqual(len(results), 0)<|docstring|>Test that validator allows an empty numpy array where an array is specified.<|endoftext|>
|
885f79a43d1c76966a34d41aae8eba7f158c4ad9555daabbe8d68f30c2e95988
|
def validate_linkability(self, link, expect_error):
'Execute a linkability test and assert whether or not an IllegalLinkError is returned'
self.set_up_spec()
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, links=[link])
result = self.vmap.validate(builder)
if expect_error:
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], IllegalLinkError)
else:
self.assertEqual(len(result), 0)
|
Execute a linkability test and assert whether or not an IllegalLinkError is returned
|
tests/unit/validator_tests/test_validate.py
|
validate_linkability
|
hrnciar/hdmf
| 0 |
python
|
def validate_linkability(self, link, expect_error):
self.set_up_spec()
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, links=[link])
result = self.vmap.validate(builder)
if expect_error:
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], IllegalLinkError)
else:
self.assertEqual(len(result), 0)
|
def validate_linkability(self, link, expect_error):
self.set_up_spec()
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, links=[link])
result = self.vmap.validate(builder)
if expect_error:
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], IllegalLinkError)
else:
self.assertEqual(len(result), 0)<|docstring|>Execute a linkability test and assert whether or not an IllegalLinkError is returned<|endoftext|>
|
e1b730bf67ba8aded73b528710843052461faf18682e33372d85cff1d73de5cb
|
def test_untyped_linkable_dataset_accepts_link(self):
'Test that the validator accepts a link when the spec has an untyped linkable dataset'
link = LinkBuilder(name='untyped_linkable_ds', builder=DatasetBuilder('foo'))
self.validate_linkability(link, expect_error=False)
|
Test that the validator accepts a link when the spec has an untyped linkable dataset
|
tests/unit/validator_tests/test_validate.py
|
test_untyped_linkable_dataset_accepts_link
|
hrnciar/hdmf
| 0 |
python
|
def test_untyped_linkable_dataset_accepts_link(self):
link = LinkBuilder(name='untyped_linkable_ds', builder=DatasetBuilder('foo'))
self.validate_linkability(link, expect_error=False)
|
def test_untyped_linkable_dataset_accepts_link(self):
link = LinkBuilder(name='untyped_linkable_ds', builder=DatasetBuilder('foo'))
self.validate_linkability(link, expect_error=False)<|docstring|>Test that the validator accepts a link when the spec has an untyped linkable dataset<|endoftext|>
|
f6e030c369aff3cf30694027d4bb78417ec5e4a9ecc4ba6bc1d0168747dffe3d
|
def test_untyped_nonlinkable_dataset_does_not_accept_link(self):
'Test that the validator returns an IllegalLinkError when the spec has an untyped non-linkable dataset'
link = LinkBuilder(name='untyped_nonlinkable_ds', builder=DatasetBuilder('foo'))
self.validate_linkability(link, expect_error=True)
|
Test that the validator returns an IllegalLinkError when the spec has an untyped non-linkable dataset
|
tests/unit/validator_tests/test_validate.py
|
test_untyped_nonlinkable_dataset_does_not_accept_link
|
hrnciar/hdmf
| 0 |
python
|
def test_untyped_nonlinkable_dataset_does_not_accept_link(self):
link = LinkBuilder(name='untyped_nonlinkable_ds', builder=DatasetBuilder('foo'))
self.validate_linkability(link, expect_error=True)
|
def test_untyped_nonlinkable_dataset_does_not_accept_link(self):
link = LinkBuilder(name='untyped_nonlinkable_ds', builder=DatasetBuilder('foo'))
self.validate_linkability(link, expect_error=True)<|docstring|>Test that the validator returns an IllegalLinkError when the spec has an untyped non-linkable dataset<|endoftext|>
|
96c8b66e25e39aa59b8a16adba28e553e2cfaf696a6eb865e562cff266db0c29
|
def test_typed_linkable_dataset_accepts_link(self):
'Test that the validator accepts a link when the spec has a typed linkable dataset'
link = LinkBuilder(name='typed_linkable_ds', builder=DatasetBuilder('foo', attributes={'data_type': 'Foo'}))
self.validate_linkability(link, expect_error=False)
|
Test that the validator accepts a link when the spec has a typed linkable dataset
|
tests/unit/validator_tests/test_validate.py
|
test_typed_linkable_dataset_accepts_link
|
hrnciar/hdmf
| 0 |
python
|
def test_typed_linkable_dataset_accepts_link(self):
link = LinkBuilder(name='typed_linkable_ds', builder=DatasetBuilder('foo', attributes={'data_type': 'Foo'}))
self.validate_linkability(link, expect_error=False)
|
def test_typed_linkable_dataset_accepts_link(self):
link = LinkBuilder(name='typed_linkable_ds', builder=DatasetBuilder('foo', attributes={'data_type': 'Foo'}))
self.validate_linkability(link, expect_error=False)<|docstring|>Test that the validator accepts a link when the spec has a typed linkable dataset<|endoftext|>
|
bdd6d005adc2a22e6336f6abf889adcaae2940298cb2e1fcdc941fe48f094ddf
|
def test_typed_nonlinkable_dataset_does_not_accept_link(self):
'Test that the validator returns an IllegalLinkError when the spec has a typed non-linkable dataset'
link = LinkBuilder(name='typed_nonlinkable_ds', builder=DatasetBuilder('foo', attributes={'data_type': 'Foo'}))
self.validate_linkability(link, expect_error=True)
|
Test that the validator returns an IllegalLinkError when the spec has a typed non-linkable dataset
|
tests/unit/validator_tests/test_validate.py
|
test_typed_nonlinkable_dataset_does_not_accept_link
|
hrnciar/hdmf
| 0 |
python
|
def test_typed_nonlinkable_dataset_does_not_accept_link(self):
link = LinkBuilder(name='typed_nonlinkable_ds', builder=DatasetBuilder('foo', attributes={'data_type': 'Foo'}))
self.validate_linkability(link, expect_error=True)
|
def test_typed_nonlinkable_dataset_does_not_accept_link(self):
link = LinkBuilder(name='typed_nonlinkable_ds', builder=DatasetBuilder('foo', attributes={'data_type': 'Foo'}))
self.validate_linkability(link, expect_error=True)<|docstring|>Test that the validator returns an IllegalLinkError when the spec has a typed non-linkable dataset<|endoftext|>
|
4423e32f2e293af708d957331db686b831f7f7ff896c7fab8ee88c1f4b503148
|
def test_untyped_linkable_group_accepts_link(self):
'Test that the validator accepts a link when the spec has an untyped linkable group'
link = LinkBuilder(name='untyped_linkable_group', builder=GroupBuilder('foo'))
self.validate_linkability(link, expect_error=False)
|
Test that the validator accepts a link when the spec has an untyped linkable group
|
tests/unit/validator_tests/test_validate.py
|
test_untyped_linkable_group_accepts_link
|
hrnciar/hdmf
| 0 |
python
|
def test_untyped_linkable_group_accepts_link(self):
link = LinkBuilder(name='untyped_linkable_group', builder=GroupBuilder('foo'))
self.validate_linkability(link, expect_error=False)
|
def test_untyped_linkable_group_accepts_link(self):
link = LinkBuilder(name='untyped_linkable_group', builder=GroupBuilder('foo'))
self.validate_linkability(link, expect_error=False)<|docstring|>Test that the validator accepts a link when the spec has an untyped linkable group<|endoftext|>
|
acf0206ac145d0ebc1c26c7a32391b84d647ba8ca7b94c257ce17a8fe1828f03
|
def test_untyped_nonlinkable_group_does_not_accept_link(self):
'Test that the validator returns an IllegalLinkError when the spec has an untyped non-linkable group'
link = LinkBuilder(name='untyped_nonlinkable_group', builder=GroupBuilder('foo'))
self.validate_linkability(link, expect_error=True)
|
Test that the validator returns an IllegalLinkError when the spec has an untyped non-linkable group
|
tests/unit/validator_tests/test_validate.py
|
test_untyped_nonlinkable_group_does_not_accept_link
|
hrnciar/hdmf
| 0 |
python
|
def test_untyped_nonlinkable_group_does_not_accept_link(self):
link = LinkBuilder(name='untyped_nonlinkable_group', builder=GroupBuilder('foo'))
self.validate_linkability(link, expect_error=True)
|
def test_untyped_nonlinkable_group_does_not_accept_link(self):
link = LinkBuilder(name='untyped_nonlinkable_group', builder=GroupBuilder('foo'))
self.validate_linkability(link, expect_error=True)<|docstring|>Test that the validator returns an IllegalLinkError when the spec has an untyped non-linkable group<|endoftext|>
|
dc70e062ba05c1211a16156d345ecd58636e96563a3a4316ab6df3e748012951
|
def test_typed_linkable_group_accepts_link(self):
'Test that the validator accepts a link when the spec has a typed linkable group'
link = LinkBuilder(name='typed_linkable_group', builder=GroupBuilder('foo', attributes={'data_type': 'Bar'}))
self.validate_linkability(link, expect_error=False)
|
Test that the validator accepts a link when the spec has a typed linkable group
|
tests/unit/validator_tests/test_validate.py
|
test_typed_linkable_group_accepts_link
|
hrnciar/hdmf
| 0 |
python
|
def test_typed_linkable_group_accepts_link(self):
link = LinkBuilder(name='typed_linkable_group', builder=GroupBuilder('foo', attributes={'data_type': 'Bar'}))
self.validate_linkability(link, expect_error=False)
|
def test_typed_linkable_group_accepts_link(self):
link = LinkBuilder(name='typed_linkable_group', builder=GroupBuilder('foo', attributes={'data_type': 'Bar'}))
self.validate_linkability(link, expect_error=False)<|docstring|>Test that the validator accepts a link when the spec has a typed linkable group<|endoftext|>
|
eb5185c59151146924bafd04cb7853d2743a10732581107569168ce36ad0bfde
|
def test_typed_nonlinkable_group_does_not_accept_link(self):
'Test that the validator returns an IllegalLinkError when the spec has a typed non-linkable group'
link = LinkBuilder(name='typed_nonlinkable_group', builder=GroupBuilder('foo', attributes={'data_type': 'Bar'}))
self.validate_linkability(link, expect_error=True)
|
Test that the validator returns an IllegalLinkError when the spec has a typed non-linkable group
|
tests/unit/validator_tests/test_validate.py
|
test_typed_nonlinkable_group_does_not_accept_link
|
hrnciar/hdmf
| 0 |
python
|
def test_typed_nonlinkable_group_does_not_accept_link(self):
link = LinkBuilder(name='typed_nonlinkable_group', builder=GroupBuilder('foo', attributes={'data_type': 'Bar'}))
self.validate_linkability(link, expect_error=True)
|
def test_typed_nonlinkable_group_does_not_accept_link(self):
link = LinkBuilder(name='typed_nonlinkable_group', builder=GroupBuilder('foo', attributes={'data_type': 'Bar'}))
self.validate_linkability(link, expect_error=True)<|docstring|>Test that the validator returns an IllegalLinkError when the spec has a typed non-linkable group<|endoftext|>
|
c3b2e6313476903f68b9845a7ecd809fa550d9bdd4f38dc118e11ec351f53cde
|
@mock.patch('hdmf.validate.validator.DatasetValidator.validate')
def test_should_not_validate_illegally_linked_objects(self, mock_validator):
'Test that an illegally linked child dataset is not validated\n\n Note: this behavior is expected to change in the future:\n https://github.com/hdmf-dev/hdmf/issues/516\n '
self.set_up_spec()
typed_link = LinkBuilder(name='typed_nonlinkable_ds', builder=DatasetBuilder('foo', attributes={'data_type': 'Foo'}))
untyped_link = LinkBuilder(name='untyped_nonlinkable_ds', builder=DatasetBuilder('foo'))
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, links=[typed_link, untyped_link])
_ = self.vmap.validate(builder)
assert (not mock_validator.called)
|
Test that an illegally linked child dataset is not validated
Note: this behavior is expected to change in the future:
https://github.com/hdmf-dev/hdmf/issues/516
|
tests/unit/validator_tests/test_validate.py
|
test_should_not_validate_illegally_linked_objects
|
hrnciar/hdmf
| 0 |
python
|
@mock.patch('hdmf.validate.validator.DatasetValidator.validate')
def test_should_not_validate_illegally_linked_objects(self, mock_validator):
'Test that an illegally linked child dataset is not validated\n\n Note: this behavior is expected to change in the future:\n https://github.com/hdmf-dev/hdmf/issues/516\n '
self.set_up_spec()
typed_link = LinkBuilder(name='typed_nonlinkable_ds', builder=DatasetBuilder('foo', attributes={'data_type': 'Foo'}))
untyped_link = LinkBuilder(name='untyped_nonlinkable_ds', builder=DatasetBuilder('foo'))
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, links=[typed_link, untyped_link])
_ = self.vmap.validate(builder)
assert (not mock_validator.called)
|
@mock.patch('hdmf.validate.validator.DatasetValidator.validate')
def test_should_not_validate_illegally_linked_objects(self, mock_validator):
'Test that an illegally linked child dataset is not validated\n\n Note: this behavior is expected to change in the future:\n https://github.com/hdmf-dev/hdmf/issues/516\n '
self.set_up_spec()
typed_link = LinkBuilder(name='typed_nonlinkable_ds', builder=DatasetBuilder('foo', attributes={'data_type': 'Foo'}))
untyped_link = LinkBuilder(name='untyped_nonlinkable_ds', builder=DatasetBuilder('foo'))
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, links=[typed_link, untyped_link])
_ = self.vmap.validate(builder)
assert (not mock_validator.called)<|docstring|>Test that an illegally linked child dataset is not validated
Note: this behavior is expected to change in the future:
https://github.com/hdmf-dev/hdmf/issues/516<|endoftext|>
|
9669e2d39ade896144094fb5c7bdff73ed74c42796b364ef9f61fafd34eae54a
|
def validate_multiple_children(self, dataset_names, group_names):
'Utility function to validate a builder with the specified named dataset and group children'
self.set_up_spec()
datasets = [DatasetBuilder(ds, attributes={'data_type': 'Foo'}) for ds in dataset_names]
groups = [GroupBuilder(gr, attributes={'data_type': 'Bar'}) for gr in group_names]
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, datasets=datasets, groups=groups)
return self.vmap.validate(builder)
|
Utility function to validate a builder with the specified named dataset and group children
|
tests/unit/validator_tests/test_validate.py
|
validate_multiple_children
|
hrnciar/hdmf
| 0 |
python
|
def validate_multiple_children(self, dataset_names, group_names):
self.set_up_spec()
datasets = [DatasetBuilder(ds, attributes={'data_type': 'Foo'}) for ds in dataset_names]
groups = [GroupBuilder(gr, attributes={'data_type': 'Bar'}) for gr in group_names]
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, datasets=datasets, groups=groups)
return self.vmap.validate(builder)
|
def validate_multiple_children(self, dataset_names, group_names):
self.set_up_spec()
datasets = [DatasetBuilder(ds, attributes={'data_type': 'Foo'}) for ds in dataset_names]
groups = [GroupBuilder(gr, attributes={'data_type': 'Bar'}) for gr in group_names]
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, datasets=datasets, groups=groups)
return self.vmap.validate(builder)<|docstring|>Utility function to validate a builder with the specified named dataset and group children<|endoftext|>
|
01a648eafc5fa92d59acc120a8b742a3115e6c72998cf3ad8725016759a1618b
|
def test_missing_first_dataset_should_return_error(self):
'Test that the validator returns a MissingDataType error if the first dataset is missing'
result = self.validate_multiple_children(['b'], ['x', 'y'])
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)
|
Test that the validator returns a MissingDataType error if the first dataset is missing
|
tests/unit/validator_tests/test_validate.py
|
test_missing_first_dataset_should_return_error
|
hrnciar/hdmf
| 0 |
python
|
def test_missing_first_dataset_should_return_error(self):
result = self.validate_multiple_children(['b'], ['x', 'y'])
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)
|
def test_missing_first_dataset_should_return_error(self):
result = self.validate_multiple_children(['b'], ['x', 'y'])
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)<|docstring|>Test that the validator returns a MissingDataType error if the first dataset is missing<|endoftext|>
|
8968bc7e5611cfe370d45fc8f6bdddbdabf3793483e152a7f33b2d99c7c8b22e
|
def test_missing_last_dataset_should_return_error(self):
'Test that the validator returns a MissingDataType error if the last dataset is missing'
result = self.validate_multiple_children(['a'], ['x', 'y'])
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)
|
Test that the validator returns a MissingDataType error if the last dataset is missing
|
tests/unit/validator_tests/test_validate.py
|
test_missing_last_dataset_should_return_error
|
hrnciar/hdmf
| 0 |
python
|
def test_missing_last_dataset_should_return_error(self):
result = self.validate_multiple_children(['a'], ['x', 'y'])
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)
|
def test_missing_last_dataset_should_return_error(self):
result = self.validate_multiple_children(['a'], ['x', 'y'])
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)<|docstring|>Test that the validator returns a MissingDataType error if the last dataset is missing<|endoftext|>
|
f96b07999009e94467700b27caf59007440ba23b6edb978f6fda897aded43e0b
|
def test_missing_first_group_should_return_error(self):
'Test that the validator returns a MissingDataType error if the first group is missing'
result = self.validate_multiple_children(['a', 'b'], ['y'])
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)
|
Test that the validator returns a MissingDataType error if the first group is missing
|
tests/unit/validator_tests/test_validate.py
|
test_missing_first_group_should_return_error
|
hrnciar/hdmf
| 0 |
python
|
def test_missing_first_group_should_return_error(self):
result = self.validate_multiple_children(['a', 'b'], ['y'])
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)
|
def test_missing_first_group_should_return_error(self):
result = self.validate_multiple_children(['a', 'b'], ['y'])
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)<|docstring|>Test that the validator returns a MissingDataType error if the first group is missing<|endoftext|>
|
b5e8b831d2cf8f4dee55d820212536f4a011c08f8e6335f0b0dccd8ecf2f200b
|
def test_missing_last_group_should_return_error(self):
'Test that the validator returns a MissingDataType error if the last group is missing'
result = self.validate_multiple_children(['a', 'b'], ['x'])
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)
|
Test that the validator returns a MissingDataType error if the last group is missing
|
tests/unit/validator_tests/test_validate.py
|
test_missing_last_group_should_return_error
|
hrnciar/hdmf
| 0 |
python
|
def test_missing_last_group_should_return_error(self):
result = self.validate_multiple_children(['a', 'b'], ['x'])
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)
|
def test_missing_last_group_should_return_error(self):
result = self.validate_multiple_children(['a', 'b'], ['x'])
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)<|docstring|>Test that the validator returns a MissingDataType error if the last group is missing<|endoftext|>
|
75796437674bfb8d315bc15e8f175066000515f22d27eff5c0ea197f56734883
|
def test_no_errors_when_all_children_satisfied(self):
'Test that the validator does not return an error if all child specs are satisfied'
result = self.validate_multiple_children(['a', 'b'], ['x', 'y'])
self.assertEqual(len(result), 0)
|
Test that the validator does not return an error if all child specs are satisfied
|
tests/unit/validator_tests/test_validate.py
|
test_no_errors_when_all_children_satisfied
|
hrnciar/hdmf
| 0 |
python
|
def test_no_errors_when_all_children_satisfied(self):
result = self.validate_multiple_children(['a', 'b'], ['x', 'y'])
self.assertEqual(len(result), 0)
|
def test_no_errors_when_all_children_satisfied(self):
result = self.validate_multiple_children(['a', 'b'], ['x', 'y'])
self.assertEqual(len(result), 0)<|docstring|>Test that the validator does not return an error if all child specs are satisfied<|endoftext|>
|
3bcb7ae5eed431929b29408452dcae582a614b674c6e1b68240f85c73cf483be
|
def validate_matching_link_data_type_case(self, datasets, groups, links):
'Execute validation against a group builder using the provided group\n children and verify that a MissingDataType error is returned\n '
self.set_up_spec()
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, datasets=datasets, groups=groups, links=links)
result = self.vmap.validate(builder)
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)
|
Execute validation against a group builder using the provided group
children and verify that a MissingDataType error is returned
|
tests/unit/validator_tests/test_validate.py
|
validate_matching_link_data_type_case
|
hrnciar/hdmf
| 0 |
python
|
def validate_matching_link_data_type_case(self, datasets, groups, links):
'Execute validation against a group builder using the provided group\n children and verify that a MissingDataType error is returned\n '
self.set_up_spec()
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, datasets=datasets, groups=groups, links=links)
result = self.vmap.validate(builder)
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)
|
def validate_matching_link_data_type_case(self, datasets, groups, links):
'Execute validation against a group builder using the provided group\n children and verify that a MissingDataType error is returned\n '
self.set_up_spec()
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, datasets=datasets, groups=groups, links=links)
result = self.vmap.validate(builder)
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)<|docstring|>Execute validation against a group builder using the provided group
children and verify that a MissingDataType error is returned<|endoftext|>
|
06ba0c1d7b236b0a26b33fccde7fa9987d301794c409d8de9c8fd1298a9d0e27
|
def test_error_on_missing_child_dataset(self):
'Test that a MissingDataType is returned when the child dataset is missing'
datasets = []
groups = [GroupBuilder('group', attributes={'data_type': 'Bar'})]
links = [LinkBuilder(name='dataset_link', builder=DatasetBuilder('foo', attributes={'data_type': 'Foo'})), LinkBuilder(name='group_link', builder=GroupBuilder('bar', attributes={'data_type': 'Bar'}))]
self.validate_matching_link_data_type_case(datasets, groups, links)
|
Test that a MissingDataType is returned when the child dataset is missing
|
tests/unit/validator_tests/test_validate.py
|
test_error_on_missing_child_dataset
|
hrnciar/hdmf
| 0 |
python
|
def test_error_on_missing_child_dataset(self):
datasets = []
groups = [GroupBuilder('group', attributes={'data_type': 'Bar'})]
links = [LinkBuilder(name='dataset_link', builder=DatasetBuilder('foo', attributes={'data_type': 'Foo'})), LinkBuilder(name='group_link', builder=GroupBuilder('bar', attributes={'data_type': 'Bar'}))]
self.validate_matching_link_data_type_case(datasets, groups, links)
|
def test_error_on_missing_child_dataset(self):
datasets = []
groups = [GroupBuilder('group', attributes={'data_type': 'Bar'})]
links = [LinkBuilder(name='dataset_link', builder=DatasetBuilder('foo', attributes={'data_type': 'Foo'})), LinkBuilder(name='group_link', builder=GroupBuilder('bar', attributes={'data_type': 'Bar'}))]
self.validate_matching_link_data_type_case(datasets, groups, links)<|docstring|>Test that a MissingDataType is returned when the child dataset is missing<|endoftext|>
|
90f1358e67968dd65009016f0a24319038ec082e21c3d0ca596f11e68e46a728
|
def test_error_on_missing_linked_dataset(self):
'Test that a MissingDataType is returned when the linked dataset is missing'
datasets = [DatasetBuilder('dataset', attributes={'data_type': 'Foo'})]
groups = [GroupBuilder('group', attributes={'data_type': 'Bar'})]
links = [LinkBuilder(name='group_link', builder=GroupBuilder('bar', attributes={'data_type': 'Bar'}))]
self.validate_matching_link_data_type_case(datasets, groups, links)
|
Test that a MissingDataType is returned when the linked dataset is missing
|
tests/unit/validator_tests/test_validate.py
|
test_error_on_missing_linked_dataset
|
hrnciar/hdmf
| 0 |
python
|
def test_error_on_missing_linked_dataset(self):
datasets = [DatasetBuilder('dataset', attributes={'data_type': 'Foo'})]
groups = [GroupBuilder('group', attributes={'data_type': 'Bar'})]
links = [LinkBuilder(name='group_link', builder=GroupBuilder('bar', attributes={'data_type': 'Bar'}))]
self.validate_matching_link_data_type_case(datasets, groups, links)
|
def test_error_on_missing_linked_dataset(self):
datasets = [DatasetBuilder('dataset', attributes={'data_type': 'Foo'})]
groups = [GroupBuilder('group', attributes={'data_type': 'Bar'})]
links = [LinkBuilder(name='group_link', builder=GroupBuilder('bar', attributes={'data_type': 'Bar'}))]
self.validate_matching_link_data_type_case(datasets, groups, links)<|docstring|>Test that a MissingDataType is returned when the linked dataset is missing<|endoftext|>
|
719590ecaa52f6913982c25cf6a6956d247041ceb3d7a2bbdaac08c3a2369a1e
|
def test_error_on_missing_group(self):
'Test that a MissingDataType is returned when the child group is missing'
self.set_up_spec()
datasets = [DatasetBuilder('dataset', attributes={'data_type': 'Foo'})]
groups = []
links = [LinkBuilder(name='dataset_link', builder=DatasetBuilder('foo', attributes={'data_type': 'Foo'})), LinkBuilder(name='group_link', builder=GroupBuilder('bar', attributes={'data_type': 'Bar'}))]
self.validate_matching_link_data_type_case(datasets, groups, links)
|
Test that a MissingDataType is returned when the child group is missing
|
tests/unit/validator_tests/test_validate.py
|
test_error_on_missing_group
|
hrnciar/hdmf
| 0 |
python
|
def test_error_on_missing_group(self):
self.set_up_spec()
datasets = [DatasetBuilder('dataset', attributes={'data_type': 'Foo'})]
groups = []
links = [LinkBuilder(name='dataset_link', builder=DatasetBuilder('foo', attributes={'data_type': 'Foo'})), LinkBuilder(name='group_link', builder=GroupBuilder('bar', attributes={'data_type': 'Bar'}))]
self.validate_matching_link_data_type_case(datasets, groups, links)
|
def test_error_on_missing_group(self):
self.set_up_spec()
datasets = [DatasetBuilder('dataset', attributes={'data_type': 'Foo'})]
groups = []
links = [LinkBuilder(name='dataset_link', builder=DatasetBuilder('foo', attributes={'data_type': 'Foo'})), LinkBuilder(name='group_link', builder=GroupBuilder('bar', attributes={'data_type': 'Bar'}))]
self.validate_matching_link_data_type_case(datasets, groups, links)<|docstring|>Test that a MissingDataType is returned when the child group is missing<|endoftext|>
|
7f3f556477e842f81b9e70d731ebc4a60a336dbdc7a185a34f10a815e86192aa
|
def test_error_on_missing_linked_group(self):
'Test that a MissingDataType is returned when the linked group is missing'
self.set_up_spec()
datasets = [DatasetBuilder('dataset', attributes={'data_type': 'Foo'})]
groups = [GroupBuilder('group', attributes={'data_type': 'Bar'})]
links = [LinkBuilder(name='dataset_link', builder=DatasetBuilder('foo', attributes={'data_type': 'Foo'}))]
self.validate_matching_link_data_type_case(datasets, groups, links)
|
Test that a MissingDataType is returned when the linked group is missing
|
tests/unit/validator_tests/test_validate.py
|
test_error_on_missing_linked_group
|
hrnciar/hdmf
| 0 |
python
|
def test_error_on_missing_linked_group(self):
self.set_up_spec()
datasets = [DatasetBuilder('dataset', attributes={'data_type': 'Foo'})]
groups = [GroupBuilder('group', attributes={'data_type': 'Bar'})]
links = [LinkBuilder(name='dataset_link', builder=DatasetBuilder('foo', attributes={'data_type': 'Foo'}))]
self.validate_matching_link_data_type_case(datasets, groups, links)
|
def test_error_on_missing_linked_group(self):
self.set_up_spec()
datasets = [DatasetBuilder('dataset', attributes={'data_type': 'Foo'})]
groups = [GroupBuilder('group', attributes={'data_type': 'Bar'})]
links = [LinkBuilder(name='dataset_link', builder=DatasetBuilder('foo', attributes={'data_type': 'Foo'}))]
self.validate_matching_link_data_type_case(datasets, groups, links)<|docstring|>Test that a MissingDataType is returned when the linked group is missing<|endoftext|>
|
8158a6c2f9260ab8e48e275e2e609a7a638bd4f19a0dcd22d3842a662a9a1539
|
def test_error_returned_when_child_at_highest_level_missing(self):
'Test that a MissingDataType error is returned when the dataset at\n the highest level of the inheritance hierarchy is missing\n '
self.set_up_spec()
datasets = [DatasetBuilder('bar', attributes={'data_type': 'Bar'})]
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, datasets=datasets)
result = self.vmap.validate(builder)
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)
|
Test that a MissingDataType error is returned when the dataset at
the highest level of the inheritance hierarchy is missing
|
tests/unit/validator_tests/test_validate.py
|
test_error_returned_when_child_at_highest_level_missing
|
hrnciar/hdmf
| 0 |
python
|
def test_error_returned_when_child_at_highest_level_missing(self):
'Test that a MissingDataType error is returned when the dataset at\n the highest level of the inheritance hierarchy is missing\n '
self.set_up_spec()
datasets = [DatasetBuilder('bar', attributes={'data_type': 'Bar'})]
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, datasets=datasets)
result = self.vmap.validate(builder)
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)
|
def test_error_returned_when_child_at_highest_level_missing(self):
'Test that a MissingDataType error is returned when the dataset at\n the highest level of the inheritance hierarchy is missing\n '
self.set_up_spec()
datasets = [DatasetBuilder('bar', attributes={'data_type': 'Bar'})]
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, datasets=datasets)
result = self.vmap.validate(builder)
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)<|docstring|>Test that a MissingDataType error is returned when the dataset at
the highest level of the inheritance hierarchy is missing<|endoftext|>
|
92a27ebe4a541e9ff6a67c10f099bfef14bfd6d15fdb601d679f7992d6bb5c79
|
def test_error_returned_when_child_at_lowest_level_missing(self):
'Test that a MissingDataType error is returned when the dataset at\n the lowest level of the inheritance hierarchy is missing\n '
self.set_up_spec()
datasets = [DatasetBuilder('foo', attributes={'data_type': 'Foo'})]
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, datasets=datasets)
result = self.vmap.validate(builder)
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)
|
Test that a MissingDataType error is returned when the dataset at
the lowest level of the inheritance hierarchy is missing
|
tests/unit/validator_tests/test_validate.py
|
test_error_returned_when_child_at_lowest_level_missing
|
hrnciar/hdmf
| 0 |
python
|
def test_error_returned_when_child_at_lowest_level_missing(self):
'Test that a MissingDataType error is returned when the dataset at\n the lowest level of the inheritance hierarchy is missing\n '
self.set_up_spec()
datasets = [DatasetBuilder('foo', attributes={'data_type': 'Foo'})]
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, datasets=datasets)
result = self.vmap.validate(builder)
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)
|
def test_error_returned_when_child_at_lowest_level_missing(self):
'Test that a MissingDataType error is returned when the dataset at\n the lowest level of the inheritance hierarchy is missing\n '
self.set_up_spec()
datasets = [DatasetBuilder('foo', attributes={'data_type': 'Foo'})]
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, datasets=datasets)
result = self.vmap.validate(builder)
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], MissingDataType)<|docstring|>Test that a MissingDataType error is returned when the dataset at
the lowest level of the inheritance hierarchy is missing<|endoftext|>
|
631071499eb8a083bb80129e8b6234400d7d2e692ed3ad3cd874e410294b8c55
|
def test_both_levels_of_hierarchy_validated(self):
'Test that when both required children at separate levels of\n inheritance hierarchy are present, both child specs are satisfied\n '
self.set_up_spec()
datasets = [DatasetBuilder('foo', attributes={'data_type': 'Foo'}), DatasetBuilder('bar', attributes={'data_type': 'Bar'})]
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, datasets=datasets)
result = self.vmap.validate(builder)
self.assertEqual(len(result), 0)
|
Test that when both required children at separate levels of
inheritance hierarchy are present, both child specs are satisfied
|
tests/unit/validator_tests/test_validate.py
|
test_both_levels_of_hierarchy_validated
|
hrnciar/hdmf
| 0 |
python
|
def test_both_levels_of_hierarchy_validated(self):
'Test that when both required children at separate levels of\n inheritance hierarchy are present, both child specs are satisfied\n '
self.set_up_spec()
datasets = [DatasetBuilder('foo', attributes={'data_type': 'Foo'}), DatasetBuilder('bar', attributes={'data_type': 'Bar'})]
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, datasets=datasets)
result = self.vmap.validate(builder)
self.assertEqual(len(result), 0)
|
def test_both_levels_of_hierarchy_validated(self):
'Test that when both required children at separate levels of\n inheritance hierarchy are present, both child specs are satisfied\n '
self.set_up_spec()
datasets = [DatasetBuilder('foo', attributes={'data_type': 'Foo'}), DatasetBuilder('bar', attributes={'data_type': 'Bar'})]
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, datasets=datasets)
result = self.vmap.validate(builder)
self.assertEqual(len(result), 0)<|docstring|>Test that when both required children at separate levels of
inheritance hierarchy are present, both child specs are satisfied<|endoftext|>
|
358825b7021c5938ce9a4761252f08daa79a7bab4ee4d41c83e4c36ef3b1c3d3
|
@skip('Functionality not yet supported')
def test_both_levels_of_hierarchy_validated_inverted_order(self):
'Test that when both required children at separate levels of\n inheritance hierarchy are present, both child specs are satisfied.\n This should work no matter what the order of the builders.\n '
self.set_up_spec()
datasets = [DatasetBuilder('bar', attributes={'data_type': 'Bar'}), DatasetBuilder('foo', attributes={'data_type': 'Foo'})]
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, datasets=datasets)
result = self.vmap.validate(builder)
self.assertEqual(len(result), 0)
|
Test that when both required children at separate levels of
inheritance hierarchy are present, both child specs are satisfied.
This should work no matter what the order of the builders.
|
tests/unit/validator_tests/test_validate.py
|
test_both_levels_of_hierarchy_validated_inverted_order
|
hrnciar/hdmf
| 0 |
python
|
@skip('Functionality not yet supported')
def test_both_levels_of_hierarchy_validated_inverted_order(self):
'Test that when both required children at separate levels of\n inheritance hierarchy are present, both child specs are satisfied.\n This should work no matter what the order of the builders.\n '
self.set_up_spec()
datasets = [DatasetBuilder('bar', attributes={'data_type': 'Bar'}), DatasetBuilder('foo', attributes={'data_type': 'Foo'})]
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, datasets=datasets)
result = self.vmap.validate(builder)
self.assertEqual(len(result), 0)
|
@skip('Functionality not yet supported')
def test_both_levels_of_hierarchy_validated_inverted_order(self):
'Test that when both required children at separate levels of\n inheritance hierarchy are present, both child specs are satisfied.\n This should work no matter what the order of the builders.\n '
self.set_up_spec()
datasets = [DatasetBuilder('bar', attributes={'data_type': 'Bar'}), DatasetBuilder('foo', attributes={'data_type': 'Foo'})]
builder = GroupBuilder('my_baz', attributes={'data_type': 'Baz'}, datasets=datasets)
result = self.vmap.validate(builder)
self.assertEqual(len(result), 0)<|docstring|>Test that when both required children at separate levels of
inheritance hierarchy are present, both child specs are satisfied.
This should work no matter what the order of the builders.<|endoftext|>
|
acf1a07ec1fb601816c5b13ed08dbde90c69ac7d3a0c2f023bf6b88017c69fac
|
def get(isamAppliance, check_mode=False, force=False):
'\n Retrieve the stored ISAM credential\n '
return isamAppliance.invoke_get('Retrieve the stored ISAM credential', '{0}'.format(uri), requires_modules=requires_modules, requires_version=requires_version)
|
Retrieve the stored ISAM credential
|
ibmsecurity/isam/web/api_access_control/utilities/credential.py
|
get
|
zone-zero/ibmsecurity
| 46 |
python
|
def get(isamAppliance, check_mode=False, force=False):
'\n \n '
return isamAppliance.invoke_get('Retrieve the stored ISAM credential', '{0}'.format(uri), requires_modules=requires_modules, requires_version=requires_version)
|
def get(isamAppliance, check_mode=False, force=False):
'\n \n '
return isamAppliance.invoke_get('Retrieve the stored ISAM credential', '{0}'.format(uri), requires_modules=requires_modules, requires_version=requires_version)<|docstring|>Retrieve the stored ISAM credential<|endoftext|>
|
2ed943bae847646166288828469e64a6317b5ffe65726335c0699c9f0c4f5b09
|
def add(isamAppliance, admin_id, admin_pwd, admin_domain='Default', check_mode=False, force=False):
'\n Store the ISAM administrator credentials\n '
(exist, warnings) = _check(isamAppliance)
if ((force is True) or (exist is False)):
if (check_mode is True):
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_post('Store the ISAM administrator credentials', '{0}'.format(uri), {'admin_id': admin_id, 'admin_pwd': admin_pwd, 'admin_domain': admin_domain}, requires_modules=requires_modules, requires_version=requires_version)
return isamAppliance.create_return_object(warnings=warnings)
|
Store the ISAM administrator credentials
|
ibmsecurity/isam/web/api_access_control/utilities/credential.py
|
add
|
zone-zero/ibmsecurity
| 46 |
python
|
def add(isamAppliance, admin_id, admin_pwd, admin_domain='Default', check_mode=False, force=False):
'\n \n '
(exist, warnings) = _check(isamAppliance)
if ((force is True) or (exist is False)):
if (check_mode is True):
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_post('Store the ISAM administrator credentials', '{0}'.format(uri), {'admin_id': admin_id, 'admin_pwd': admin_pwd, 'admin_domain': admin_domain}, requires_modules=requires_modules, requires_version=requires_version)
return isamAppliance.create_return_object(warnings=warnings)
|
def add(isamAppliance, admin_id, admin_pwd, admin_domain='Default', check_mode=False, force=False):
'\n \n '
(exist, warnings) = _check(isamAppliance)
if ((force is True) or (exist is False)):
if (check_mode is True):
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_post('Store the ISAM administrator credentials', '{0}'.format(uri), {'admin_id': admin_id, 'admin_pwd': admin_pwd, 'admin_domain': admin_domain}, requires_modules=requires_modules, requires_version=requires_version)
return isamAppliance.create_return_object(warnings=warnings)<|docstring|>Store the ISAM administrator credentials<|endoftext|>
|
5ddd4ab72e2414b58785ba9276d665cc69e085935cfe70e993dd07248285a3ac
|
def delete(isamAppliance, check_mode=False, force=False):
'\n Delete the stored ISAM administrator credential\n '
(exist, warnings) = _check(isamAppliance)
if ((force is True) or (exist is True)):
if (check_mode is True):
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_delete('Delete the stored ISAM administrator credential', '{0}'.format(uri), requires_modules=requires_modules, requires_version=requires_version)
return isamAppliance.create_return_object(warnings=warnings)
|
Delete the stored ISAM administrator credential
|
ibmsecurity/isam/web/api_access_control/utilities/credential.py
|
delete
|
zone-zero/ibmsecurity
| 46 |
python
|
def delete(isamAppliance, check_mode=False, force=False):
'\n \n '
(exist, warnings) = _check(isamAppliance)
if ((force is True) or (exist is True)):
if (check_mode is True):
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_delete('Delete the stored ISAM administrator credential', '{0}'.format(uri), requires_modules=requires_modules, requires_version=requires_version)
return isamAppliance.create_return_object(warnings=warnings)
|
def delete(isamAppliance, check_mode=False, force=False):
'\n \n '
(exist, warnings) = _check(isamAppliance)
if ((force is True) or (exist is True)):
if (check_mode is True):
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_delete('Delete the stored ISAM administrator credential', '{0}'.format(uri), requires_modules=requires_modules, requires_version=requires_version)
return isamAppliance.create_return_object(warnings=warnings)<|docstring|>Delete the stored ISAM administrator credential<|endoftext|>
|
7e49f2d7beca186f9573fdc54f571e1a9a947a8e14f484244a8452c5e6339d07
|
def harary(A: SparseTensor, vtx_color=None, threshold=0.97):
'\n Harary bipartite decomposition\n\n Parameters\n ----------\n A: :py:class:`SparseTensor`\n The adjacency matrix\n vtx_color: array_like, optional\n All valid type for :py:func:`np.asarray` is acceptable, including :py:class:`torch.Tensor` on cpu. If None,\n this function will invoke :py:func:`thgsp.alg.dsatur` silently.\n\n threshold: float, optional\n\n Returns\n -------\n bptG: array\n An array consisting of :obj:`M` bipartite subgraphs formatted as :class:`scipy.sparse.lil_matrix`.\n beta: array\n :obj:`beta[:,i]` is the bipartite set indicator of :obj:`i`-th subgraph.\n beta_dist: array\n A table showing the relationship between :obj:`beta` and :obj:`channels`\n new_vtx_color: array\n The node colors\n mapper: dict\n Map **new_vtx_color** to the original ones. For example mapper={1:2, 2:3, 3:1} will\n map 1,2 and 3-th color to 2,3 and 1, respectively.\n\n '
if (vtx_color is None):
vtx_color = dsatur(A)
vtx_color = np.asarray(vtx_color)
n_color = (max(vtx_color) + 1)
if (n_color > 256):
raise RuntimeError('Too many colors will lead to a too complicated channel division')
A = A.to_scipy(layout='csr').tolil()
M = int(np.ceil(np.log2(n_color)))
N = A.shape[(- 1)]
new_color_ordinal = new_order(n_color)
mapper = {c: i for (i, c) in enumerate(new_color_ordinal)}
new_vtx_color = [mapper[c] for c in vtx_color]
beta_dist = distribute_color(n_color, M)
bptG = [lil_matrix((N, N), dtype=A.dtype) for _ in range(M)]
link_weights = (- np.ones(M))
beta = np.zeros((N, M), dtype=bool)
for i in range(M):
colors_L = (beta_dist[:, i] == 1).nonzero()[0]
bt = np.in1d(new_vtx_color, colors_L)
beta[:, i] = bt
mask = bipartite_mask(bt)
bpt_edges = A[mask]
bptG[i][mask] = bpt_edges
link_weights[i] = bpt_edges.sum()
A[mask] = 0
ratio_link_weights = (link_weights.cumsum(0) / link_weights.sum())
bpt_idx = (ratio_link_weights >= threshold).nonzero()[0]
M1 = (bpt_idx[0] + 1)
bptG = bptG[:M1]
max_color = np.power(2, M1)
beta_dist = distribute_color(max_color, M1)
beta = beta[:, :M1]
return (bptG, beta, beta_dist, vtx_color, mapper)
|
Harary bipartite decomposition
Parameters
----------
A: :py:class:`SparseTensor`
The adjacency matrix
vtx_color: array_like, optional
All valid type for :py:func:`np.asarray` is acceptable, including :py:class:`torch.Tensor` on cpu. If None,
this function will invoke :py:func:`thgsp.alg.dsatur` silently.
threshold: float, optional
Returns
-------
bptG: array
An array consisting of :obj:`M` bipartite subgraphs formatted as :class:`scipy.sparse.lil_matrix`.
beta: array
:obj:`beta[:,i]` is the bipartite set indicator of :obj:`i`-th subgraph.
beta_dist: array
A table showing the relationship between :obj:`beta` and :obj:`channels`
new_vtx_color: array
The node colors
mapper: dict
Map **new_vtx_color** to the original ones. For example mapper={1:2, 2:3, 3:1} will
map 1,2 and 3-th color to 2,3 and 1, respectively.
|
thgsp/bga/harary.py
|
harary
|
bwdeng20/thgsp
| 22 |
python
|
def harary(A: SparseTensor, vtx_color=None, threshold=0.97):
'\n Harary bipartite decomposition\n\n Parameters\n ----------\n A: :py:class:`SparseTensor`\n The adjacency matrix\n vtx_color: array_like, optional\n All valid type for :py:func:`np.asarray` is acceptable, including :py:class:`torch.Tensor` on cpu. If None,\n this function will invoke :py:func:`thgsp.alg.dsatur` silently.\n\n threshold: float, optional\n\n Returns\n -------\n bptG: array\n An array consisting of :obj:`M` bipartite subgraphs formatted as :class:`scipy.sparse.lil_matrix`.\n beta: array\n :obj:`beta[:,i]` is the bipartite set indicator of :obj:`i`-th subgraph.\n beta_dist: array\n A table showing the relationship between :obj:`beta` and :obj:`channels`\n new_vtx_color: array\n The node colors\n mapper: dict\n Map **new_vtx_color** to the original ones. For example mapper={1:2, 2:3, 3:1} will\n map 1,2 and 3-th color to 2,3 and 1, respectively.\n\n '
if (vtx_color is None):
vtx_color = dsatur(A)
vtx_color = np.asarray(vtx_color)
n_color = (max(vtx_color) + 1)
if (n_color > 256):
raise RuntimeError('Too many colors will lead to a too complicated channel division')
A = A.to_scipy(layout='csr').tolil()
M = int(np.ceil(np.log2(n_color)))
N = A.shape[(- 1)]
new_color_ordinal = new_order(n_color)
mapper = {c: i for (i, c) in enumerate(new_color_ordinal)}
new_vtx_color = [mapper[c] for c in vtx_color]
beta_dist = distribute_color(n_color, M)
bptG = [lil_matrix((N, N), dtype=A.dtype) for _ in range(M)]
link_weights = (- np.ones(M))
beta = np.zeros((N, M), dtype=bool)
for i in range(M):
colors_L = (beta_dist[:, i] == 1).nonzero()[0]
bt = np.in1d(new_vtx_color, colors_L)
beta[:, i] = bt
mask = bipartite_mask(bt)
bpt_edges = A[mask]
bptG[i][mask] = bpt_edges
link_weights[i] = bpt_edges.sum()
A[mask] = 0
ratio_link_weights = (link_weights.cumsum(0) / link_weights.sum())
bpt_idx = (ratio_link_weights >= threshold).nonzero()[0]
M1 = (bpt_idx[0] + 1)
bptG = bptG[:M1]
max_color = np.power(2, M1)
beta_dist = distribute_color(max_color, M1)
beta = beta[:, :M1]
return (bptG, beta, beta_dist, vtx_color, mapper)
|
def harary(A: SparseTensor, vtx_color=None, threshold=0.97):
'\n Harary bipartite decomposition\n\n Parameters\n ----------\n A: :py:class:`SparseTensor`\n The adjacency matrix\n vtx_color: array_like, optional\n All valid type for :py:func:`np.asarray` is acceptable, including :py:class:`torch.Tensor` on cpu. If None,\n this function will invoke :py:func:`thgsp.alg.dsatur` silently.\n\n threshold: float, optional\n\n Returns\n -------\n bptG: array\n An array consisting of :obj:`M` bipartite subgraphs formatted as :class:`scipy.sparse.lil_matrix`.\n beta: array\n :obj:`beta[:,i]` is the bipartite set indicator of :obj:`i`-th subgraph.\n beta_dist: array\n A table showing the relationship between :obj:`beta` and :obj:`channels`\n new_vtx_color: array\n The node colors\n mapper: dict\n Map **new_vtx_color** to the original ones. For example mapper={1:2, 2:3, 3:1} will\n map 1,2 and 3-th color to 2,3 and 1, respectively.\n\n '
if (vtx_color is None):
vtx_color = dsatur(A)
vtx_color = np.asarray(vtx_color)
n_color = (max(vtx_color) + 1)
if (n_color > 256):
raise RuntimeError('Too many colors will lead to a too complicated channel division')
A = A.to_scipy(layout='csr').tolil()
M = int(np.ceil(np.log2(n_color)))
N = A.shape[(- 1)]
new_color_ordinal = new_order(n_color)
mapper = {c: i for (i, c) in enumerate(new_color_ordinal)}
new_vtx_color = [mapper[c] for c in vtx_color]
beta_dist = distribute_color(n_color, M)
bptG = [lil_matrix((N, N), dtype=A.dtype) for _ in range(M)]
link_weights = (- np.ones(M))
beta = np.zeros((N, M), dtype=bool)
for i in range(M):
colors_L = (beta_dist[:, i] == 1).nonzero()[0]
bt = np.in1d(new_vtx_color, colors_L)
beta[:, i] = bt
mask = bipartite_mask(bt)
bpt_edges = A[mask]
bptG[i][mask] = bpt_edges
link_weights[i] = bpt_edges.sum()
A[mask] = 0
ratio_link_weights = (link_weights.cumsum(0) / link_weights.sum())
bpt_idx = (ratio_link_weights >= threshold).nonzero()[0]
M1 = (bpt_idx[0] + 1)
bptG = bptG[:M1]
max_color = np.power(2, M1)
beta_dist = distribute_color(max_color, M1)
beta = beta[:, :M1]
return (bptG, beta, beta_dist, vtx_color, mapper)<|docstring|>Harary bipartite decomposition
Parameters
----------
A: :py:class:`SparseTensor`
The adjacency matrix
vtx_color: array_like, optional
All valid type for :py:func:`np.asarray` is acceptable, including :py:class:`torch.Tensor` on cpu. If None,
this function will invoke :py:func:`thgsp.alg.dsatur` silently.
threshold: float, optional
Returns
-------
bptG: array
An array consisting of :obj:`M` bipartite subgraphs formatted as :class:`scipy.sparse.lil_matrix`.
beta: array
:obj:`beta[:,i]` is the bipartite set indicator of :obj:`i`-th subgraph.
beta_dist: array
A table showing the relationship between :obj:`beta` and :obj:`channels`
new_vtx_color: array
The node colors
mapper: dict
Map **new_vtx_color** to the original ones. For example mapper={1:2, 2:3, 3:1} will
map 1,2 and 3-th color to 2,3 and 1, respectively.<|endoftext|>
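A minimal usage sketch for the function above, assuming harary is importable from this module and that torch_sparse provides SparseTensor; the 4-cycle graph and its hand-made 2-coloring below are illustrative assumptions, not part of the original record:

import torch
from torch_sparse import SparseTensor

# 4-cycle 0-1-2-3-0: already bipartite, so a single bipartite subgraph should suffice
dense = torch.tensor([[0., 1., 0., 1.],
                      [1., 0., 1., 0.],
                      [0., 1., 0., 1.],
                      [1., 0., 1., 0.]])
A = SparseTensor.from_dense(dense)
vtx_color = [0, 1, 0, 1]  # a valid 2-coloring supplied by hand, so dsatur is skipped

bptG, beta, beta_dist, colors, mapper = harary(A, vtx_color=vtx_color)
print(len(bptG))   # number of bipartite subgraphs kept after thresholding
print(beta.shape)  # (4, M1): one bipartite-set indicator column per subgraph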
|
5d4d847b5140e9bb7ead55ce75e70904b98d37616f795e10c410210608e684f5
|
def get_landmark_seen(self, frame_idx):
'Get landmark index that camera sees in a frame. \n\n Args:\n frame_idx (int): index of the frame\n\n Returns:\n tuple: index of all features in given frame, pixel coordinates\n '
pixels = self.features[:, :, frame_idx]
valid = (pixels[0] != (- 1))
feature_idx = self.features_idx[valid]
return feature_idx
|
Get landmark index that camera sees in a frame.
Args:
frame_idx (int): index of the frame
Returns:
tuple: index of all features in given frame, pixel coordinates
|
sensors.py
|
get_landmark_seen
|
chenfengw/visual-inertial-slam
| 2 |
python
|
def get_landmark_seen(self, frame_idx):
'Get landmark index that camera sees in a frame. \n\n Args:\n frame_idx (int): index of the frame\n\n Returns:\n tuple: index of all features in given frame, pixel coordinates\n '
pixels = self.features[:, :, frame_idx]
valid = (pixels[0] != (- 1))
feature_idx = self.features_idx[valid]
return feature_idx
|
def get_landmark_seen(self, frame_idx):
'Get landmark index that camera sees in a frame. \n\n Args:\n frame_idx (int): index of the frame\n\n Returns:\n tuple: index of all features in given frame, pixel coordinates\n '
pixels = self.features[:, :, frame_idx]
valid = (pixels[0] != (- 1))
feature_idx = self.features_idx[valid]
return feature_idx<|docstring|>Get landmark index that camera sees in a frame.
Args:
frame_idx (int): index of the frame
Returns:
tuple: index of all features in given frame, pixel coordinates<|endoftext|>
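The method above relies on the convention that an unobserved landmark is stored with pixel value -1. A standalone NumPy toy (array shapes and values are assumptions, not taken from the original project) showing the same visibility test:

import numpy as np

features = np.full((4, 3, 2), -1.0)          # 4 pixel rows, 3 landmarks, 2 frames, all unseen
features[:, 1, 0] = [100., 120., 90., 120.]  # landmark 1 observed in frame 0
features_idx = np.arange(3)

valid = features[0, :, 0] != -1              # same test as in get_landmark_seen
print(features_idx[valid])                   # -> [1]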
|
d961510f5ccbde854fc37e5722090fc272c50a9345cf4cc0fa1f7e659e8153e7
|
def pixel_to_xyz(self, pixels, max_depth=25):
'Given pixel coordinates find out xyz in camera frame\n\n Args:\n pixels (np array): 4 x N_features\n max_depth (int): set max pixel depth in meter\n Returns:\n np array: xyz coordinates of pixels in homogenous coordinates, \n 3 (x,y,z) x N_features\n '
assert (pixels.shape[0] == 4)
d = np.abs((pixels[0] - pixels[2]))
z = (self.fsub / d)
z[(z > max_depth)] = max_depth
u_L = pixels[0]
v_L = pixels[1]
x = (((u_L - self.cu) / self.fsu) * z)
y = (((v_L - self.cv) / self.fsv) * z)
return np.vstack((x, y, z))
|
Given pixel coordinates find out xyz in camera frame
Args:
pixels (np array): 4 x N_features
max_depth (int): set max pixel depth in meter
Returns:
np array: xyz coordinates of pixels in homogenous coordinates,
3 (x,y,z) x N_features
|
sensors.py
|
pixel_to_xyz
|
chenfengw/visual-inertial-slam
| 2 |
python
|
def pixel_to_xyz(self, pixels, max_depth=25):
'Given pixel coordinates find out xyz in camera frame\n\n Args:\n pixels (np array): 4 x N_features\n max_depth (int): set max pixel depth in meter\n Returns:\n np array: xyz coordinates of pixels in homogenous coordinates, \n 3 (x,y,z) x N_features\n '
assert (pixels.shape[0] == 4)
d = np.abs((pixels[0] - pixels[2]))
z = (self.fsub / d)
z[(z > max_depth)] = max_depth
u_L = pixels[0]
v_L = pixels[1]
x = (((u_L - self.cu) / self.fsu) * z)
y = (((v_L - self.cv) / self.fsv) * z)
return np.vstack((x, y, z))
|
def pixel_to_xyz(self, pixels, max_depth=25):
'Given pixel coordinates find out xyz in camera frame\n\n Args:\n pixels (np array): 4 x N_features\n max_depth (int): set max pixel depth in meter\n Returns:\n np array: xyz coordinates of pixels in homogenous coordinates, \n 3 (x,y,z) x N_features\n '
assert (pixels.shape[0] == 4)
d = np.abs((pixels[0] - pixels[2]))
z = (self.fsub / d)
z[(z > max_depth)] = max_depth
u_L = pixels[0]
v_L = pixels[1]
x = (((u_L - self.cu) / self.fsu) * z)
y = (((v_L - self.cv) / self.fsv) * z)
return np.vstack((x, y, z))<|docstring|>Given pixel coordinates find out xyz in camera frame
Args:
pixels (np array): 4 x N_features
max_depth (int): set max pixel depth in meter
Returns:
np array: xyz coordinates of pixels in homogenous coordinates,
3 (x,y,z) x N_features<|endoftext|>
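The back-projection above is standard stereo triangulation: disparity d = u_L - u_R, depth z = fsu * baseline / d (precomputed as fsub), then x and y from the pinhole model. A standalone NumPy sketch; the intrinsics and pixel values below are made up for illustration:

import numpy as np

fsu, fsv, cu, cv, baseline = 600.0, 600.0, 320.0, 240.0, 0.6
fsub = fsu * baseline

pixels = np.array([[400.0, 500.0],   # u_L for two features
                   [250.0, 260.0],   # v_L
                   [380.0, 470.0],   # u_R
                   [250.0, 260.0]])  # v_R

d = np.abs(pixels[0] - pixels[2])    # disparity per feature
z = fsub / d                         # depth in meters
x = (pixels[0] - cu) / fsu * z
y = (pixels[1] - cv) / fsv * z
print(np.vstack((x, y, z)))          # 3 x N_features camera-frame coordinates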
|
2867acf63f000bd0992ed252646964a182236007419d57e3fc8f94ce9bc1b2bc
|
def _is_key_file_encrypted(key_file):
'Detects if a key file is encrypted or not.\n\n Copy of the internal urllib function (urllib3.util.ssl_)'
with open(key_file, 'r') as f:
for line in f:
if ('ENCRYPTED' in line):
return True
return False
|
Detects if a key file is encrypted or not.
Copy of the internal urllib function (urllib3.util.ssl_)
|
httpie/ssl_.py
|
_is_key_file_encrypted
|
10088/httpie
| 2 |
python
|
def _is_key_file_encrypted(key_file):
'Detects if a key file is encrypted or not.\n\n Copy of the internal urllib function (urllib3.util.ssl_)'
with open(key_file, 'r') as f:
for line in f:
if ('ENCRYPTED' in line):
return True
return False
|
def _is_key_file_encrypted(key_file):
'Detects if a key file is encrypted or not.\n\n Copy of the internal urllib function (urllib3.util.ssl_)'
with open(key_file, 'r') as f:
for line in f:
if ('ENCRYPTED' in line):
return True
return False<|docstring|>Detects if a key file is encrypted or not.
Copy of the internal urllib function (urllib3.util.ssl_)<|endoftext|>
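A quick way to exercise the helper above is to write a throwaway PEM-style file whose headers advertise encryption (the contents below are a dummy, not a real key):

import tempfile

pem = (
    "-----BEGIN RSA PRIVATE KEY-----\n"
    "Proc-Type: 4,ENCRYPTED\n"
    "DEK-Info: AES-128-CBC,ABCDEF0123456789\n"
    "-----END RSA PRIVATE KEY-----\n"
)
with tempfile.NamedTemporaryFile("w", suffix=".pem", delete=False) as handle:
    handle.write(pem)
    key_path = handle.name

print(_is_key_file_encrypted(key_path))  # True: 'ENCRYPTED' appears in a header line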
|
65be57d7f132c51f750ebc9f1f6012ab2e228fa75908df09fff7e075c53d0a55
|
def to_raw_cert(self):
"Synthesize a requests-compatible (2-item tuple of cert and key file)\n object from HTTPie's internal representation of a certificate."
return (self.cert_file, self.key_file)
|
Synthesize a requests-compatible (2-item tuple of cert and key file)
object from HTTPie's internal representation of a certificate.
|
httpie/ssl_.py
|
to_raw_cert
|
10088/httpie
| 2 |
python
|
def to_raw_cert(self):
"Synthesize a requests-compatible (2-item tuple of cert and key file)\n object from HTTPie's internal representation of a certificate."
return (self.cert_file, self.key_file)
|
def to_raw_cert(self):
"Synthesize a requests-compatible (2-item tuple of cert and key file)\n object from HTTPie's internal representation of a certificate."
return (self.cert_file, self.key_file)<|docstring|>Synthesize a requests-compatible (2-item tuple of cert and key file)
object from HTTPie's internal representation of a certificate.<|endoftext|>
|
588d4080a890487d7d4d94fb73d3ace3f10266f09181e185095be7f5546bd881
|
@lru_cache
def get_user_model():
'\n use settings.SLACKBOT_USER_MODEL to use your own model for Slack users\n pretty much like django.contrib.auth.get_user_model\n '
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
try:
return apps.get_model(settings.SLACKBOT_USER_MODEL, require_ready=False)
except ValueError:
raise ImproperlyConfigured("SLACKBOT_USER_MODEL must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(("SLACKBOT_USER_MODEL refers to model '%s' that has not been installed" % settings.SLACKBOT_USER_MODEL))
|
use settings.SLACKBOT_USER_MODEL to use your own model for Slack users
pretty much like django.contrib.auth.get_user_model
|
slackbot/__init__.py
|
get_user_model
|
surface-security/django-slackbot
| 1 |
python
|
@lru_cache
def get_user_model():
'\n use settings.SLACKBOT_USER_MODEL to use your own model for Slack users\n pretty much like django.contrib.auth.get_user_model\n '
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
try:
return apps.get_model(settings.SLACKBOT_USER_MODEL, require_ready=False)
except ValueError:
raise ImproperlyConfigured("SLACKBOT_USER_MODEL must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(("SLACKBOT_USER_MODEL refers to model '%s' that has not been installed" % settings.SLACKBOT_USER_MODEL))
|
@lru_cache
def get_user_model():
'\n use settings.SLACKBOT_USER_MODEL to use your own model for Slack users\n pretty much like django.contrib.auth.get_user_model\n '
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
try:
return apps.get_model(settings.SLACKBOT_USER_MODEL, require_ready=False)
except ValueError:
raise ImproperlyConfigured("SLACKBOT_USER_MODEL must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(("SLACKBOT_USER_MODEL refers to model '%s' that has not been installed" % settings.SLACKBOT_USER_MODEL))<|docstring|>use settings.SLACKBOT_USER_MODEL to use your own model for Slack users
pretty much like django.contrib.auth.get_user_model<|endoftext|>
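Usage mirrors django.contrib.auth.get_user_model: point the setting at "app_label.ModelName" and call the helper once apps are loaded. The configuration sketch below is hypothetical (project, app and model names are assumptions) and is not a standalone script:

# settings.py (hypothetical project)
SLACKBOT_USER_MODEL = "myapp.SlackProfile"  # must be "app_label.ModelName"

# any module imported after Django app loading
SlackUser = get_user_model()
profile, created = SlackUser.objects.get_or_create(slack_id="U0123456")  # hypothetical field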
|
021354a1db5b318fe0ff5f8ef17049c9cbf70d2ec8b4f844349edb234c7ab113
|
@staticmethod
def is_supported(session: Session):
'Whether search is implemented'
return session.has_fragment(SearchHandler)
|
Whether search is implemented
|
novelsave/client/bots/discord/endpoints/search.py
|
is_supported
|
mensch272/novelsave
| 12 |
python
|
@staticmethod
def is_supported(session: Session):
return session.has_fragment(SearchHandler)
|
@staticmethod
def is_supported(session: Session):
return session.has_fragment(SearchHandler)<|docstring|>Whether search is implemented<|endoftext|>
|
abb85fbfb2854362bdcfa771932452e27a2d81ff083a8824c80591ed2cc78a17
|
@commands.command()
async def search(self, ctx: commands.Context, *, words):
'Start a search task'
session = self.session_handler.get_or_create(ctx)
if (not self.is_supported(session)):
(await ctx.send(self.unsupported))
return
(await session.run(ctx, SearchHandler.search, words))
|
Start a search task
|
novelsave/client/bots/discord/endpoints/search.py
|
search
|
mensch272/novelsave
| 12 |
python
|
@commands.command()
async def search(self, ctx: commands.Context, *, words):
session = self.session_handler.get_or_create(ctx)
if (not self.is_supported(session)):
(await ctx.send(self.unsupported))
return
(await session.run(ctx, SearchHandler.search, words))
|
@commands.command()
async def search(self, ctx: commands.Context, *, words):
session = self.session_handler.get_or_create(ctx)
if (not self.is_supported(session)):
(await ctx.send(self.unsupported))
return
(await session.run(ctx, SearchHandler.search, words))<|docstring|>Start a search task<|endoftext|>
|
9e99dfe64898045a442ff9d4ee0eea388b0ada3d8860e61cbb206a26c3ae0814
|
@commands.command()
async def select(self, ctx: commands.Context, num: int):
'Select from the provided search results'
session = self.session_handler.get_or_create(ctx)
if (not self.is_supported(session)):
(await ctx.send(self.unsupported))
return
if (not session.get(SearchHandler.is_select)()):
(await ctx.send('Session does not require selection.'))
return
if session.get(SearchHandler.is_novel_select)():
(await session.run(ctx, SearchHandler.select_novel, (num - 1)))
else:
url = session.get(SearchHandler.select_source)((num - 1))
(await ctx.send(f'{url} selected.'))
(await ctx.invoke(session.bot.get_command('download'), url))
|
Select from the provided search results
|
novelsave/client/bots/discord/endpoints/search.py
|
select
|
mensch272/novelsave
| 12 |
python
|
@commands.command()
async def select(self, ctx: commands.Context, num: int):
session = self.session_handler.get_or_create(ctx)
if (not self.is_supported(session)):
(await ctx.send(self.unsupported))
return
if (not session.get(SearchHandler.is_select)()):
(await ctx.send('Session does not require selection.'))
return
if session.get(SearchHandler.is_novel_select)():
(await session.run(ctx, SearchHandler.select_novel, (num - 1)))
else:
url = session.get(SearchHandler.select_source)((num - 1))
(await ctx.send(f'{url} selected.'))
(await ctx.invoke(session.bot.get_command('download'), url))
|
@commands.command()
async def select(self, ctx: commands.Context, num: int):
session = self.session_handler.get_or_create(ctx)
if (not self.is_supported(session)):
(await ctx.send(self.unsupported))
return
if (not session.get(SearchHandler.is_select)()):
(await ctx.send('Session does not require selection.'))
return
if session.get(SearchHandler.is_novel_select)():
(await session.run(ctx, SearchHandler.select_novel, (num - 1)))
else:
url = session.get(SearchHandler.select_source)((num - 1))
(await ctx.send(f'{url} selected.'))
(await ctx.invoke(session.bot.get_command('download'), url))<|docstring|>Select from the provided search results<|endoftext|>
|
19dcb62af7a966a4bba310aef9146ea85a18664b337b8af01d06882f2115707d
|
def __init__(self, read_file: Union[(str, Path)], file_headers=True):
"\n This object takes a read file directory as its core argument. By default file headers are turned on, but if a\n file doesn't have any file headers users can turn file headers of.\n\n This object has the following attributes:\n\n file_name: The file name of the read file minus any file extension\n\n sheet_column_lengths: The number of columns of data that exist with each given sheet. Keep in mind, this\n operation assumes that even if you don't have column headers, that the first row of every column does have\n content in it as this determines the length.\n\n sheet_row_lengths: The number of rows of data for the columns within a given sheet. Keep in mind, this operation\n assumes equal length columns and only takes the length of the first column in each sheet as the true value\n for all other columns within the sheet.\n\n sheet_headers: If headers are set to true, this will isolate the first row of each given column for each given\n sheet and use these values as the header value for a given column in a given sheet.\n\n sheet_data: This contains the data from all the sheets in a sheet-column-row format.\n\n :param read_file: The xlsx file path you want to read in to an object\n :type read_file: str | Path\n\n :param file_headers: If the xlsx file has file headers or not\n :type file_headers: bool\n "
self._read_file = validate_path(read_file)
self._workbook = load_workbook(self._read_file)
self._file_headers = file_headers
self.file_name = self._read_file.stem
self.sheet_names = self._set_sheet_names()
self.sheet_col_count = [sheet.max_column for sheet in self._workbook.worksheets]
self.sheet_row_count = [sheet.max_row for sheet in self._workbook.worksheets]
self.sheet_headers = self._set_sheet_header_list()
self.sheet_data = self._set_sheet_data()
|
This object takes a read file directory as its core argument. By default file headers are turned on, but if a
file doesn't have any file headers, users can turn file headers off.
This object has the following attributes:
file_name: The file name of the read file minus any file extension
sheet_column_lengths: The number of columns of data that exist with each given sheet. Keep in mind, this
operation assumes that even if you don't have column headers, that the first row of every column does have
content in it as this determines the length.
sheet_row_lengths: The number of rows of data for the columns within a given sheet. Keep in mind, this operation
assumes equal length columns and only takes the length of the first column in each sheet as the true value
for all other columns within the sheet.
sheet_headers: If headers are set to true, this will isolate the first row of each given column for each given
sheet and use these values as the header value for a given column in a given sheet.
sheet_data: This contains the data from all the sheets in a sheet-column-row format.
:param read_file: The xlsx file path you want to read in to an object
:type read_file: str | Path
:param file_headers: If the xlsx file has file headers or not
:type file_headers: bool
|
xlsxObject/XlsxObject.py
|
__init__
|
sbaker-dev/xslxObject
| 0 |
python
|
def __init__(self, read_file: Union[(str, Path)], file_headers=True):
"\n This object takes a read file directory as its core argument. By default file headers are turned on, but if a\n file doesn't have any file headers users can turn file headers of.\n\n This object has the following attributes:\n\n file_name: The file name of the read file minus any file extension\n\n sheet_column_lengths: The number of columns of data that exist with each given sheet. Keep in mind, this\n operation assumes that even if you don't have column headers, that the first row of every column does have\n content in it as this determines the length.\n\n sheet_row_lengths: The number of rows of data for the columns within a given sheet. Keep in mind, this operation\n assumes equal length columns and only takes the length of the first column in each sheet as the true value\n for all other columns within the sheet.\n\n sheet_headers: If headers are set to true, this will isolate the first row of each given column for each given\n sheet and use these values as the header value for a given column in a given sheet.\n\n sheet_data: This contains the data from all the sheets in a sheet-column-row format.\n\n :param read_file: The xlsx file path you want to read in to an object\n :type read_file: str | Path\n\n :param file_headers: If the xlsx file has file headers or not\n :type file_headers: bool\n "
self._read_file = validate_path(read_file)
self._workbook = load_workbook(self._read_file)
self._file_headers = file_headers
self.file_name = self._read_file.stem
self.sheet_names = self._set_sheet_names()
self.sheet_col_count = [sheet.max_column for sheet in self._workbook.worksheets]
self.sheet_row_count = [sheet.max_row for sheet in self._workbook.worksheets]
self.sheet_headers = self._set_sheet_header_list()
self.sheet_data = self._set_sheet_data()
|
def __init__(self, read_file: Union[(str, Path)], file_headers=True):
"\n This object takes a read file directory as its core argument. By default file headers are turned on, but if a\n file doesn't have any file headers users can turn file headers of.\n\n This object has the following attributes:\n\n file_name: The file name of the read file minus any file extension\n\n sheet_column_lengths: The number of columns of data that exist with each given sheet. Keep in mind, this\n operation assumes that even if you don't have column headers, that the first row of every column does have\n content in it as this determines the length.\n\n sheet_row_lengths: The number of rows of data for the columns within a given sheet. Keep in mind, this operation\n assumes equal length columns and only takes the length of the first column in each sheet as the true value\n for all other columns within the sheet.\n\n sheet_headers: If headers are set to true, this will isolate the first row of each given column for each given\n sheet and use these values as the header value for a given column in a given sheet.\n\n sheet_data: This contains the data from all the sheets in a sheet-column-row format.\n\n :param read_file: The xlsx file path you want to read in to an object\n :type read_file: str | Path\n\n :param file_headers: If the xlsx file has file headers or not\n :type file_headers: bool\n "
self._read_file = validate_path(read_file)
self._workbook = load_workbook(self._read_file)
self._file_headers = file_headers
self.file_name = self._read_file.stem
self.sheet_names = self._set_sheet_names()
self.sheet_col_count = [sheet.max_column for sheet in self._workbook.worksheets]
self.sheet_row_count = [sheet.max_row for sheet in self._workbook.worksheets]
self.sheet_headers = self._set_sheet_header_list()
self.sheet_data = self._set_sheet_data()<|docstring|>This object takes a read file directory as its core argument. By default file headers are turned on, but if a
file doesn't have any file headers, users can turn file headers off.
This object has the following attributes:
file_name: The file name of the read file minus any file extension
sheet_column_lengths: The number of columns of data that exist with each given sheet. Keep in mind, this
operation assumes that even if you don't have column headers, that the first row of every column does have
content in it as this determines the length.
sheet_row_lengths: The number of rows of data for the columns within a given sheet. Keep in mind, this operation
assumes equal length columns and only takes the length of the first column in each sheet as the true value
for all other columns within the sheet.
sheet_headers: If headers are set to true, this will isolate the first row of each given column for each given
sheet and use these values as the header value for a given column in a given sheet.
sheet_data: This contains the data from all the sheets in a sheet-column-row format.
:param read_file: The xlsx file path you want to read in to an object
:type read_file: str | Path
:param file_headers: If the xlsx file has file headers or not
:type file_headers: bool<|endoftext|>
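A hypothetical usage sketch for the class this constructor belongs to (the class name XlsxObject and the file name are assumptions based on the module path):

book = XlsxObject("survey_results.xlsx", file_headers=True)

print(book)                   # "<name>.xlsx with N sheets" via __repr__
print(book.sheet_names)       # worksheet titles
print(book.sheet_headers[0])  # header row of the first sheet
first_sheet = book[0]         # SheetData for sheet 0 via __getitem__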
|
f4288174a5f1cd9c2b8406617441a472d31e4f1b61fb6c359510fa3ca0038ffc
|
def __repr__(self):
'Human readable print'
return f'{self.file_name}.xlsx with {len(self.sheet_names)} sheets'
|
Human readable print
|
xlsxObject/XlsxObject.py
|
__repr__
|
sbaker-dev/xslxObject
| 0 |
python
|
def __repr__(self):
return f'{self.file_name}.xlsx with {len(self.sheet_names)} sheets'
|
def __repr__(self):
return f'{self.file_name}.xlsx with {len(self.sheet_names)} sheets'<|docstring|>Human readable print<|endoftext|>
|
226a9c7eb2322b15b94f1757084161268fa404fb2c058a1544678764ab1a9f78
|
def __getitem__(self, item):
'Extract the data '
if isinstance(item, int):
return self.sheet_data[item]
else:
raise TypeError(f'Getting sheet data via __getitem__ requires an item yet was passed {type(item)}')
|
Extract the data
|
xlsxObject/XlsxObject.py
|
__getitem__
|
sbaker-dev/xslxObject
| 0 |
python
|
def __getitem__(self, item):
' '
if isinstance(item, int):
return self.sheet_data[item]
else:
raise TypeError(f'Getting sheet data via __getitem__ requires an item yet was passed {type(item)}')
|
def __getitem__(self, item):
' '
if isinstance(item, int):
return self.sheet_data[item]
else:
raise TypeError(f'Getting sheet data via __getitem__ requires an item yet was passed {type(item)}')<|docstring|>Extract the data<|endoftext|>
|
49d821d44bf6c7b19f6d2362186a31a1c5f6d513d04f4a8c97f870c13558bb50
|
def _set_sheet_names(self) -> List[str]:
'\n This extracts the sheets titles from the xlsx workbook\n '
return [sheet.title for sheet in self._workbook.worksheets]
|
This extracts the sheets titles from the xlsx workbook
|
xlsxObject/XlsxObject.py
|
_set_sheet_names
|
sbaker-dev/xslxObject
| 0 |
python
|
def _set_sheet_names(self) -> List[str]:
'\n \n '
return [sheet.title for sheet in self._workbook.worksheets]
|
def _set_sheet_names(self) -> List[str]:
'\n \n '
return [sheet.title for sheet in self._workbook.worksheets]<|docstring|>This extracts the sheets titles from the xlsx workbook<|endoftext|>
|
9ca5c274f6e73bebd20f4ac3a2b501b07654d29b02d88347851ed3e7b414cd8e
|
def _set_sheet_header_list(self) -> List[List[str]]:
'\n Isolates headers if they exist, else creates dummy header names for each sheet in workbook\n '
if self._file_headers:
sheet_headers = [[sheet[f'{get_column_letter(i)}{1}'].value for i in range(1, (sheet_length + 1))] for (sheet_length, sheet) in zip(self.sheet_col_count, self._workbook.worksheets)]
else:
sheet_headers = [[f'Var{i}' for i in range(1, sheet_length)] for sheet_length in self.sheet_col_count]
return sheet_headers
|
Isolates headers if they exist, else creates dummy header names for each sheet in workbook
|
xlsxObject/XlsxObject.py
|
_set_sheet_header_list
|
sbaker-dev/xslxObject
| 0 |
python
|
def _set_sheet_header_list(self) -> List[List[str]]:
'\n \n '
if self._file_headers:
sheet_headers = [[sheet[f'{get_column_letter(i)}{1}'].value for i in range(1, (sheet_length + 1))] for (sheet_length, sheet) in zip(self.sheet_col_count, self._workbook.worksheets)]
else:
sheet_headers = [[f'Var{i}' for i in range(1, sheet_length)] for sheet_length in self.sheet_col_count]
return sheet_headers
|
def _set_sheet_header_list(self) -> List[List[str]]:
'\n \n '
if self._file_headers:
sheet_headers = [[sheet[f'{get_column_letter(i)}{1}'].value for i in range(1, (sheet_length + 1))] for (sheet_length, sheet) in zip(self.sheet_col_count, self._workbook.worksheets)]
else:
sheet_headers = [[f'Var{i}' for i in range(1, sheet_length)] for sheet_length in self.sheet_col_count]
return sheet_headers<|docstring|>Isolates headers if they exist, else creates dummy header names for each sheet in workbook<|endoftext|>
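Both the header and data extraction in this module address cells as f'{get_column_letter(i)}{row}'. A two-line reminder of what openpyxl's helper returns (standalone, no workbook required):

from openpyxl.utils import get_column_letter

print([get_column_letter(i) for i in range(1, 5)])  # ['A', 'B', 'C', 'D']
print(f"{get_column_letter(28)}{1}")                # 'AB1': column 28, row 1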
|
2e4ecaa751771d114714a2aee7a051e5bd71b006d15f18e2168895add21368dd
|
def _set_sheet_data(self) -> List[SheetData]:
'\n Iterator that will work through the sheets by using the column and row lengths, isolating all the content\n within a given sheet. This means that the end result is a nested list of sheet-column-row.\n '
return [self._set_data(sheet, sheet_index) for (sheet_index, sheet) in enumerate(self._workbook.worksheets)]
|
Iterator that will work through the sheets by using the column and row lengths, isolating all the content
within a given sheet. This means that the end result is a nested list of sheet-column-row.
|
xlsxObject/XlsxObject.py
|
_set_sheet_data
|
sbaker-dev/xslxObject
| 0 |
python
|
def _set_sheet_data(self) -> List[SheetData]:
'\n Iterator that will work through the sheets by using the column and row lengths, isolating all the content\n within a given sheet. This means that the end result is a nested list of sheet-column-row.\n '
return [self._set_data(sheet, sheet_index) for (sheet_index, sheet) in enumerate(self._workbook.worksheets)]
|
def _set_sheet_data(self) -> List[SheetData]:
'\n Iterator that will work through the sheets by using the column and row lengths, isolating all the content\n within a given sheet. This means that the end result is a nested list of sheet-column-row.\n '
return [self._set_data(sheet, sheet_index) for (sheet_index, sheet) in enumerate(self._workbook.worksheets)]<|docstring|>Iterator that will work through the sheets by using the column and row lengths, isolating all the content
within a given sheet. This means that the end result is a nested list of sheet-column-row.<|endoftext|>
|
9cd350564ca674be68bdaa3da3d4c3ec89b28f893ff7cab51ca9da1abc3519ec
|
def _set_data(self, sheet, sheet_index: int) -> SheetData:
'\n This sets the data for a given sheet by taking the row and column lengths and then iterating through the sheets\n columns and rows by using range indexing.\n\n NOTE\n ----\n openpyxl requires base 1 not base 0 hence range\n '
if self._file_headers:
row_start = 2
else:
row_start = 1
row_end = (self.sheet_row_count[sheet_index] + 1)
sheet_data = [[sheet[f'{get_column_letter(col_i)}{row_i}'].value for row_i in range(row_start, row_end)] for col_i in range(1, (self.sheet_col_count[sheet_index] + 1))]
return SheetData(self.sheet_names[sheet_index], self.sheet_headers[sheet_index], sheet_data)
|
This sets the data for a given sheet by taking the row and column lengths and then iterating through the sheets
columns and rows by using range indexing.
NOTE
----
openpyxl requires base 1 not base 0 hence range
|
xlsxObject/XlsxObject.py
|
_set_data
|
sbaker-dev/xslxObject
| 0 |
python
|
def _set_data(self, sheet, sheet_index: int) -> SheetData:
'\n This sets the data for a given sheet by taking the row and column lengths and then iterating through the sheets\n columns and rows by using range indexing.\n\n NOTE\n ----\n openpyxl requires base 1 not base 0 hence range\n '
if self._file_headers:
row_start = 2
else:
row_start = 1
row_end = (self.sheet_row_count[sheet_index] + 1)
sheet_data = [[sheet[f'{get_column_letter(col_i)}{row_i}'].value for row_i in range(row_start, row_end)] for col_i in range(1, (self.sheet_col_count[sheet_index] + 1))]
return SheetData(self.sheet_names[sheet_index], self.sheet_headers[sheet_index], sheet_data)
|
def _set_data(self, sheet, sheet_index: int) -> SheetData:
'\n This sets the data for a given sheet by taking the row and column lengths and then iterating through the sheets\n columns and rows by using range indexing.\n\n NOTE\n ----\n openpyxl requires base 1 not base 0 hence range\n '
if self._file_headers:
row_start = 2
else:
row_start = 1
row_end = (self.sheet_row_count[sheet_index] + 1)
sheet_data = [[sheet[f'{get_column_letter(col_i)}{row_i}'].value for row_i in range(row_start, row_end)] for col_i in range(1, (self.sheet_col_count[sheet_index] + 1))]
return SheetData(self.sheet_names[sheet_index], self.sheet_headers[sheet_index], sheet_data)<|docstring|>This sets the data for a given sheet by taking the row and column lengths and then iterating through the sheets
columns and rows by using range indexing.
NOTE
----
openpyxl requires base 1 not base 0 hence range<|endoftext|>
|
0465e52f387ad8fd3528ad5a711c45a8e6a68feb62c6275d92141b5a46f76c46
|
def _area_of_pixel(pixel_size, center_lat):
'Calculate m^2 area of a wgs84 square pixel.\n\n Adapted from: https://gis.stackexchange.com/a/127327/2397\n\n Args:\n pixel_size (float): length of side of pixel in degrees.\n center_lat (float): latitude of the center of the pixel. Note this\n value +/- half the `pixel-size` must not exceed 90/-90 degrees\n latitude or an invalid area will be calculated.\n\n Returns:\n Area of square pixel of side length `pixel_size` centered at\n `center_lat` in m^2.\n\n '
a = 6378137
b = 6356752.3142
e = math.sqrt((1 - ((b / a) ** 2)))
area_list = []
for f in [(center_lat + (pixel_size / 2)), (center_lat - (pixel_size / 2))]:
zm = (1 - (e * math.sin(math.radians(f))))
zp = (1 + (e * math.sin(math.radians(f))))
area_list.append(((math.pi * (b ** 2)) * ((math.log((zp / zm)) / (2 * e)) + (math.sin(math.radians(f)) / (zp * zm)))))
return abs(((pixel_size / 360.0) * (area_list[0] - area_list[1])))
|
Calculate m^2 area of a wgs84 square pixel.
Adapted from: https://gis.stackexchange.com/a/127327/2397
Args:
pixel_size (float): length of side of pixel in degrees.
center_lat (float): latitude of the center of the pixel. Note this
value +/- half the `pixel-size` must not exceed 90/-90 degrees
latitude or an invalid area will be calculated.
Returns:
Area of square pixel of side length `pixel_size` centered at
`center_lat` in m^2.
|
raster_stats.py
|
_area_of_pixel
|
richpsharp/raster_calculations
| 2 |
python
|
def _area_of_pixel(pixel_size, center_lat):
'Calculate m^2 area of a wgs84 square pixel.\n\n Adapted from: https://gis.stackexchange.com/a/127327/2397\n\n Args:\n pixel_size (float): length of side of pixel in degrees.\n center_lat (float): latitude of the center of the pixel. Note this\n value +/- half the `pixel-size` must not exceed 90/-90 degrees\n latitude or an invalid area will be calculated.\n\n Returns:\n Area of square pixel of side length `pixel_size` centered at\n `center_lat` in m^2.\n\n '
a = 6378137
b = 6356752.3142
e = math.sqrt((1 - ((b / a) ** 2)))
area_list = []
for f in [(center_lat + (pixel_size / 2)), (center_lat - (pixel_size / 2))]:
zm = (1 - (e * math.sin(math.radians(f))))
zp = (1 + (e * math.sin(math.radians(f))))
area_list.append(((math.pi * (b ** 2)) * ((math.log((zp / zm)) / (2 * e)) + (math.sin(math.radians(f)) / (zp * zm)))))
return abs(((pixel_size / 360.0) * (area_list[0] - area_list[1])))
|
def _area_of_pixel(pixel_size, center_lat):
'Calculate m^2 area of a wgs84 square pixel.\n\n Adapted from: https://gis.stackexchange.com/a/127327/2397\n\n Args:\n pixel_size (float): length of side of pixel in degrees.\n center_lat (float): latitude of the center of the pixel. Note this\n value +/- half the `pixel-size` must not exceed 90/-90 degrees\n latitude or an invalid area will be calculated.\n\n Returns:\n Area of square pixel of side length `pixel_size` centered at\n `center_lat` in m^2.\n\n '
a = 6378137
b = 6356752.3142
e = math.sqrt((1 - ((b / a) ** 2)))
area_list = []
for f in [(center_lat + (pixel_size / 2)), (center_lat - (pixel_size / 2))]:
zm = (1 - (e * math.sin(math.radians(f))))
zp = (1 + (e * math.sin(math.radians(f))))
area_list.append(((math.pi * (b ** 2)) * ((math.log((zp / zm)) / (2 * e)) + (math.sin(math.radians(f)) / (zp * zm)))))
return abs(((pixel_size / 360.0) * (area_list[0] - area_list[1])))<|docstring|>Calculate m^2 area of a wgs84 square pixel.
Adapted from: https://gis.stackexchange.com/a/127327/2397
Args:
pixel_size (float): length of side of pixel in degrees.
center_lat (float): latitude of the center of the pixel. Note this
value +/- half the `pixel-size` must not exceed 90/-90 degrees
latitude or an invalid area will be calculated.
Returns:
Area of square pixel of side length `pixel_size` centered at
`center_lat` in m^2.<|endoftext|>
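A quick sanity check of the formula above, assuming the function is importable from this module: a 1-degree pixel covers roughly 12,300 km^2 near the equator and shrinks toward the poles.

for lat in (0.5, 45.0, 60.0):
    area_km2 = _area_of_pixel(1.0, lat) / 1e6  # m^2 -> km^2
    print(f"{lat:5.1f} deg latitude: ~{area_km2:,.0f} km^2")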
|
d5a3160913f88f1d90860499788e4954e8e692933477c1e1818c7b4a7865d078
|
def main():
'Entry point.'
parser = argparse.ArgumentParser(description='Calculate raster stats.')
parser.add_argument('raster_path', help='path to raster')
args = parser.parse_args()
raster_info = get_raster_info(args.raster_path)
raster_srs = osr.SpatialReference()
raster_srs.ImportFromWkt(raster_info['projection_wkt'])
LOGGER.debug(f'projected: {raster_srs.IsProjected()}')
pixel_area = abs(numpy.prod(raster_info['pixel_size']))
target_csv_path = f'{os.path.basename(os.path.splitext(args.raster_path)[0])}.csv'
with open(target_csv_path, 'w') as csv_file:
csv_file.write('pixel_value,pixel_count,area (raster units)')
if (not raster_srs.IsProjected()):
csv_file.write(',area m^2')
csv_file.write('\n')
pixel_stat_dict = collections.defaultdict(int)
area_stat_dict = collections.defaultdict(float)
for (offset_info, block_data) in iterblocks((args.raster_path, 1)):
(unique_vals, frequency) = numpy.unique(block_data, return_counts=True)
for (val, count) in zip(unique_vals, frequency):
pixel_stat_dict[val] += count
if (not raster_srs.IsProjected()):
n_rows = offset_info['win_ysize']
pixel_height = abs(raster_info['geotransform'][5])
(_, y_origin) = gdal.ApplyGeoTransform(raster_info['geotransform'], 0, offset_info['yoff'])
(_, y_bot) = gdal.ApplyGeoTransform(raster_info['geotransform'], 0, (offset_info['yoff'] + offset_info['win_ysize']))
miny = (y_origin + (pixel_height / 2))
maxy = (y_bot + (pixel_height / 2))
lat_vals = numpy.linspace(maxy, miny, n_rows)
deg_area_vals = numpy.expand_dims(numpy.array([_area_of_pixel(pixel_height, val) for val in lat_vals]), axis=1)
for val in unique_vals:
area_stat_dict[val] += numpy.sum(((block_data == val) * deg_area_vals))
with open(target_csv_path, 'a') as csv_file:
for val in sorted(pixel_stat_dict):
csv_file.write(f'{val},{pixel_stat_dict[val]},{(pixel_area * pixel_stat_dict[val])}')
if (not raster_srs.IsProjected()):
csv_file.write(f',{area_stat_dict[val]}')
csv_file.write('\n')
|
Entry point.
|
raster_stats.py
|
main
|
richpsharp/raster_calculations
| 2 |
python
|
def main():
parser = argparse.ArgumentParser(description='Calculate raster stats.')
parser.add_argument('raster_path', help='path to raster')
args = parser.parse_args()
raster_info = get_raster_info(args.raster_path)
raster_srs = osr.SpatialReference()
raster_srs.ImportFromWkt(raster_info['projection_wkt'])
LOGGER.debug(f'projected: {raster_srs.IsProjected()}')
pixel_area = abs(numpy.prod(raster_info['pixel_size']))
target_csv_path = f'{os.path.basename(os.path.splitext(args.raster_path)[0])}.csv'
with open(target_csv_path, 'w') as csv_file:
csv_file.write('pixel_value,pixel_count,area (raster units)')
if (not raster_srs.IsProjected()):
csv_file.write(',area m^2')
csv_file.write('\n')
pixel_stat_dict = collections.defaultdict(int)
area_stat_dict = collections.defaultdict(float)
for (offset_info, block_data) in iterblocks((args.raster_path, 1)):
(unique_vals, frequency) = numpy.unique(block_data, return_counts=True)
for (val, count) in zip(unique_vals, frequency):
pixel_stat_dict[val] += count
if (not raster_srs.IsProjected()):
n_rows = offset_info['win_ysize']
pixel_height = abs(raster_info['geotransform'][5])
(_, y_origin) = gdal.ApplyGeoTransform(raster_info['geotransform'], 0, offset_info['yoff'])
(_, y_bot) = gdal.ApplyGeoTransform(raster_info['geotransform'], 0, (offset_info['yoff'] + offset_info['win_ysize']))
miny = (y_origin + (pixel_height / 2))
maxy = (y_bot + (pixel_height / 2))
lat_vals = numpy.linspace(maxy, miny, n_rows)
deg_area_vals = numpy.expand_dims(numpy.array([_area_of_pixel(pixel_height, val) for val in lat_vals]), axis=1)
for val in unique_vals:
area_stat_dict[val] += numpy.sum(((block_data == val) * deg_area_vals))
with open(target_csv_path, 'a') as csv_file:
for val in sorted(pixel_stat_dict):
csv_file.write(f'{val},{pixel_stat_dict[val]},{(pixel_area * pixel_stat_dict[val])}')
if (not raster_srs.IsProjected()):
csv_file.write(f',{area_stat_dict[val]}')
csv_file.write('\n')
|
def main():
parser = argparse.ArgumentParser(description='Calculate raster stats.')
parser.add_argument('raster_path', help='path to raster')
args = parser.parse_args()
raster_info = get_raster_info(args.raster_path)
raster_srs = osr.SpatialReference()
raster_srs.ImportFromWkt(raster_info['projection_wkt'])
LOGGER.debug(f'projected: {raster_srs.IsProjected()}')
pixel_area = abs(numpy.prod(raster_info['pixel_size']))
target_csv_path = f'{os.path.basename(os.path.splitext(args.raster_path)[0])}.csv'
with open(target_csv_path, 'w') as csv_file:
csv_file.write('pixel_value,pixel_count,area (raster units)')
if (not raster_srs.IsProjected()):
csv_file.write(',area m^2')
csv_file.write('\n')
pixel_stat_dict = collections.defaultdict(int)
area_stat_dict = collections.defaultdict(float)
for (offset_info, block_data) in iterblocks((args.raster_path, 1)):
(unique_vals, frequency) = numpy.unique(block_data, return_counts=True)
for (val, count) in zip(unique_vals, frequency):
pixel_stat_dict[val] += count
if (not raster_srs.IsProjected()):
n_rows = offset_info['win_ysize']
pixel_height = abs(raster_info['geotransform'][5])
(_, y_origin) = gdal.ApplyGeoTransform(raster_info['geotransform'], 0, offset_info['yoff'])
(_, y_bot) = gdal.ApplyGeoTransform(raster_info['geotransform'], 0, (offset_info['yoff'] + offset_info['win_ysize']))
miny = (y_origin + (pixel_height / 2))
maxy = (y_bot + (pixel_height / 2))
lat_vals = numpy.linspace(maxy, miny, n_rows)
deg_area_vals = numpy.expand_dims(numpy.array([_area_of_pixel(pixel_height, val) for val in lat_vals]), axis=1)
for val in unique_vals:
area_stat_dict[val] += numpy.sum(((block_data == val) * deg_area_vals))
with open(target_csv_path, 'a') as csv_file:
for val in sorted(pixel_stat_dict):
csv_file.write(f'{val},{pixel_stat_dict[val]},{(pixel_area * pixel_stat_dict[val])}')
if (not raster_srs.IsProjected()):
csv_file.write(f',{area_stat_dict[val]}')
csv_file.write('\n')<|docstring|>Entry point.<|endoftext|>
|
031115dda8db74db8db2780913caa1b5404186286213dc7ca3f665e498e95262
|
@pytest.fixture()
def vera_component_factory():
'Return a factory for initializing the vera component.'
with patch('pyvera.init_controller') as init_controller_mock:
(yield ComponentFactory(init_controller_mock))
|
Return a factory for initializing the vera component.
|
tests/components/vera/conftest.py
|
vera_component_factory
|
nferreyra/home-assistant
| 23 |
python
|
@pytest.fixture()
def vera_component_factory():
with patch('pyvera.init_controller') as init_controller_mock:
(yield ComponentFactory(init_controller_mock))
|
@pytest.fixture()
def vera_component_factory():
with patch('pyvera.init_controller') as init_controller_mock:
(yield ComponentFactory(init_controller_mock))<|docstring|>Return a factory for initializing the vera component.<|endoftext|>
|
bf591bad9f1c083d707716ccb22a68ffc86e80741f3bd040a8870a18aacce05d
|
def list(self, **kwargs):
'List Indicator objects\n\n The list method accepts the following kwargs:\n\n :param list filters: (optional) the filters to apply\n :param str search: (optional) a search keyword to apply for the listing\n :param int first: (optional) return the first n rows from the `after` ID\n or the beginning if not set\n :param str after: (optional) OpenCTI object ID of the first row for pagination\n :param str orderBy: (optional) the field to order the response on\n :param bool orderMode: (optional) either "`asc`" or "`desc`"\n :param list customAttributes: (optional) list of attributes keys to return\n :param bool getAll: (optional) switch to return all entries (be careful to use this without any other filters)\n :param bool withPagination: (optional) switch to use pagination\n\n :return: List of Indicators\n :rtype: list\n '
filters = kwargs.get('filters', None)
search = kwargs.get('search', None)
first = kwargs.get('first', 500)
after = kwargs.get('after', None)
order_by = kwargs.get('orderBy', None)
order_mode = kwargs.get('orderMode', None)
custom_attributes = kwargs.get('customAttributes', None)
get_all = kwargs.get('getAll', False)
with_pagination = kwargs.get('withPagination', False)
if get_all:
first = 100
self.opencti.log('info', (('Listing Indicators with filters ' + json.dumps(filters)) + '.'))
query = (('\n query Indicators($filters: [IndicatorsFiltering], $search: String, $first: Int, $after: ID, $orderBy: IndicatorsOrdering, $orderMode: OrderingMode) {\n indicators(filters: $filters, search: $search, first: $first, after: $after, orderBy: $orderBy, orderMode: $orderMode) {\n edges {\n node {\n ' + (custom_attributes if (custom_attributes is not None) else self.properties)) + '\n }\n }\n pageInfo {\n startCursor\n endCursor\n hasNextPage\n hasPreviousPage\n globalCount\n }\n }\n }\n ')
result = self.opencti.query(query, {'filters': filters, 'search': search, 'first': first, 'after': after, 'orderBy': order_by, 'orderMode': order_mode})
if get_all:
final_data = []
data = self.opencti.process_multiple(result['data']['indicators'])
final_data = (final_data + data)
while result['data']['indicators']['pageInfo']['hasNextPage']:
after = result['data']['indicators']['pageInfo']['endCursor']
self.opencti.log('info', ('Listing Indicators after ' + after))
result = self.opencti.query(query, {'filters': filters, 'search': search, 'first': first, 'after': after, 'orderBy': order_by, 'orderMode': order_mode})
data = self.opencti.process_multiple(result['data']['indicators'])
final_data = (final_data + data)
return final_data
else:
return self.opencti.process_multiple(result['data']['indicators'], with_pagination)
|
List Indicator objects
The list method accepts the following kwargs:
:param list filters: (optional) the filters to apply
:param str search: (optional) a search keyword to apply for the listing
:param int first: (optional) return the first n rows from the `after` ID
or the beginning if not set
:param str after: (optional) OpenCTI object ID of the first row for pagination
:param str orderBy: (optional) the field to order the response on
:param bool orderMode: (optional) either "`asc`" or "`desc`"
:param list customAttributes: (optional) list of attributes keys to return
:param bool getAll: (optional) switch to return all entries (be careful to use this without any other filters)
:param bool withPagination: (optional) switch to use pagination
:return: List of Indicators
:rtype: list
|
pycti/entities/opencti_indicator.py
|
list
|
djds/client-python
| 1 |
python
|
def list(self, **kwargs):
'List Indicator objects\n\n The list method accepts the following kwargs:\n\n :param list filters: (optional) the filters to apply\n :param str search: (optional) a search keyword to apply for the listing\n :param int first: (optional) return the first n rows from the `after` ID\n or the beginning if not set\n :param str after: (optional) OpenCTI object ID of the first row for pagination\n :param str orderBy: (optional) the field to order the response on\n :param bool orderMode: (optional) either "`asc`" or "`desc`"\n :param list customAttributes: (optional) list of attributes keys to return\n :param bool getAll: (optional) switch to return all entries (be careful to use this without any other filters)\n :param bool withPagination: (optional) switch to use pagination\n\n :return: List of Indicators\n :rtype: list\n '
filters = kwargs.get('filters', None)
search = kwargs.get('search', None)
first = kwargs.get('first', 500)
after = kwargs.get('after', None)
order_by = kwargs.get('orderBy', None)
order_mode = kwargs.get('orderMode', None)
custom_attributes = kwargs.get('customAttributes', None)
get_all = kwargs.get('getAll', False)
with_pagination = kwargs.get('withPagination', False)
if get_all:
first = 100
self.opencti.log('info', (('Listing Indicators with filters ' + json.dumps(filters)) + '.'))
query = (('\n query Indicators($filters: [IndicatorsFiltering], $search: String, $first: Int, $after: ID, $orderBy: IndicatorsOrdering, $orderMode: OrderingMode) {\n indicators(filters: $filters, search: $search, first: $first, after: $after, orderBy: $orderBy, orderMode: $orderMode) {\n edges {\n node {\n ' + (custom_attributes if (custom_attributes is not None) else self.properties)) + '\n }\n }\n pageInfo {\n startCursor\n endCursor\n hasNextPage\n hasPreviousPage\n globalCount\n }\n }\n }\n ')
result = self.opencti.query(query, {'filters': filters, 'search': search, 'first': first, 'after': after, 'orderBy': order_by, 'orderMode': order_mode})
if get_all:
final_data = []
data = self.opencti.process_multiple(result['data']['indicators'])
final_data = (final_data + data)
while result['data']['indicators']['pageInfo']['hasNextPage']:
after = result['data']['indicators']['pageInfo']['endCursor']
self.opencti.log('info', ('Listing Indicators after ' + after))
result = self.opencti.query(query, {'filters': filters, 'search': search, 'first': first, 'after': after, 'orderBy': order_by, 'orderMode': order_mode})
data = self.opencti.process_multiple(result['data']['indicators'])
final_data = (final_data + data)
return final_data
else:
return self.opencti.process_multiple(result['data']['indicators'], with_pagination)
|
def list(self, **kwargs):
'List Indicator objects\n\n The list method accepts the following kwargs:\n\n :param list filters: (optional) the filters to apply\n :param str search: (optional) a search keyword to apply for the listing\n :param int first: (optional) return the first n rows from the `after` ID\n or the beginning if not set\n :param str after: (optional) OpenCTI object ID of the first row for pagination\n :param str orderBy: (optional) the field to order the response on\n :param bool orderMode: (optional) either "`asc`" or "`desc`"\n :param list customAttributes: (optional) list of attributes keys to return\n :param bool getAll: (optional) switch to return all entries (be careful to use this without any other filters)\n :param bool withPagination: (optional) switch to use pagination\n\n :return: List of Indicators\n :rtype: list\n '
filters = kwargs.get('filters', None)
search = kwargs.get('search', None)
first = kwargs.get('first', 500)
after = kwargs.get('after', None)
order_by = kwargs.get('orderBy', None)
order_mode = kwargs.get('orderMode', None)
custom_attributes = kwargs.get('customAttributes', None)
get_all = kwargs.get('getAll', False)
with_pagination = kwargs.get('withPagination', False)
if get_all:
first = 100
self.opencti.log('info', (('Listing Indicators with filters ' + json.dumps(filters)) + '.'))
query = (('\n query Indicators($filters: [IndicatorsFiltering], $search: String, $first: Int, $after: ID, $orderBy: IndicatorsOrdering, $orderMode: OrderingMode) {\n indicators(filters: $filters, search: $search, first: $first, after: $after, orderBy: $orderBy, orderMode: $orderMode) {\n edges {\n node {\n ' + (custom_attributes if (custom_attributes is not None) else self.properties)) + '\n }\n }\n pageInfo {\n startCursor\n endCursor\n hasNextPage\n hasPreviousPage\n globalCount\n }\n }\n }\n ')
result = self.opencti.query(query, {'filters': filters, 'search': search, 'first': first, 'after': after, 'orderBy': order_by, 'orderMode': order_mode})
if get_all:
final_data = []
data = self.opencti.process_multiple(result['data']['indicators'])
final_data = (final_data + data)
while result['data']['indicators']['pageInfo']['hasNextPage']:
after = result['data']['indicators']['pageInfo']['endCursor']
self.opencti.log('info', ('Listing Indicators after ' + after))
result = self.opencti.query(query, {'filters': filters, 'search': search, 'first': first, 'after': after, 'orderBy': order_by, 'orderMode': order_mode})
data = self.opencti.process_multiple(result['data']['indicators'])
final_data = (final_data + data)
return final_data
else:
return self.opencti.process_multiple(result['data']['indicators'], with_pagination)<|docstring|>List Indicator objects
The list method accepts the following kwargs:
:param list filters: (optional) the filters to apply
:param str search: (optional) a search keyword to apply for the listing
:param int first: (optional) return the first n rows from the `after` ID
or the beginning if not set
:param str after: (optional) OpenCTI object ID of the first row for pagination
:param str orderBy: (optional) the field to order the response on
:param bool orderMode: (optional) either "`asc`" or "`desc`"
:param list customAttributes: (optional) list of attributes keys to return
:param bool getAll: (optional) switch to return all entries (be careful to use this without any other filters)
:param bool withPagination: (optional) switch to use pagination
:return: List of Indicators
:rtype: list<|endoftext|>
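A hedged usage sketch for the method above; the URL and token are placeholders, and reaching it as api.indicator follows pycti's usual client layout (an assumption about the surrounding client, not something stated in this record):

from pycti import OpenCTIApiClient

api = OpenCTIApiClient("https://opencti.example.com", "<api-token>")
indicators = api.indicator.list(
    search="example.com",
    first=50,
    orderBy="created",
    orderMode="desc",
)
for indicator in indicators:
    print(indicator["name"], indicator["pattern"])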
|
17ad06175c871725c8b2c5fa51d85560f2c1ccc7350800c8fe8b31e5ddf39280
|
def read(self, **kwargs):
'Read an Indicator object\n\n read can be either used with a known OpenCTI entity `id` or by using a\n valid filter to search and return a single Indicator entity or None.\n\n The list method accepts the following kwargs.\n\n Note: either `id` or `filters` is required.\n\n :param str id: the id of the Threat-Actor\n :param list filters: the filters to apply if no id provided\n\n :return: Indicator object\n :rtype: Indicator\n '
id = kwargs.get('id', None)
filters = kwargs.get('filters', None)
custom_attributes = kwargs.get('customAttributes', None)
if (id is not None):
self.opencti.log('info', (('Reading Indicator {' + id) + '}.'))
query = (('\n query Indicator($id: String!) {\n indicator(id: $id) {\n ' + (custom_attributes if (custom_attributes is not None) else self.properties)) + '\n }\n }\n ')
result = self.opencti.query(query, {'id': id})
return self.opencti.process_multiple_fields(result['data']['indicator'])
elif (filters is not None):
result = self.list(filters=filters, customAttributes=custom_attributes)
if (len(result) > 0):
return result[0]
else:
return None
else:
self.opencti.log('error', '[opencti_indicator] Missing parameters: id or filters')
return None
|
Read an Indicator object
read can be either used with a known OpenCTI entity `id` or by using a
valid filter to search and return a single Indicator entity or None.
The list method accepts the following kwargs.
Note: either `id` or `filters` is required.
:param str id: the id of the Indicator
:param list filters: the filters to apply if no id provided
:return: Indicator object
:rtype: Indicator
|
pycti/entities/opencti_indicator.py
|
read
|
djds/client-python
| 1 |
python
|
def read(self, **kwargs):
'Read an Indicator object\n\n read can be either used with a known OpenCTI entity `id` or by using a\n valid filter to search and return a single Indicator entity or None.\n\n The list method accepts the following kwargs.\n\n Note: either `id` or `filters` is required.\n\n :param str id: the id of the Threat-Actor\n :param list filters: the filters to apply if no id provided\n\n :return: Indicator object\n :rtype: Indicator\n '
id = kwargs.get('id', None)
filters = kwargs.get('filters', None)
custom_attributes = kwargs.get('customAttributes', None)
if (id is not None):
self.opencti.log('info', (('Reading Indicator {' + id) + '}.'))
query = (('\n query Indicator($id: String!) {\n indicator(id: $id) {\n ' + (custom_attributes if (custom_attributes is not None) else self.properties)) + '\n }\n }\n ')
result = self.opencti.query(query, {'id': id})
return self.opencti.process_multiple_fields(result['data']['indicator'])
elif (filters is not None):
result = self.list(filters=filters, customAttributes=custom_attributes)
if (len(result) > 0):
return result[0]
else:
return None
else:
self.opencti.log('error', '[opencti_indicator] Missing parameters: id or filters')
return None
|
def read(self, **kwargs):
'Read an Indicator object\n\n read can be either used with a known OpenCTI entity `id` or by using a\n valid filter to search and return a single Indicator entity or None.\n\n The list method accepts the following kwargs.\n\n Note: either `id` or `filters` is required.\n\n :param str id: the id of the Threat-Actor\n :param list filters: the filters to apply if no id provided\n\n :return: Indicator object\n :rtype: Indicator\n '
id = kwargs.get('id', None)
filters = kwargs.get('filters', None)
custom_attributes = kwargs.get('customAttributes', None)
if (id is not None):
self.opencti.log('info', (('Reading Indicator {' + id) + '}.'))
query = (('\n query Indicator($id: String!) {\n indicator(id: $id) {\n ' + (custom_attributes if (custom_attributes is not None) else self.properties)) + '\n }\n }\n ')
result = self.opencti.query(query, {'id': id})
return self.opencti.process_multiple_fields(result['data']['indicator'])
elif (filters is not None):
result = self.list(filters=filters, customAttributes=custom_attributes)
if (len(result) > 0):
return result[0]
else:
return None
else:
self.opencti.log('error', '[opencti_indicator] Missing parameters: id or filters')
return None<|docstring|>Read an Indicator object
read can be either used with a known OpenCTI entity `id` or by using a
valid filter to search and return a single Indicator entity or None.
The list method accepts the following kwargs.
Note: either `id` or `filters` is required.
:param str id: the id of the Indicator
:param list filters: the filters to apply if no id provided
:return: Indicator object
:rtype: Indicator<|endoftext|>
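A matching sketch for read, again with a placeholder client and id; the field names in the print are assumptions drawn from the properties this class requests:

from pycti import OpenCTIApiClient

api = OpenCTIApiClient("https://opencti.example.com", "<api-token>")
indicator = api.indicator.read(id="indicator--00000000-0000-0000-0000-000000000000")
if indicator is not None:
    print(indicator["standard_id"], indicator["pattern"])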
|
706a9a6a7e02223d8ec3aeae5bacc1c0b49091fba11fe17a0bb8733b5dab9113
|
def create(self, **kwargs):
'\n Create an Indicator object\n\n :param str name: the name of the Indicator\n :param str pattern: stix indicator pattern\n :param str x_opencti_main_observable_type: type of the observable\n\n :return: Indicator object\n :rtype: Indicator\n '
stix_id = kwargs.get('stix_id', None)
created_by = kwargs.get('createdBy', None)
object_marking = kwargs.get('objectMarking', None)
object_label = kwargs.get('objectLabel', None)
external_references = kwargs.get('externalReferences', None)
revoked = kwargs.get('revoked', None)
confidence = kwargs.get('confidence', None)
lang = kwargs.get('lang', None)
created = kwargs.get('created', None)
modified = kwargs.get('modified', None)
pattern_type = kwargs.get('pattern_type', None)
pattern_version = kwargs.get('pattern_version', None)
pattern = kwargs.get('pattern', None)
name = kwargs.get('name', None)
description = kwargs.get('description', None)
indicator_types = kwargs.get('indicator_types', None)
valid_from = kwargs.get('valid_from', None)
valid_until = kwargs.get('valid_until', None)
x_opencti_score = kwargs.get('x_opencti_score', 50)
x_opencti_detection = kwargs.get('x_opencti_detection', False)
x_opencti_main_observable_type = kwargs.get('x_opencti_main_observable_type', None)
x_mitre_platforms = kwargs.get('x_mitre_platforms', None)
kill_chain_phases = kwargs.get('killChainPhases', None)
update = kwargs.get('update', False)
if ((name is not None) and (pattern is not None) and (x_opencti_main_observable_type is not None)):
if (x_opencti_main_observable_type == 'File'):
x_opencti_main_observable_type = 'StixFile'
self.opencti.log('info', (('Creating Indicator {' + name) + '}.'))
query = '\n mutation IndicatorAdd($input: IndicatorAddInput) {\n indicatorAdd(input: $input) {\n id\n standard_id\n entity_type\n parent_types\n observables {\n edges {\n node {\n id\n standard_id\n entity_type\n }\n }\n }\n }\n }\n '
if (pattern_type is None):
pattern_type = 'stix2'
result = self.opencti.query(query, {'input': {'stix_id': stix_id, 'createdBy': created_by, 'objectMarking': object_marking, 'objectLabel': object_label, 'externalReferences': external_references, 'revoked': revoked, 'confidence': confidence, 'lang': lang, 'created': created, 'modified': modified, 'pattern_type': pattern_type, 'pattern_version': pattern_version, 'pattern': pattern, 'name': name, 'description': description, 'indicator_types': indicator_types, 'valid_until': valid_until, 'valid_from': valid_from, 'x_opencti_score': x_opencti_score, 'x_opencti_detection': x_opencti_detection, 'x_opencti_main_observable_type': x_opencti_main_observable_type, 'x_mitre_platforms': x_mitre_platforms, 'killChainPhases': kill_chain_phases, 'update': update}})
return self.opencti.process_multiple_fields(result['data']['indicatorAdd'])
else:
self.opencti.log('error', '[opencti_indicator] Missing parameters: name or pattern or x_opencti_main_observable_type')
|
Create an Indicator object
:param str name: the name of the Indicator
:param str pattern: stix indicator pattern
:param str x_opencti_main_observable_type: type of the observable
:return: Indicator object
:rtype: Indicator
|
pycti/entities/opencti_indicator.py
|
create
|
djds/client-python
| 1 |
python
|
def create(self, **kwargs):
'\n Create an Indicator object\n\n :param str name: the name of the Indicator\n :param str pattern: stix indicator pattern\n :param str x_opencti_main_observable_type: type of the observable\n\n :return: Indicator object\n :rtype: Indicator\n '
stix_id = kwargs.get('stix_id', None)
created_by = kwargs.get('createdBy', None)
object_marking = kwargs.get('objectMarking', None)
object_label = kwargs.get('objectLabel', None)
external_references = kwargs.get('externalReferences', None)
revoked = kwargs.get('revoked', None)
confidence = kwargs.get('confidence', None)
lang = kwargs.get('lang', None)
created = kwargs.get('created', None)
modified = kwargs.get('modified', None)
pattern_type = kwargs.get('pattern_type', None)
pattern_version = kwargs.get('pattern_version', None)
pattern = kwargs.get('pattern', None)
name = kwargs.get('name', None)
description = kwargs.get('description', None)
indicator_types = kwargs.get('indicator_types', None)
valid_from = kwargs.get('valid_from', None)
valid_until = kwargs.get('valid_until', None)
x_opencti_score = kwargs.get('x_opencti_score', 50)
x_opencti_detection = kwargs.get('x_opencti_detection', False)
x_opencti_main_observable_type = kwargs.get('x_opencti_main_observable_type', None)
x_mitre_platforms = kwargs.get('x_mitre_platforms', None)
kill_chain_phases = kwargs.get('killChainPhases', None)
update = kwargs.get('update', False)
if ((name is not None) and (pattern is not None) and (x_opencti_main_observable_type is not None)):
if (x_opencti_main_observable_type == 'File'):
x_opencti_main_observable_type = 'StixFile'
self.opencti.log('info', (('Creating Indicator {' + name) + '}.'))
query = '\n mutation IndicatorAdd($input: IndicatorAddInput) {\n indicatorAdd(input: $input) {\n id\n standard_id\n entity_type\n parent_types\n observables {\n edges {\n node {\n id\n standard_id\n entity_type\n }\n }\n }\n }\n }\n '
if (pattern_type is None):
pattern_type = 'stix2'
result = self.opencti.query(query, {'input': {'stix_id': stix_id, 'createdBy': created_by, 'objectMarking': object_marking, 'objectLabel': object_label, 'externalReferences': external_references, 'revoked': revoked, 'confidence': confidence, 'lang': lang, 'created': created, 'modified': modified, 'pattern_type': pattern_type, 'pattern_version': pattern_version, 'pattern': pattern, 'name': name, 'description': description, 'indicator_types': indicator_types, 'valid_until': valid_until, 'valid_from': valid_from, 'x_opencti_score': x_opencti_score, 'x_opencti_detection': x_opencti_detection, 'x_opencti_main_observable_type': x_opencti_main_observable_type, 'x_mitre_platforms': x_mitre_platforms, 'killChainPhases': kill_chain_phases, 'update': update}})
return self.opencti.process_multiple_fields(result['data']['indicatorAdd'])
else:
self.opencti.log('error', '[opencti_indicator] Missing parameters: name or pattern or x_opencti_main_observable_type')
|
def create(self, **kwargs):
'\n Create an Indicator object\n\n :param str name: the name of the Indicator\n :param str pattern: stix indicator pattern\n :param str x_opencti_main_observable_type: type of the observable\n\n :return: Indicator object\n :rtype: Indicator\n '
stix_id = kwargs.get('stix_id', None)
created_by = kwargs.get('createdBy', None)
object_marking = kwargs.get('objectMarking', None)
object_label = kwargs.get('objectLabel', None)
external_references = kwargs.get('externalReferences', None)
revoked = kwargs.get('revoked', None)
confidence = kwargs.get('confidence', None)
lang = kwargs.get('lang', None)
created = kwargs.get('created', None)
modified = kwargs.get('modified', None)
pattern_type = kwargs.get('pattern_type', None)
pattern_version = kwargs.get('pattern_version', None)
pattern = kwargs.get('pattern', None)
name = kwargs.get('name', None)
description = kwargs.get('description', None)
indicator_types = kwargs.get('indicator_types', None)
valid_from = kwargs.get('valid_from', None)
valid_until = kwargs.get('valid_until', None)
x_opencti_score = kwargs.get('x_opencti_score', 50)
x_opencti_detection = kwargs.get('x_opencti_detection', False)
x_opencti_main_observable_type = kwargs.get('x_opencti_main_observable_type', None)
x_mitre_platforms = kwargs.get('x_mitre_platforms', None)
kill_chain_phases = kwargs.get('killChainPhases', None)
update = kwargs.get('update', False)
if ((name is not None) and (pattern is not None) and (x_opencti_main_observable_type is not None)):
if (x_opencti_main_observable_type == 'File'):
x_opencti_main_observable_type = 'StixFile'
self.opencti.log('info', (('Creating Indicator {' + name) + '}.'))
query = '\n mutation IndicatorAdd($input: IndicatorAddInput) {\n indicatorAdd(input: $input) {\n id\n standard_id\n entity_type\n parent_types\n observables {\n edges {\n node {\n id\n standard_id\n entity_type\n }\n }\n }\n }\n }\n '
if (pattern_type is None):
pattern_type = 'stix2'
result = self.opencti.query(query, {'input': {'stix_id': stix_id, 'createdBy': created_by, 'objectMarking': object_marking, 'objectLabel': object_label, 'externalReferences': external_references, 'revoked': revoked, 'confidence': confidence, 'lang': lang, 'created': created, 'modified': modified, 'pattern_type': pattern_type, 'pattern_version': pattern_version, 'pattern': pattern, 'name': name, 'description': description, 'indicator_types': indicator_types, 'valid_until': valid_until, 'valid_from': valid_from, 'x_opencti_score': x_opencti_score, 'x_opencti_detection': x_opencti_detection, 'x_opencti_main_observable_type': x_opencti_main_observable_type, 'x_mitre_platforms': x_mitre_platforms, 'killChainPhases': kill_chain_phases, 'update': update}})
return self.opencti.process_multiple_fields(result['data']['indicatorAdd'])
else:
self.opencti.log('error', '[opencti_indicator] Missing parameters: name or pattern or x_opencti_main_observable_type')<|docstring|>Create an Indicator object
:param str name: the name of the Indicator
:param str pattern: stix indicator pattern
:param str x_opencti_main_observable_type: type of the observable
:return: Indicator object
:rtype: Indicator<|endoftext|>
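A minimal usage sketch for the create method above, assuming a reachable OpenCTI instance; the URL, token and indicator values are illustrative placeholders, not part of the source record.

from pycti import OpenCTIApiClient

client = OpenCTIApiClient("https://opencti.example.com", "<api-token>")  # placeholder credentials
indicator = client.indicator.create(
    name="Suspicious IP 198.51.100.7",
    pattern="[ipv4-addr:value = '198.51.100.7']",
    pattern_type="stix",
    x_opencti_main_observable_type="IPv4-Addr",
    x_opencti_score=75,
)
print(indicator["id"], indicator["standard_id"])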
|
3455d22b39a963e1169d68ff7ad11fb8bb757aa617ddf17ccedc8ad532f2b792
|
def add_stix_cyber_observable(self, **kwargs):
'\n Add a Stix-Cyber-Observable object to Indicator object (based-on)\n\n :param id: the id of the Indicator\n :param indicator: Indicator object\n :param stix_cyber_observable_id: the id of the Stix-Observable\n\n :return: Boolean True if there has been no import error\n '
id = kwargs.get('id', None)
indicator = kwargs.get('indicator', None)
stix_cyber_observable_id = kwargs.get('stix_cyber_observable_id', None)
if ((id is not None) and (stix_cyber_observable_id is not None)):
if (indicator is None):
indicator = self.read(id=id)
if (indicator is None):
self.opencti.log('error', '[opencti_indicator] Cannot add Object Ref, indicator not found')
return False
if (stix_cyber_observable_id in indicator['observablesIds']):
return True
else:
self.opencti.log('info', (((('Adding Stix-Observable {' + stix_cyber_observable_id) + '} to Indicator {') + id) + '}'))
query = '\n mutation StixCoreRelationshipAdd($input: StixCoreRelationshipAddInput!) {\n stixCoreRelationshipAdd(input: $input) {\n id\n }\n }\n '
self.opencti.query(query, {'id': id, 'input': {'fromId': id, 'toId': stix_cyber_observable_id, 'relationship_type': 'based-on'}})
return True
else:
self.opencti.log('error', '[opencti_indicator] Missing parameters: id and stix cyber_observable_id')
return False
|
Add a Stix-Cyber-Observable object to Indicator object (based-on)
:param id: the id of the Indicator
:param indicator: Indicator object
:param stix_cyber_observable_id: the id of the Stix-Observable
:return: Boolean True if there has been no import error
|
pycti/entities/opencti_indicator.py
|
add_stix_cyber_observable
|
djds/client-python
| 1 |
python
|
def add_stix_cyber_observable(self, **kwargs):
'\n Add a Stix-Cyber-Observable object to Indicator object (based-on)\n\n :param id: the id of the Indicator\n :param indicator: Indicator object\n :param stix_cyber_observable_id: the id of the Stix-Observable\n\n :return: Boolean True if there has been no import error\n '
id = kwargs.get('id', None)
indicator = kwargs.get('indicator', None)
stix_cyber_observable_id = kwargs.get('stix_cyber_observable_id', None)
if ((id is not None) and (stix_cyber_observable_id is not None)):
if (indicator is None):
indicator = self.read(id=id)
if (indicator is None):
self.opencti.log('error', '[opencti_indicator] Cannot add Object Ref, indicator not found')
return False
if (stix_cyber_observable_id in indicator['observablesIds']):
return True
else:
self.opencti.log('info', (((('Adding Stix-Observable {' + stix_cyber_observable_id) + '} to Indicator {') + id) + '}'))
query = '\n mutation StixCoreRelationshipAdd($input: StixCoreRelationshipAddInput!) {\n stixCoreRelationshipAdd(input: $input) {\n id\n }\n }\n '
self.opencti.query(query, {'id': id, 'input': {'fromId': id, 'toId': stix_cyber_observable_id, 'relationship_type': 'based-on'}})
return True
else:
self.opencti.log('error', '[opencti_indicator] Missing parameters: id and stix cyber_observable_id')
return False
|
def add_stix_cyber_observable(self, **kwargs):
'\n Add a Stix-Cyber-Observable object to Indicator object (based-on)\n\n :param id: the id of the Indicator\n :param indicator: Indicator object\n :param stix_cyber_observable_id: the id of the Stix-Observable\n\n :return: Boolean True if there has been no import error\n '
id = kwargs.get('id', None)
indicator = kwargs.get('indicator', None)
stix_cyber_observable_id = kwargs.get('stix_cyber_observable_id', None)
if ((id is not None) and (stix_cyber_observable_id is not None)):
if (indicator is None):
indicator = self.read(id=id)
if (indicator is None):
self.opencti.log('error', '[opencti_indicator] Cannot add Object Ref, indicator not found')
return False
if (stix_cyber_observable_id in indicator['observablesIds']):
return True
else:
self.opencti.log('info', (((('Adding Stix-Observable {' + stix_cyber_observable_id) + '} to Indicator {') + id) + '}'))
query = '\n mutation StixCoreRelationshipAdd($input: StixCoreRelationshipAddInput!) {\n stixCoreRelationshipAdd(input: $input) {\n id\n }\n }\n '
self.opencti.query(query, {'id': id, 'input': {'fromId': id, 'toId': stix_cyber_observable_id, 'relationship_type': 'based-on'}})
return True
else:
self.opencti.log('error', '[opencti_indicator] Missing parameters: id and stix cyber_observable_id')
return False<|docstring|>Add a Stix-Cyber-Observable object to Indicator object (based-on)
:param id: the id of the Indicator
:param indicator: Indicator object
:param stix_cyber_observable_id: the id of the Stix-Observable
:return: Boolean True if there has been no import error<|endoftext|>
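Continuing the same kind of sketch, add_stix_cyber_observable links an existing observable to an indicator with a based-on relationship; both ids below are hypothetical and the client setup is assumed.

from pycti import OpenCTIApiClient

client = OpenCTIApiClient("https://opencti.example.com", "<api-token>")  # placeholder credentials
ok = client.indicator.add_stix_cyber_observable(
    id="11111111-1111-4111-8111-111111111111",                          # hypothetical indicator id
    stix_cyber_observable_id="22222222-2222-4222-8222-222222222222",    # hypothetical observable id
)
print("based-on relationship created:", ok)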
|
3402b4efcf3876844233069297a4c173d310893f703801115fe9a33d48bc6ba0
|
def import_from_stix2(self, **kwargs):
'\n Import an Indicator object from a STIX2 object\n\n :param stixObject: the Stix-Object Indicator\n :param extras: extra dict\n :param bool update: set the update flag on import\n\n :return: Indicator object\n :rtype: Indicator\n '
stix_object = kwargs.get('stixObject', None)
extras = kwargs.get('extras', {})
update = kwargs.get('update', False)
if (stix_object is not None):
return self.create(stix_id=stix_object['id'], createdBy=(extras['created_by_id'] if ('created_by_id' in extras) else None), objectMarking=(extras['object_marking_ids'] if ('object_marking_ids' in extras) else None), objectLabel=(extras['object_label_ids'] if ('object_label_ids' in extras) else []), externalReferences=(extras['external_references_ids'] if ('external_references_ids' in extras) else []), revoked=(stix_object['revoked'] if ('revoked' in stix_object) else None), confidence=(stix_object['confidence'] if ('confidence' in stix_object) else None), lang=(stix_object['lang'] if ('lang' in stix_object) else None), created=(stix_object['created'] if ('created' in stix_object) else None), modified=(stix_object['modified'] if ('modified' in stix_object) else None), pattern_type=(stix_object['pattern_type'] if ('pattern_type' in stix_object) else None), pattern_version=(stix_object['pattern_version'] if ('pattern_version' in stix_object) else None), pattern=(stix_object['pattern'] if ('pattern' in stix_object) else ''), name=(stix_object['name'] if ('name' in stix_object) else stix_object['pattern']), description=(self.opencti.stix2.convert_markdown(stix_object['description']) if ('description' in stix_object) else ''), indicator_types=(stix_object['indicator_types'] if ('indicator_types' in stix_object) else None), valid_from=(stix_object['valid_from'] if ('valid_from' in stix_object) else None), valid_until=(stix_object['valid_until'] if ('valid_until' in stix_object) else None), x_opencti_score=(stix_object['x_opencti_score'] if ('x_opencti_score' in stix_object) else 50), x_opencti_detection=(stix_object['x_opencti_detection'] if ('x_opencti_detection' in stix_object) else False), x_opencti_main_observable_type=(stix_object['x_opencti_main_observable_type'] if ('x_opencti_main_observable_type' in stix_object) else 'Unknown'), killChainPhases=(extras['kill_chain_phases_ids'] if ('kill_chain_phases_ids' in extras) else None), update=update)
else:
self.opencti.log('error', '[opencti_attack_pattern] Missing parameters: stixObject')
|
Import an Indicator object from a STIX2 object
:param stixObject: the Stix-Object Indicator
:param extras: extra dict
:param bool update: set the update flag on import
:return: Indicator object
:rtype: Indicator
|
pycti/entities/opencti_indicator.py
|
import_from_stix2
|
djds/client-python
| 1 |
python
|
def import_from_stix2(self, **kwargs):
'\n Import an Indicator object from a STIX2 object\n\n :param stixObject: the Stix-Object Indicator\n :param extras: extra dict\n :param bool update: set the update flag on import\n\n :return: Indicator object\n :rtype: Indicator\n '
stix_object = kwargs.get('stixObject', None)
extras = kwargs.get('extras', {})
update = kwargs.get('update', False)
if (stix_object is not None):
        return self.create(stix_id=stix_object['id'], createdBy=(extras['created_by_id'] if ('created_by_id' in extras) else None), objectMarking=(extras['object_marking_ids'] if ('object_marking_ids' in extras) else None), objectLabel=(extras['object_label_ids'] if ('object_label_ids' in extras) else []), externalReferences=(extras['external_references_ids'] if ('external_references_ids' in extras) else []), revoked=(stix_object['revoked'] if ('revoked' in stix_object) else None), confidence=(stix_object['confidence'] if ('confidence' in stix_object) else None), lang=(stix_object['lang'] if ('lang' in stix_object) else None), created=(stix_object['created'] if ('created' in stix_object) else None), modified=(stix_object['modified'] if ('modified' in stix_object) else None), pattern_type=(stix_object['pattern_type'] if ('pattern_type' in stix_object) else None), pattern_version=(stix_object['pattern_version'] if ('pattern_version' in stix_object) else None), pattern=(stix_object['pattern'] if ('pattern' in stix_object) else ''), name=(stix_object['name'] if ('name' in stix_object) else stix_object['pattern']), description=(self.opencti.stix2.convert_markdown(stix_object['description']) if ('description' in stix_object) else ''), indicator_types=(stix_object['indicator_types'] if ('indicator_types' in stix_object) else None), valid_from=(stix_object['valid_from'] if ('valid_from' in stix_object) else None), valid_until=(stix_object['valid_until'] if ('valid_until' in stix_object) else None), x_opencti_score=(stix_object['x_opencti_score'] if ('x_opencti_score' in stix_object) else 50), x_opencti_detection=(stix_object['x_opencti_detection'] if ('x_opencti_detection' in stix_object) else False), x_opencti_main_observable_type=(stix_object['x_opencti_main_observable_type'] if ('x_opencti_main_observable_type' in stix_object) else 'Unknown'), killChainPhases=(extras['kill_chain_phases_ids'] if ('kill_chain_phases_ids' in extras) else None), update=update)
else:
self.opencti.log('error', '[opencti_attack_pattern] Missing parameters: stixObject')
|
def import_from_stix2(self, **kwargs):
'\n Import an Indicator object from a STIX2 object\n\n :param stixObject: the Stix-Object Indicator\n :param extras: extra dict\n :param bool update: set the update flag on import\n\n :return: Indicator object\n :rtype: Indicator\n '
stix_object = kwargs.get('stixObject', None)
extras = kwargs.get('extras', {})
update = kwargs.get('update', False)
if (stix_object is not None):
        return self.create(stix_id=stix_object['id'], createdBy=(extras['created_by_id'] if ('created_by_id' in extras) else None), objectMarking=(extras['object_marking_ids'] if ('object_marking_ids' in extras) else None), objectLabel=(extras['object_label_ids'] if ('object_label_ids' in extras) else []), externalReferences=(extras['external_references_ids'] if ('external_references_ids' in extras) else []), revoked=(stix_object['revoked'] if ('revoked' in stix_object) else None), confidence=(stix_object['confidence'] if ('confidence' in stix_object) else None), lang=(stix_object['lang'] if ('lang' in stix_object) else None), created=(stix_object['created'] if ('created' in stix_object) else None), modified=(stix_object['modified'] if ('modified' in stix_object) else None), pattern_type=(stix_object['pattern_type'] if ('pattern_type' in stix_object) else None), pattern_version=(stix_object['pattern_version'] if ('pattern_version' in stix_object) else None), pattern=(stix_object['pattern'] if ('pattern' in stix_object) else ''), name=(stix_object['name'] if ('name' in stix_object) else stix_object['pattern']), description=(self.opencti.stix2.convert_markdown(stix_object['description']) if ('description' in stix_object) else ''), indicator_types=(stix_object['indicator_types'] if ('indicator_types' in stix_object) else None), valid_from=(stix_object['valid_from'] if ('valid_from' in stix_object) else None), valid_until=(stix_object['valid_until'] if ('valid_until' in stix_object) else None), x_opencti_score=(stix_object['x_opencti_score'] if ('x_opencti_score' in stix_object) else 50), x_opencti_detection=(stix_object['x_opencti_detection'] if ('x_opencti_detection' in stix_object) else False), x_opencti_main_observable_type=(stix_object['x_opencti_main_observable_type'] if ('x_opencti_main_observable_type' in stix_object) else 'Unknown'), killChainPhases=(extras['kill_chain_phases_ids'] if ('kill_chain_phases_ids' in extras) else None), update=update)
else:
self.opencti.log('error', '[opencti_attack_pattern] Missing parameters: stixObject')<|docstring|>Import an Indicator object from a STIX2 object
:param stixObject: the Stix-Object Indicator
:param extras: extra dict
:param bool update: set the update flag on import
:return: Indicator object
:rtype: Indicator<|endoftext|>
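A sketch of import_from_stix2 with a hand-built STIX 2.1 indicator dictionary; every field value is illustrative and the client setup is assumed as in the earlier sketches.

from pycti import OpenCTIApiClient

client = OpenCTIApiClient("https://opencti.example.com", "<api-token>")  # placeholder credentials
stix_indicator = {
    "id": "indicator--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
    "name": "Known C2 domain",
    "pattern": "[domain-name:value = 'c2.example.net']",
    "pattern_type": "stix",
    "valid_from": "2024-01-01T00:00:00Z",
    "x_opencti_main_observable_type": "Domain-Name",
}
imported = client.indicator.import_from_stix2(stixObject=stix_indicator, update=True)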
|
381899db2574e473a871e173f74c17e6ecac7722c1f286d6dba2e70efc4b4a36
|
def customSortString(self, S, T):
'\n :type S: str\n :type T: str\n :rtype: str\n '
order = dict(((v, i) for (i, v) in enumerate(S)))
T = list(T)
T.sort(key=(lambda x: (order[x] if (x in order) else 27)))
return ''.join(T)
|
:type S: str
:type T: str
:rtype: str
|
String/791. Custom Sort String.py
|
customSortString
|
beckswu/Leetcode
| 138 |
python
|
def customSortString(self, S, T):
'\n :type S: str\n :type T: str\n :rtype: str\n '
order = dict(((v, i) for (i, v) in enumerate(S)))
T = list(T)
T.sort(key=(lambda x: (order[x] if (x in order) else 27)))
    return ''.join(T)
|
def customSortString(self, S, T):
'\n :type S: str\n :type T: str\n :rtype: str\n '
order = dict(((v, i) for (i, v) in enumerate(S)))
T = list(T)
T.sort(key=(lambda x: (order[x] if (x in order) else 27)))
    return ''.join(T)<|docstring|>:type S: str
:type T: str
:rtype: str<|endoftext|>
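A self-contained sketch of the same dictionary-ordering idea; the sentinel only needs to exceed every index of S, so len(S) works as well as the literal 27 used above.

class Solution:
    def customSortString(self, S, T):
        order = {c: i for i, c in enumerate(S)}
        # characters absent from S keep their relative order at the end
        return "".join(sorted(T, key=lambda c: order.get(c, len(S))))

print(Solution().customSortString("cba", "abcd"))  # "cbad" (any placement of 'd' is accepted)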
|
04c2d073ff02b8c1eb69502647c85482ab38e7d69674ffe15ff071fed07e4872
|
def customSortString(self, S, T):
'\n :type S: str\n :type T: str\n :rtype: str\n '
m = {c: idx for (idx, c) in enumerate(S)}
queue = []
for c in T:
heapq.heappush(queue, (m.get(c, sys.maxint), c))
ret = ''
while queue:
(_, v) = heapq.heappop(queue)
ret += v
return ret
|
:type S: str
:type T: str
:rtype: str
|
String/791. Custom Sort String.py
|
customSortString
|
beckswu/Leetcode
| 138 |
python
|
def customSortString(self, S, T):
'\n :type S: str\n :type T: str\n :rtype: str\n '
m = {c: idx for (idx, c) in enumerate(S)}
queue = []
for c in T:
heapq.heappush(queue, (m.get(c, sys.maxint), c))
    ret = ''
while queue:
(_, v) = heapq.heappop(queue)
ret += v
return ret
|
def customSortString(self, S, T):
'\n :type S: str\n :type T: str\n :rtype: str\n '
m = {c: idx for (idx, c) in enumerate(S)}
queue = []
for c in T:
heapq.heappush(queue, (m.get(c, sys.maxint), c))
    ret = ''
while queue:
(_, v) = heapq.heappop(queue)
ret += v
return ret<|docstring|>:type S: str
:type T: str
:rtype: str<|endoftext|>
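The heap variant above relies on Python 2's sys.maxint; a Python 3 sketch of the same approach substitutes sys.maxsize.

import heapq
import sys

def custom_sort_string(S, T):
    m = {c: idx for idx, c in enumerate(S)}
    heap = [(m.get(c, sys.maxsize), c) for c in T]  # characters not in S sort last
    heapq.heapify(heap)
    out = []
    while heap:
        _, ch = heapq.heappop(heap)
        out.append(ch)
    return "".join(out)

print(custom_sort_string("cba", "abcd"))  # "cbad"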
|
0b6cf160ceb47b6d6211ec3daee5542ac70f789d818cee07d7d85314e3208000
|
def parse_cookie(cookie):
'\n Return a dictionary parsed from a `Cookie:` header string.\n '
cookiedict = {}
for chunk in cookie.split(';'):
if ('=' in chunk):
(key, val) = chunk.split('=', 1)
else:
(key, val) = ('', chunk)
(key, val) = (key.strip(), val.strip())
if (key or val):
cookiedict[key] = cookies._unquote(val)
return cookiedict
|
Return a dictionary parsed from a `Cookie:` header string.
|
http/cookie.py
|
parse_cookie
|
krispati2013/django
| 61,676 |
python
|
def parse_cookie(cookie):
'\n \n '
cookiedict = {}
for chunk in cookie.split(';'):
if ('=' in chunk):
(key, val) = chunk.split('=', 1)
else:
            (key, val) = ('', chunk)
(key, val) = (key.strip(), val.strip())
if (key or val):
cookiedict[key] = cookies._unquote(val)
return cookiedict
|
def parse_cookie(cookie):
'\n \n '
cookiedict = {}
for chunk in cookie.split(';'):
if ('=' in chunk):
(key, val) = chunk.split('=', 1)
else:
            (key, val) = ('', chunk)
(key, val) = (key.strip(), val.strip())
if (key or val):
cookiedict[key] = cookies._unquote(val)
return cookiedict<|docstring|>Return a dictionary parsed from a `Cookie:` header string.<|endoftext|>
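A quick sketch of the parser's behaviour; in a Django project the function is importable from django.http, and a valueless chunk ends up under the empty-string key.

from django.http import parse_cookie

print(parse_cookie("sessionid=abc123; csrftoken=xyz; flag"))
# {'sessionid': 'abc123', 'csrftoken': 'xyz', '': 'flag'}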
|
cf2ebeea07917d4981bfcb002559f3c49c23a54b14bd09fdf8d5385fcc709a3b
|
@argument('supported_connection_id')
def get(self, supported_connection_id):
'Get the supported connection with the specified ID\n\n \x0c\n :param supported_connection_id: the id of the supported connection\n to retrieve\n :type supported_connection_id: str\n\n :returns: a supported connection object\n :type: dict\n '
return self.client.get_supported_connection(supported_connection_id)
|
Get the supported connection with the specified ID
:param supported_connection_id: the id of the supported connection
to retrieve
:type supported_connection_id: str
:returns: a supported connection object
:type: dict
|
pureport_client/commands/supported_connections/__init__.py
|
get
|
pureport/pureport-python-client
| 4 |
python
|
@argument('supported_connection_id')
def get(self, supported_connection_id):
'Get the supported connection with the specified ID\n\n \x0c\n :param supported_connection_id: the id of the supported connection\n to retrieve\n :type supported_connection_id: str\n\n :returns: a supported connection object\n :type: dict\n '
return self.client.get_supported_connection(supported_connection_id)
|
@argument('supported_connection_id')
def get(self, supported_connection_id):
'Get the supported connection with the specified ID\n\n \x0c\n :param supported_connection_id: the id of the supported connection\n to retrieve\n :type supported_connection_id: str\n\n :returns: a supported connection object\n :type: dict\n '
return self.client.get_supported_connection(supported_connection_id)<|docstring|>Get the supported connection with the specified ID
:param supported_connection_id: the id of the supported connection
to retrieve
:type supported_connection_id: str
:returns: a supported connection object
:type: dict<|endoftext|>
|
81a823f5258b16cfa5c6935467bdd338a052d22fbdae6dca1a39f065611af224
|
def beginning_of_day(dt: datetime) -> datetime:
'\n Returns a data object representing beginning of day on the date specified.\n Here beginning is defined as 0 hours, minutes, second, microseconds after day\n starts.\n '
time_into_day = timedelta(hours=dt.hour, minutes=dt.minute, seconds=dt.second, microseconds=dt.microsecond)
return (dt - time_into_day)
|
Returns a data object representing beginning of day on the date specified.
Here beginning is defined as 0 hours, minutes, second, microseconds after day
starts.
|
scl_time.py
|
beginning_of_day
|
slessans/scl-time
| 0 |
python
|
def beginning_of_day(dt: datetime) -> datetime:
'\n Returns a data object representing beginning of day on the date specified.\n Here beginning is defined as 0 hours, minutes, second, microseconds after day\n starts.\n '
time_into_day = timedelta(hours=dt.hour, minutes=dt.minute, seconds=dt.second, microseconds=dt.microsecond)
return (dt - time_into_day)
|
def beginning_of_day(dt: datetime) -> datetime:
'\n Returns a data object representing beginning of day on the date specified.\n Here beginning is defined as 0 hours, minutes, second, microseconds after day\n starts.\n '
time_into_day = timedelta(hours=dt.hour, minutes=dt.minute, seconds=dt.second, microseconds=dt.microsecond)
return (dt - time_into_day)<|docstring|>Returns a data object representing beginning of day on the date specified.
Here beginning is defined as 0 hours, minutes, second, microseconds after day
starts.<|endoftext|>
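A small sketch of beginning_of_day, assuming scl_time.py is importable; the timestamp is arbitrary.

from datetime import datetime, timezone
from scl_time import beginning_of_day  # assumes scl_time.py is on the import path

dt = datetime(2024, 3, 5, 14, 30, 27, 123456, tzinfo=timezone.utc)
print(beginning_of_day(dt))  # 2024-03-05 00:00:00+00:00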
|
49e61f48d210f570651f7e278fed2f27f6c4da55e3c827c90724ba12f66cedca
|
def _check_valid_aware_datetime(dt):
'\n Checks that the argument is a valid instance of datetime and that it is not naive\n '
if (not isinstance(dt, datetime)):
raise ValueError('datetime expected')
if (dt.tzinfo is None):
raise ValueError('DateTimeInterval cannot handle naive datetimes.')
|
Checks that the argument is a valid instance of datetime and that it is not naive
|
scl_time.py
|
_check_valid_aware_datetime
|
slessans/scl-time
| 0 |
python
|
def _check_valid_aware_datetime(dt):
'\n \n '
if (not isinstance(dt, datetime)):
raise ValueError('datetime expected')
if (dt.tzinfo is None):
raise ValueError('DateTimeInterval cannot handle naive datetimes.')
|
def _check_valid_aware_datetime(dt):
'\n \n '
if (not isinstance(dt, datetime)):
raise ValueError('datetime expected')
if (dt.tzinfo is None):
raise ValueError('DateTimeInterval cannot handle naive datetimes.')<|docstring|>Checks that the argument is a valid instance of datetime and that it is not naive<|endoftext|>
|
1b4eea95124ebec8ce1c5fc9d7fba08591ee9b1675eb9a81b2674c9ad0829e0e
|
def time_intervals_between(start: datetime, end: datetime, interval_length: timedelta, limit_to_end=False):
'\n Generator for DateTimeIntervals of length interval_length during this time period.\n\n If limit_by_end is True, the end date of the last time period will not exceed self.end even if\n it causes the final interval to be shorter than interval_length.\n\n If limit_by_end is False, the last interval will be interval_length even if it exceeds self.end\n '
while (start < end):
interval_end = (start + interval_length)
if (limit_to_end and (interval_end > end)):
interval_end = end
(yield DateTimeInterval(start, interval_end))
start = interval_end
|
Generator for DateTimeIntervals of length interval_length during this time period.
If limit_by_end is True, the end date of the last time period will not exceed self.end even if
it causes the final interval to be shorter than interval_length.
If limit_by_end is False, the last interval will be interval_length even if it exceeds self.end
|
scl_time.py
|
time_intervals_between
|
slessans/scl-time
| 0 |
python
|
def time_intervals_between(start: datetime, end: datetime, interval_length: timedelta, limit_to_end=False):
'\n Generator for DateTimeIntervals of length interval_length during this time period.\n\n If limit_by_end is True, the end date of the last time period will not exceed self.end even if\n it causes the final interval to be shorter than interval_length.\n\n If limit_by_end is False, the last interval will be interval_length even if it exceeds self.end\n '
while (start < end):
interval_end = (start + interval_length)
if (limit_to_end and (interval_end > end)):
interval_end = end
(yield DateTimeInterval(start, interval_end))
start = interval_end
|
def time_intervals_between(start: datetime, end: datetime, interval_length: timedelta, limit_to_end=False):
'\n Generator for DateTimeIntervals of length interval_length during this time period.\n\n If limit_by_end is True, the end date of the last time period will not exceed self.end even if\n it causes the final interval to be shorter than interval_length.\n\n If limit_by_end is False, the last interval will be interval_length even if it exceeds self.end\n '
while (start < end):
interval_end = (start + interval_length)
if (limit_to_end and (interval_end > end)):
interval_end = end
(yield DateTimeInterval(start, interval_end))
start = interval_end<|docstring|>Generator for DateTimeIntervals of length interval_length during this time period.
If limit_by_end is True, the end date of the last time period will not exceed self.end even if
it causes the final interval to be shorter than interval_length.
If limit_by_end is False, the last interval will be interval_length even if it exceeds self.end<|endoftext|>
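A sketch of time_intervals_between, assuming scl_time.py is importable; DateTimeInterval rejects naive datetimes, so the endpoints are timezone-aware.

from datetime import datetime, timedelta, timezone
from scl_time import time_intervals_between  # assumes scl_time.py is on the import path

start = datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)
end = datetime(2024, 1, 1, 2, 30, tzinfo=timezone.utc)
for interval in time_intervals_between(start, end, timedelta(hours=1), limit_to_end=True):
    print(interval.start, "->", interval.end)
# 00:00-01:00, 01:00-02:00 and a shortened final 02:00-02:30 interval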
|
980ae24f8ce7bb16d0a265de3d72ce708a2edd7d1c4306f7ba515f774d5f3f66
|
def days_between(start_of: datetime, end_of: datetime):
'\n Generator for intervals of length 1 day. They will start at the beginning of\n start_of -- that is, beginning_of_day(start_of) and end at the end of end_of,\n that is beginning_of_day(end + timedelta(days=1)). The datetimes either must both be\n naive, or must have the same timezone.\n '
if (start_of.tzinfo != end_of.tzinfo):
raise ValueError('start_of and end_of must either have same timezone or both be naive.')
return time_intervals_between(beginning_of_day(start_of), beginning_of_day((end_of + timedelta(days=1))), timedelta(days=1))
|
Generator for intervals of length 1 day. They will start at the beginning of
start_of -- that is, beginning_of_day(start_of) and end at the end of end_of,
that is beginning_of_day(end + timedelta(days=1)). The datetimes either must both be
naive, or must have the same timezone.
|
scl_time.py
|
days_between
|
slessans/scl-time
| 0 |
python
|
def days_between(start_of: datetime, end_of: datetime):
'\n Generator for intervals of length 1 day. They will start at the beginning of\n start_of -- that is, beginning_of_day(start_of) and end at the end of end_of,\n that is beginning_of_day(end + timedelta(days=1)). The datetimes either must both be\n naive, or must have the same timezone.\n '
if (start_of.tzinfo != end_of.tzinfo):
raise ValueError('start_of and end_of must either have same timezone or both be naive.')
return time_intervals_between(beginning_of_day(start_of), beginning_of_day((end_of + timedelta(days=1))), timedelta(days=1))
|
def days_between(start_of: datetime, end_of: datetime):
'\n Generator for intervals of length 1 day. They will start at the beginning of\n start_of -- that is, beginning_of_day(start_of) and end at the end of end_of,\n that is beginning_of_day(end + timedelta(days=1)). The datetimes either must both be\n naive, or must have the same timezone.\n '
if (start_of.tzinfo != end_of.tzinfo):
raise ValueError('start_of and end_of must either have same timezone or both be naive.')
return time_intervals_between(beginning_of_day(start_of), beginning_of_day((end_of + timedelta(days=1))), timedelta(days=1))<|docstring|>Generator for intervals of length 1 day. They will start at the beginning of
start_of -- that is, beginning_of_day(start_of) and end at the end of end_of,
that is beginning_of_day(end + timedelta(days=1)). The datetimes either must both be
naive, or must have the same timezone.<|endoftext|>
|
04f99c6179b90c86757deed711405acfb47d4932d36e7ba88470d97ae17bb0f4
|
def intersection_of_intervals(intervals1, intervals2):
'\n intervals1 and intervals2 must be iterable or collection of non-overlapping DateTimeInterval objects\n in strictly ascending order.\n\n Returns generator x of ascending non-overlapping intervals i such that for all i in x\n i is contained by some interval in intervals1 and i is contained by some interval in intervals2.\n '
i1 = next(intervals1)
i2 = next(intervals2)
while ((i1 is not None) and (i2 is not None)):
if i1.is_before(i2):
i1 = next(intervals1)
continue
if i1.is_after(i2):
i2 = next(intervals2)
continue
assert i1.overlaps(i2)
overlap = i1.overlap(i2)
(yield overlap)
assert (i1.end >= overlap.end)
assert (i2.end >= overlap.end)
assert (not ((i1.end > overlap.end) and (i2.end > overlap.end)))
assert ((i1.end == overlap.end) or (i2.end == overlap.end))
if (i1.end == overlap.end):
i1 = next(intervals1)
if (i2.end == overlap.end):
i2 = next(intervals2)
|
intervals1 and intervals2 must be iterable or collection of non-overlapping DateTimeInterval objects
in strictly ascending order.
Returns generator x of ascending non-overlapping intervals i such that for all i in x
i is contained by some interval in intervals1 and i is contained by some interval in intervals2.
|
scl_time.py
|
intersection_of_intervals
|
slessans/scl-time
| 0 |
python
|
def intersection_of_intervals(intervals1, intervals2):
'\n intervals1 and intervals2 must be iterable or collection of non-overlapping DateTimeInterval objects\n in strictly ascending order.\n\n Returns generator x of ascending non-overlapping intervals i such that for all i in x\n i is contained by some interval in intervals1 and i is contained by some interval in intervals2.\n '
i1 = next(intervals1)
i2 = next(intervals2)
while ((i1 is not None) and (i2 is not None)):
if i1.is_before(i2):
i1 = next(intervals1)
continue
if i1.is_after(i2):
i2 = next(intervals2)
continue
assert i1.overlaps(i2)
overlap = i1.overlap(i2)
(yield overlap)
assert (i1.end >= overlap.end)
assert (i2.end >= overlap.end)
assert (not ((i1.end > overlap.end) and (i2.end > overlap.end)))
assert ((i1.end == overlap.end) or (i2.end == overlap.end))
if (i1.end == overlap.end):
i1 = next(intervals1)
if (i2.end == overlap.end):
i2 = next(intervals2)
|
def intersection_of_intervals(intervals1, intervals2):
'\n intervals1 and intervals2 must be iterable or collection of non-overlapping DateTimeInterval objects\n in strictly ascending order.\n\n Returns generator x of ascending non-overlapping intervals i such that for all i in x\n i is contained by some interval in intervals1 and i is contained by some interval in intervals2.\n '
i1 = next(intervals1)
i2 = next(intervals2)
while ((i1 is not None) and (i2 is not None)):
if i1.is_before(i2):
i1 = next(intervals1)
continue
if i1.is_after(i2):
i2 = next(intervals2)
continue
assert i1.overlaps(i2)
overlap = i1.overlap(i2)
(yield overlap)
assert (i1.end >= overlap.end)
assert (i2.end >= overlap.end)
assert (not ((i1.end > overlap.end) and (i2.end > overlap.end)))
assert ((i1.end == overlap.end) or (i2.end == overlap.end))
if (i1.end == overlap.end):
i1 = next(intervals1)
if (i2.end == overlap.end):
i2 = next(intervals2)<|docstring|>intervals1 and intervals2 must be iterable or collection of non-overlapping DateTimeInterval objects
in strictly ascending order.
Returns generator x of ascending non-overlapping intervals i such that for all i in x
i is contained by some interval in intervals1 and i is contained by some interval in intervals2.<|endoftext|>
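A usage sketch for intersection_of_intervals, assuming the DateTimeInterval helpers it references (is_before, is_after, overlap) exist in the same module. Because the loop uses bare next(), the generator ends with StopIteration once an input runs out, which Python 3.7+ converts to RuntimeError; the sketch consumes it defensively.

from datetime import datetime, timezone
from scl_time import DateTimeInterval, intersection_of_intervals  # assumed importable

def utc(hour):
    return datetime(2024, 1, 1, hour, tzinfo=timezone.utc)

a = iter([DateTimeInterval(utc(0), utc(4)), DateTimeInterval(utc(6), utc(10))])
b = iter([DateTimeInterval(utc(2), utc(7))])

found = []
try:
    for overlap in intersection_of_intervals(a, b):
        found.append((overlap.start.hour, overlap.end.hour))
except RuntimeError:
    pass  # one of the inputs was exhausted
print(found)  # [(2, 4), (6, 7)]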
|
42b7db3d9a87383be9bc373a4d120f7841f1e2eafe78a0ab6d9d870b7a91b486
|
def _non_overlapping_intervals(of_interval: DateTimeInterval, with_interval: DateTimeInterval):
'\n Returns intervals from of_interval that do not overlap with with_interval.\n '
before = None
after = None
if (with_interval.start > of_interval.start):
before = DateTimeInterval(of_interval.start, with_interval.start)
if (of_interval.end > with_interval.end):
after = DateTimeInterval(with_interval.end, of_interval.end)
return (before, after)
|
Returns intervals from of_interval that do not overlap with with_interval.
|
scl_time.py
|
_non_overlapping_intervals
|
slessans/scl-time
| 0 |
python
|
def _non_overlapping_intervals(of_interval: DateTimeInterval, with_interval: DateTimeInterval):
'\n \n '
before = None
after = None
if (with_interval.start > of_interval.start):
before = DateTimeInterval(of_interval.start, with_interval.start)
if (of_interval.end > with_interval.end):
after = DateTimeInterval(with_interval.end, of_interval.end)
return (before, after)
|
def _non_overlapping_intervals(of_interval: DateTimeInterval, with_interval: DateTimeInterval):
'\n \n '
before = None
after = None
if (with_interval.start > of_interval.start):
before = DateTimeInterval(of_interval.start, with_interval.start)
if (of_interval.end > with_interval.end):
after = DateTimeInterval(with_interval.end, of_interval.end)
return (before, after)<|docstring|>Returns intervals from of_interval that do not overlap with with_interval.<|endoftext|>
|
57149d52fa3bc78abebed194c44eb1271213c63c94070773b31775c9c8dc4abd
|
def _smooth_status_intervals(status_intervals):
'\n Finds intervals that abut with the same status and turns them into one larger interval\n '
smoothed = []
last_status_interval = None
for status_interval in status_intervals:
assert ((last_status_interval is None) or (last_status_interval.interval.end <= status_interval.interval.start))
if (last_status_interval and (last_status_interval.status == status_interval.status) and (last_status_interval.interval.end == status_interval.interval.start)):
status_interval = _SingleStatusInterval(interval=DateTimeInterval(last_status_interval.interval.start, status_interval.interval.end), status=last_status_interval.status)
elif last_status_interval:
smoothed.append(last_status_interval)
last_status_interval = status_interval
if last_status_interval:
smoothed.append(last_status_interval)
return smoothed
|
Finds intervals that abut with the same status and turns them into one larger interval
|
scl_time.py
|
_smooth_status_intervals
|
slessans/scl-time
| 0 |
python
|
def _smooth_status_intervals(status_intervals):
'\n \n '
smoothed = []
last_status_interval = None
for status_interval in status_intervals:
assert ((last_status_interval is None) or (last_status_interval.interval.end <= status_interval.interval.start))
if (last_status_interval and (last_status_interval.status == status_interval.status) and (last_status_interval.interval.end == status_interval.interval.start)):
status_interval = _SingleStatusInterval(interval=DateTimeInterval(last_status_interval.interval.start, status_interval.interval.end), status=last_status_interval.status)
elif last_status_interval:
smoothed.append(last_status_interval)
last_status_interval = status_interval
if last_status_interval:
smoothed.append(last_status_interval)
return smoothed
|
def _smooth_status_intervals(status_intervals):
'\n \n '
smoothed = []
last_status_interval = None
for status_interval in status_intervals:
assert ((last_status_interval is None) or (last_status_interval.interval.end <= status_interval.interval.start))
if (last_status_interval and (last_status_interval.status == status_interval.status) and (last_status_interval.interval.end == status_interval.interval.start)):
status_interval = _SingleStatusInterval(interval=DateTimeInterval(last_status_interval.interval.start, status_interval.interval.end), status=last_status_interval.status)
elif last_status_interval:
smoothed.append(last_status_interval)
last_status_interval = status_interval
if last_status_interval:
smoothed.append(last_status_interval)
return smoothed<|docstring|>Finds intervals that abut with the same status and turns them into one larger interval<|endoftext|>
|
e0caa79b9d081dac469f49aa791100e5a68c83b72f21b75a8c3437ca98d1bc74
|
def contains(self, dt: datetime) -> bool:
'\n If this moment is contained by this time interval\n :param dt:\n :return:\n '
_check_valid_aware_datetime(dt)
return (self.start <= dt < self.end)
|
If this moment is contained by this time interval
:param dt:
:return:
|
scl_time.py
|
contains
|
slessans/scl-time
| 0 |
python
|
def contains(self, dt: datetime) -> bool:
'\n If this moment is contained by this time interval\n :param dt:\n :return:\n '
_check_valid_aware_datetime(dt)
return (self.start <= dt < self.end)
|
def contains(self, dt: datetime) -> bool:
'\n If this moment is contained by this time interval\n :param dt:\n :return:\n '
_check_valid_aware_datetime(dt)
return (self.start <= dt < self.end)<|docstring|>If this moment is contained by this time interval
:param dt:
:return:<|endoftext|>
|
5461708e8fd7c56c45ba8279fcea27c0adcebf7697013d3f5069e75125695715
|
def covers(self, time_interval) -> bool:
"\n Returns true if the passed time interval is completely covered by this time interval.\n Note that this is true if the ends dates are equal. since they aren't part of the interval, if\n the end dates are equal then all instances of the passed time zone still have a corresponding instance\n in this time zone.\n :param time_interval:\n :return:\n "
return (self.contains(time_interval.start) and (self.start <= time_interval.end <= self.end))
|
Returns true if the passed time interval is completely covered by this time interval.
Note that this is true if the ends dates are equal. since they aren't part of the interval, if
the end dates are equal then all instances of the passed time zone still have a corresponding instance
in this time zone.
:param time_interval:
:return:
|
scl_time.py
|
covers
|
slessans/scl-time
| 0 |
python
|
def covers(self, time_interval) -> bool:
"\n Returns true if the passed time interval is completely covered by this time interval.\n Note that this is true if the ends dates are equal. since they aren't part of the interval, if\n the end dates are equal then all instances of the passed time zone still have a corresponding instance\n in this time zone.\n :param time_interval:\n :return:\n "
return (self.contains(time_interval.start) and (self.start <= time_interval.end <= self.end))
|
def covers(self, time_interval) -> bool:
"\n Returns true if the passed time interval is completely covered by this time interval.\n Note that this is true if the ends dates are equal. since they aren't part of the interval, if\n the end dates are equal then all instances of the passed time zone still have a corresponding instance\n in this time zone.\n :param time_interval:\n :return:\n "
return (self.contains(time_interval.start) and (self.start <= time_interval.end <= self.end))<|docstring|>Returns true if the passed time interval is completely covered by this time interval.
Note that this is true if the ends dates are equal. since they aren't part of the interval, if
the end dates are equal then all instances of the passed time zone still have a corresponding instance
in this time zone.
:param time_interval:
:return:<|endoftext|>
|
d63a51a250bbe3268e36457401eda3194776409f4d454688335020a5eada4b1d
|
def overlaps(self, time_interval) -> bool:
'\n True if any instants of the passed time interval are in this time interval\n :param time_interval:\n :return:\n '
return ((time_interval.start < self.end) and (self.start < time_interval.end))
|
True if any instants of the passed time interval are in this time interval
:param time_interval:
:return:
|
scl_time.py
|
overlaps
|
slessans/scl-time
| 0 |
python
|
def overlaps(self, time_interval) -> bool:
'\n True if any instants of the passed time interval are in this time interval\n :param time_interval:\n :return:\n '
return ((time_interval.start < self.end) and (self.start < time_interval.end))
|
def overlaps(self, time_interval) -> bool:
'\n True if any instants of the passed time interval are in this time interval\n :param time_interval:\n :return:\n '
return ((time_interval.start < self.end) and (self.start < time_interval.end))<|docstring|>True if any instants of the passed time interval are in this time interval
:param time_interval:
:return:<|endoftext|>
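A combined sketch of contains, covers and overlaps on DateTimeInterval, assuming the module is importable; note the half-open semantics (start included, end excluded).

from datetime import datetime, timezone
from scl_time import DateTimeInterval  # assumes scl_time.py is on the import path

def utc(hour):
    return datetime(2024, 1, 1, hour, tzinfo=timezone.utc)

work_hours = DateTimeInterval(utc(9), utc(17))
meeting = DateTimeInterval(utc(16), utc(18))

print(work_hours.contains(utc(9)))    # True  (start is included)
print(work_hours.contains(utc(17)))   # False (end is excluded)
print(work_hours.covers(meeting))     # False (the meeting runs past 17:00)
print(work_hours.overlaps(meeting))   # True  (16:00-17:00 is shared)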
|
308725d48a219a04ac0590313ba4170aa6b35d8b21ee92360d4f0302f9b3918d
|
def intervals(self, interval_length: timedelta, limit_by_end=True):
'\n see time_intervals_between for argument info\n '
return time_intervals_between(self.start, self.end, interval_length, limit_by_end)
|
see time_intervals_between for argument info
|
scl_time.py
|
intervals
|
slessans/scl-time
| 0 |
python
|
def intervals(self, interval_length: timedelta, limit_by_end=True):
'\n \n '
return time_intervals_between(self.start, self.end, interval_length, limit_by_end)
|
def intervals(self, interval_length: timedelta, limit_by_end=True):
'\n \n '
return time_intervals_between(self.start, self.end, interval_length, limit_by_end)<|docstring|>see time_intervals_between for argument info<|endoftext|>
|
1e9ccc367f09545e0bcac2b76c948e81f024bf66d9d0581fc3617e437413e467
|
def intervals_with_status(self, status):
'\n Returns DateTimeIntervals where status is status. Returned intervals\n will be ascending, non-overlapping.\n '
return (si.interval for si in self._intervals if (si.status == status))
|
Returns DateTimeIntervals where status is status. Returned intervals
will be ascending, non-overlapping.
|
scl_time.py
|
intervals_with_status
|
slessans/scl-time
| 0 |
python
|
def intervals_with_status(self, status):
'\n Returns DateTimeIntervals where status is status. Returned intervals\n will be ascending, non-overlapping.\n '
return (si.interval for si in self._intervals if (si.status == status))
|
def intervals_with_status(self, status):
'\n Returns DateTimeIntervals where status is status. Returned intervals\n will be ascending, non-overlapping.\n '
return (si.interval for si in self._intervals if (si.status == status))<|docstring|>Returns DateTimeIntervals where status is status. Returned intervals
will be ascending, non-overlapping.<|endoftext|>
|
95dee262fdfa24937c6e2974136bb8b7b8ec666d2b0cb703d89a26dd203eaa8c
|
def cleanText(Ctext):
'\n removes punctuation, stopwords and returns lowercase text in a list of single words\n '
Ctext = Ctext.lower()
from bs4 import BeautifulSoup
Ctext = BeautifulSoup(Ctext, features='lxml').get_text()
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer('\\w+')
Ctext = tokenizer.tokenize(Ctext)
from nltk.corpus import stopwords
clean = [word for word in Ctext if (word not in stopwords.words('english'))]
return clean
|
removes punctuation, stopwords and returns lowercase text in a list of single words
|
project_sol.py
|
cleanText
|
heroorkrishna/financial_texts_Sentimental_Analysis
| 2 |
python
|
def cleanText(Ctext):
'\n \n '
Ctext = Ctext.lower()
from bs4 import BeautifulSoup
Ctext = BeautifulSoup(Ctext, features='lxml').get_text()
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer('\\w+')
Ctext = tokenizer.tokenize(Ctext)
from nltk.corpus import stopwords
clean = [word for word in Ctext if (word not in stopwords.words('english'))]
return clean
|
def cleanText(Ctext):
'\n \n '
Ctext = Ctext.lower()
from bs4 import BeautifulSoup
Ctext = BeautifulSoup(Ctext, features='lxml').get_text()
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer('\\w+')
Ctext = tokenizer.tokenize(Ctext)
from nltk.corpus import stopwords
clean = [word for word in Ctext if (word not in stopwords.words('english'))]
return clean<|docstring|>removes punctuation, stopwords and returns lowercase text in a list of single words<|endoftext|>
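An illustrative call, with cleanText from project_sol.py in scope; it needs beautifulsoup4, lxml and nltk with the stopwords corpus downloaded (nltk.download('stopwords')), and the input sentence is made up.

print(cleanText("<p>The company reported a 20% increase in quarterly profits!</p>"))
# ['company', 'reported', '20', 'increase', 'quarterly', 'profits']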
|
1e719c9310057fd8c2b7cf945108e01960a8b033ef8aa846a285e2a83acf1f1d
|
def loadPositive():
'\n loading positive dictionary\n '
myfile = open('positive.csv', 'r')
positives = myfile.readlines()
positive = [pos.strip().lower() for pos in positives]
return positive
|
loading positive dictionary
|
project_sol.py
|
loadPositive
|
heroorkrishna/financial_texts_Sentimental_Analysis
| 2 |
python
|
def loadPositive():
'\n \n '
myfile = open('positive.csv', 'r')
positives = myfile.readlines()
positive = [pos.strip().lower() for pos in positives]
return positive
|
def loadPositive():
'\n \n '
myfile = open('positive.csv', 'r')
positives = myfile.readlines()
positive = [pos.strip().lower() for pos in positives]
return positive<|docstring|>loading positive dictionary<|endoftext|>
|
069eb368f286175e7f5db5fa6189c57db442ed13968df08e36c0e1c0e872538d
|
def loadNegative():
    '\n loading negative dictionary\n '
myfile = open('negative.csv', 'r')
negatives = myfile.readlines()
negative = [neg.strip().lower() for neg in negatives]
return negative
|
loading negative dictionary
|
project_sol.py
|
loadNegative
|
heroorkrishna/financial_texts_Sentimental_Analysis
| 2 |
python
|
def loadNegative():
'\n \n '
myfile = open('negative.csv', 'r')
negatives = myfile.readlines()
negative = [neg.strip().lower() for neg in negatives]
return negative
|
def loadNegative():
'\n \n '
myfile = open('negative.csv', 'r')
negatives = myfile.readlines()
negative = [neg.strip().lower() for neg in negatives]
    return negative<|docstring|>loading negative dictionary<|endoftext|>
|
f0d293cefaf5db60df5bc2ea01b16ce9b62bc20407cb850b127a2d9a3285bff0
|
def loadConstrain():
'\n loading constraining dictionary\n '
myfile = open('constraining_dictionary.xlsx', 'r')
constrains = myfile.readlines()
constrain = [con.strip().lower() for con in constrains]
return constrain
|
loading constraining dictionary
|
project_sol.py
|
loadConstrain
|
heroorkrishna/financial_texts_Sentimental_Analysis
| 2 |
python
|
def loadConstrain():
'\n \n '
myfile = open('constraining_dictionary.xlsx', 'r')
constrains = myfile.readlines()
constrain = [con.strip().lower() for con in constrains]
return constrain
|
def loadConstrain():
'\n \n '
myfile = open('constraining_dictionary.xlsx', 'r')
constrains = myfile.readlines()
constrain = [con.strip().lower() for con in constrains]
return constrain<|docstring|>loading constraining dictionary<|endoftext|>
|
3da3e25a180080c0645a2f0fd0c639e2d50735ceffd5dbc83a03a04f300577be
|
def loadUncertain():
    '\n loading uncertainty dictionary\n '
myfile = open('uncertainty_dictionary.xlsx', 'r')
uncertains = myfile.readlines()
uncertain = [un.strip().lower() for un in uncertains]
return uncertain
|
loading uncertainty dictionary
|
project_sol.py
|
loadUncertain
|
heroorkrishna/financial_texts_Sentimental_Analysis
| 2 |
python
|
def loadUncertain():
'\n \n '
myfile = open('uncertainty_dictionary.xlsx', 'r')
uncertains = myfile.readlines()
uncertain = [un.strip().lower() for un in uncertains]
return uncertain
|
def loadUncertain():
'\n \n '
myfile = open('uncertainty_dictionary.xlsx', 'r')
uncertains = myfile.readlines()
uncertain = [un.strip().lower() for un in uncertains]
    return uncertain<|docstring|>loading uncertainty dictionary<|endoftext|>
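Note that loadConstrain and loadUncertain open .xlsx workbooks in text mode and call readlines(), which does not parse real Excel content; a hedged alternative reads the word list with pandas (the column name below is an assumption about the sheet layout).

import pandas as pd

def load_words_from_xlsx(path, column="Word"):
    # requires openpyxl as the Excel engine; "Word" is an assumed column name
    df = pd.read_excel(path)
    return [str(w).strip().lower() for w in df[column].dropna()]

# constrain = load_words_from_xlsx('constraining_dictionary.xlsx')
# uncertain = load_words_from_xlsx('uncertainty_dictionary.xlsx')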
|
9b89352ab9b2c3a90d5a3cf7d48d1122116397240a861a0e0b8ebb658e5b218c
|
def countNeg(cleantext, negative):
'\n counts negative words in cleantext\n '
negs = [word for word in cleantext if (word in negative)]
return len(negs)
|
counts negative words in cleantext
|
project_sol.py
|
countNeg
|
heroorkrishna/financial_texts_Sentimental_Analysis
| 2 |
python
|
def countNeg(cleantext, negative):
'\n \n '
negs = [word for word in cleantext if (word in negative)]
return len(negs)
|
def countNeg(cleantext, negative):
'\n \n '
negs = [word for word in cleantext if (word in negative)]
return len(negs)<|docstring|>counts negative words in cleantext<|endoftext|>
|
b57c6a95f6bc6a31bc903a456cbaba93ff1775cfc1138522a7dff5205705854f
|
def countPos(cleantext, positive):
    '\n counts positive words in cleantext\n '
pos = [word for word in cleantext if (word in positive)]
return len(pos)
|
counts positive words in cleantext
|
project_sol.py
|
countPos
|
heroorkrishna/financial_texts_Sentimental_Analysis
| 2 |
python
|
def countPos(cleantext, positive):
'\n \n '
pos = [word for word in cleantext if (word in positive)]
return len(pos)
|
def countPos(cleantext, positive):
'\n \n '
pos = [word for word in cleantext if (word in positive)]
    return len(pos)<|docstring|>counts positive words in cleantext<|endoftext|>
|
dc1ea9686666227d1518c0e2ebf3f04da8beca2c07ca59d8db5d512046f8b965
|
def countCons(cleantext, constrain):
    '\n counts constraining words in cleantext\n '
con = [word for word in cleantext if (word in constrain)]
return len(con)
|
counts constraining words in cleantext
|
project_sol.py
|
countCons
|
heroorkrishna/financial_texts_Sentimental_Analysis
| 2 |
python
|
def countCons(cleantext, constrain):
'\n \n '
con = [word for word in cleantext if (word in constrain)]
return len(con)
|
def countCons(cleantext, constrain):
'\n \n '
con = [word for word in cleantext if (word in constrain)]
    return len(con)<|docstring|>counts constraining words in cleantext<|endoftext|>
|
2813cfd08cae65fe69d34caf94ea4082358da9ee4ef8ea09529d465629f9c206
|
def countUn(cleantext, uncertain):
    '\n counts uncertainty words in cleantext\n '
un = [word for word in cleantext if (word in uncertain)]
return len(un)
|
counts uncertainty words in cleantext
|
project_sol.py
|
countUn
|
heroorkrishna/financial_texts_Sentimental_Analysis
| 2 |
python
|
def countUn(cleantext, uncertain):
'\n \n '
un = [word for word in cleantext if (word in uncertain)]
return len(un)
|
def countUn(cleantext, uncertain):
'\n \n '
un = [word for word in cleantext if (word in uncertain)]
    return len(un)<|docstring|>counts uncertainty words in cleantext<|endoftext|>
|
387712305fcd30a5056d1510844f569fafafd505ce08dbaf1d1b39a0a623371c
|
def getSentiment(cleantext, negative, positive):
'\n counts negative and positive words in cleantext and returns a score accordingly\n '
positive = loadPositive()
negative = loadNegative()
return ((countPos(cleantext, positive) - countNeg(cleantext, negative)) / ((countPos(cleantext, positive) + countNeg(cleantext, negative)) + 1e-06))
|
counts negative and positive words in cleantext and returns a score accordingly
|
project_sol.py
|
getSentiment
|
heroorkrishna/financial_texts_Sentimental_Analysis
| 2 |
python
|
def getSentiment(cleantext, negative, positive):
'\n \n '
positive = loadPositive()
negative = loadNegative()
return ((countPos(cleantext, positive) - countNeg(cleantext, negative)) / ((countPos(cleantext, positive) + countNeg(cleantext, negative)) + 1e-06))
|
def getSentiment(cleantext, negative, positive):
'\n \n '
positive = loadPositive()
negative = loadNegative()
return ((countPos(cleantext, positive) - countNeg(cleantext, negative)) / ((countPos(cleantext, positive) + countNeg(cleantext, negative)) + 1e-06))<|docstring|>counts negative and positive words in cleantext and returns a score accordingly<|endoftext|>
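An end-to-end sketch using the helpers above (note that getSentiment reloads the word lists internally and ignores its arguments); positive.csv and negative.csv must exist next to the script, and the sample sentence is made up.

text = "The company reported strong profit growth despite a small decline in margins"
words = cleanText(text)
score = getSentiment(words, loadNegative(), loadPositive())
print(round(score, 3))  # > 0 when positive terms outnumber negative ones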
|
ff082dd13c825b6799a7a36233d18b2fc5483c24efb47a78d755e4dc9e46570b
|
def install_bio2bel_module(name: str, connection: Optional[str]=None, rebuild: bool=False) -> Optional[str]:
'Install Bio2BEL module.\n\n :param name: The name of the Bio2BEL module\n :param connection: The optional database connection\n :param rebuild: Should the cache not be used? Defaults to False.\n '
module_name = _SPECIAL_CASES.get(name, f'bio2bel_{name}')
pykeen_df_path = os.path.join(biokeen_config.data_directory, f'{name}.{biokeen_config.keen_tsv_ext}')
pykeen_df_summary_path = os.path.join(biokeen_config.data_directory, f'{name}.keen.summary.json')
json_path = os.path.join(biokeen_config.data_directory, f'{name}.bel.json')
if (os.path.exists(pykeen_df_path) and (not rebuild)):
logger.info(f'{EMOJI} {module_name} has already been retrieved. See: {pykeen_df_path}')
return pykeen_df_path
if (os.path.exists(json_path) and (not rebuild)):
logger.info(f'{EMOJI} loaded {module_name} JSON: {json_path}')
graph = from_json_path(json_path)
df = to_pykeen_df(graph)
to_pykeen_path(df, pykeen_df_path)
to_pykeen_summary_path(df, pykeen_df_summary_path)
return pykeen_df_path
bio2bel_module = ensure_bio2bel_installation(module_name)
logger.debug(f'{EMOJI} imported {module_name}')
manager_cls = bio2bel_module.Manager
if (not issubclass(manager_cls, BELManagerMixin)):
version = pkg_resources.get_distribution(module_name).version
logger.warning(f'{EMOJI} {module_name} v{version} does not produce BEL')
sys.exit(1)
manager = manager_cls(connection=connection)
if issubclass(manager_cls, AbstractManager):
if (not manager.is_populated()):
logger.info(f'{EMOJI} populating {module_name}')
manager.populate()
else:
logger.debug(f'{EMOJI} {module_name} has already been populated')
logger.debug(f'{EMOJI} generating BEL for {module_name}')
graph = manager.to_bel()
logger.debug(f'Summary: {graph.number_of_nodes()} nodes / {graph.number_of_edges()} edges')
to_json_path(graph, json_path, indent=2)
logger.debug(f'{EMOJI} generating PyKEEN TSV for {module_name}')
df = to_pykeen_df(graph)
to_pykeen_summary_path(df, pykeen_df_summary_path)
success = to_pykeen_path(df, pykeen_df_path)
if success:
logger.debug(f'{EMOJI} wrote PyKEEN TSV to {pykeen_df_path}')
return pykeen_df_path
logger.warning(f'{EMOJI} no statements generated')
|
Install Bio2BEL module.
:param name: The name of the Bio2BEL module
:param connection: The optional database connection
:param rebuild: Should the cache not be used? Defaults to False.
|
src/biokeen/content.py
|
install_bio2bel_module
|
SmartDataAnalytics/Bio-KEEN
| 38 |
python
|
def install_bio2bel_module(name: str, connection: Optional[str]=None, rebuild: bool=False) -> Optional[str]:
'Install Bio2BEL module.\n\n :param name: The name of the Bio2BEL module\n :param connection: The optional database connection\n :param rebuild: Should the cache not be used? Defaults to False.\n '
module_name = _SPECIAL_CASES.get(name, f'bio2bel_{name}')
pykeen_df_path = os.path.join(biokeen_config.data_directory, f'{name}.{biokeen_config.keen_tsv_ext}')
pykeen_df_summary_path = os.path.join(biokeen_config.data_directory, f'{name}.keen.summary.json')
json_path = os.path.join(biokeen_config.data_directory, f'{name}.bel.json')
if (os.path.exists(pykeen_df_path) and (not rebuild)):
logger.info(f'{EMOJI} {module_name} has already been retrieved. See: {pykeen_df_path}')
return pykeen_df_path
if (os.path.exists(json_path) and (not rebuild)):
logger.info(f'{EMOJI} loaded {module_name} JSON: {json_path}')
graph = from_json_path(json_path)
df = to_pykeen_df(graph)
to_pykeen_path(df, pykeen_df_path)
to_pykeen_summary_path(df, pykeen_df_summary_path)
return pykeen_df_path
bio2bel_module = ensure_bio2bel_installation(module_name)
logger.debug(f'{EMOJI} imported {module_name}')
manager_cls = bio2bel_module.Manager
if (not issubclass(manager_cls, BELManagerMixin)):
version = pkg_resources.get_distribution(module_name).version
logger.warning(f'{EMOJI} {module_name} v{version} does not produce BEL')
sys.exit(1)
manager = manager_cls(connection=connection)
if issubclass(manager_cls, AbstractManager):
if (not manager.is_populated()):
logger.info(f'{EMOJI} populating {module_name}')
manager.populate()
else:
logger.debug(f'{EMOJI} {module_name} has already been populated')
logger.debug(f'{EMOJI} generating BEL for {module_name}')
graph = manager.to_bel()
logger.debug(f'Summary: {graph.number_of_nodes()} nodes / {graph.number_of_edges()} edges')
to_json_path(graph, json_path, indent=2)
logger.debug(f'{EMOJI} generating PyKEEN TSV for {module_name}')
df = to_pykeen_df(graph)
to_pykeen_summary_path(df, pykeen_df_summary_path)
success = to_pykeen_path(df, pykeen_df_path)
if success:
logger.debug(f'{EMOJI} wrote PyKEEN TSV to {pykeen_df_path}')
return pykeen_df_path
logger.warning(f'{EMOJI} no statements generated')
|
def install_bio2bel_module(name: str, connection: Optional[str]=None, rebuild: bool=False) -> Optional[str]:
'Install Bio2BEL module.\n\n :param name: The name of the Bio2BEL module\n :param connection: The optional database connection\n :param rebuild: Should the cache not be used? Defaults to False.\n '
module_name = _SPECIAL_CASES.get(name, f'bio2bel_{name}')
pykeen_df_path = os.path.join(biokeen_config.data_directory, f'{name}.{biokeen_config.keen_tsv_ext}')
pykeen_df_summary_path = os.path.join(biokeen_config.data_directory, f'{name}.keen.summary.json')
json_path = os.path.join(biokeen_config.data_directory, f'{name}.bel.json')
if (os.path.exists(pykeen_df_path) and (not rebuild)):
logger.info(f'{EMOJI} {module_name} has already been retrieved. See: {pykeen_df_path}')
return pykeen_df_path
if (os.path.exists(json_path) and (not rebuild)):
logger.info(f'{EMOJI} loaded {module_name} JSON: {json_path}')
graph = from_json_path(json_path)
df = to_pykeen_df(graph)
to_pykeen_path(df, pykeen_df_path)
to_pykeen_summary_path(df, pykeen_df_summary_path)
return pykeen_df_path
bio2bel_module = ensure_bio2bel_installation(module_name)
logger.debug(f'{EMOJI} imported {module_name}')
manager_cls = bio2bel_module.Manager
if (not issubclass(manager_cls, BELManagerMixin)):
version = pkg_resources.get_distribution(module_name).version
logger.warning(f'{EMOJI} {module_name} v{version} does not produce BEL')
sys.exit(1)
manager = manager_cls(connection=connection)
if issubclass(manager_cls, AbstractManager):
if (not manager.is_populated()):
logger.info(f'{EMOJI} populating {module_name}')
manager.populate()
else:
logger.debug(f'{EMOJI} {module_name} has already been populated')
logger.debug(f'{EMOJI} generating BEL for {module_name}')
graph = manager.to_bel()
logger.debug(f'Summary: {graph.number_of_nodes()} nodes / {graph.number_of_edges()} edges')
to_json_path(graph, json_path, indent=2)
logger.debug(f'{EMOJI} generating PyKEEN TSV for {module_name}')
df = to_pykeen_df(graph)
to_pykeen_summary_path(df, pykeen_df_summary_path)
success = to_pykeen_path(df, pykeen_df_path)
if success:
logger.debug(f'{EMOJI} wrote PyKEEN TSV to {pykeen_df_path}')
return pykeen_df_path
logger.warning(f'{EMOJI} no statements generated')<|docstring|>Install Bio2BEL module.
:param name: The name of the Bio2BEL module
:param connection: The optional database connection
:param rebuild: Should the cache not be used? Defaults to False.<|endoftext|>
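A short usage sketch, assuming the function is imported from biokeen (src/biokeen/content.py); the module name 'hgnc' is only illustrative::

    # Download/build the PyKEEN-style TSV for one Bio2BEL source, reusing the cache if it exists.
    tsv_path = install_bio2bel_module('hgnc', connection=None, rebuild=False)
    if tsv_path is not None:
        print('triples written to', tsv_path)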
|
5066d1f532fa47abc827b2e5d8b3682b540a843bddda93076e31d7d02df625ec
|
def ensure_bio2bel_installation(package: str):
'Import a package, or install it.'
try:
b_module = importlib.import_module(package)
except ImportError:
logger.info(f'{EMOJI} pip install {package}')
from pip._internal import main as pip_main
with redirect_stdout(sys.stderr):
pip_exit_code = pip_main(['install', '-q', package])
if (0 != pip_exit_code):
logger.warning(f'{EMOJI} could not find {package} on PyPI. Try installing from GitHub with:')
name = package.split('_')[(- 1)]
logger.warning(f'''
pip install git+https://github.com/bio2bel/{name}.git
''')
sys.exit(1)
try:
return importlib.import_module(package)
except ImportError:
logger.exception(f'{EMOJI} failed to import {package}')
sys.exit(1)
return b_module
|
Import a package, or install it.
|
src/biokeen/content.py
|
ensure_bio2bel_installation
|
SmartDataAnalytics/Bio-KEEN
| 38 |
python
|
def ensure_bio2bel_installation(package: str):
try:
b_module = importlib.import_module(package)
except ImportError:
logger.info(f'{EMOJI} pip install {package}')
from pip._internal import main as pip_main
with redirect_stdout(sys.stderr):
pip_exit_code = pip_main(['install', '-q', package])
if (0 != pip_exit_code):
logger.warning(f'{EMOJI} could not find {package} on PyPI. Try installing from GitHub with:')
name = package.split('_')[(- 1)]
            logger.warning(f'''
    pip install git+https://github.com/bio2bel/{name}.git
            ''')
sys.exit(1)
try:
return importlib.import_module(package)
except ImportError:
logger.exception(f'{EMOJI} failed to import {package}')
sys.exit(1)
return b_module
|
def ensure_bio2bel_installation(package: str):
try:
b_module = importlib.import_module(package)
except ImportError:
logger.info(f'{EMOJI} pip install {package}')
from pip._internal import main as pip_main
with redirect_stdout(sys.stderr):
pip_exit_code = pip_main(['install', '-q', package])
if (0 != pip_exit_code):
logger.warning(f'{EMOJI} could not find {package} on PyPI. Try installing from GitHub with:')
name = package.split('_')[(- 1)]
            logger.warning(f'''
    pip install git+https://github.com/bio2bel/{name}.git
            ''')
sys.exit(1)
try:
return importlib.import_module(package)
except ImportError:
logger.exception(f'{EMOJI} failed to import {package}')
sys.exit(1)
return b_module<|docstring|>Import a package, or install it.<|endoftext|>
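A usage sketch, with 'bio2bel_hgnc' as an illustrative package name; the call either imports the package or tries to pip-install it first::

    bio2bel_hgnc = ensure_bio2bel_installation('bio2bel_hgnc')
    manager = bio2bel_hgnc.Manager()   # every Bio2BEL package is expected to expose a Manager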
|
09e65b6633fa02d7039817d0a76ef9c18490a635b9325e24c15727760fbd69a6
|
def handle_bio2bel(module_name: str) -> np.ndarray:
'Load a Bio2BEL repository.\n\n :param module_name: The name of the bio2bel repository (with no prefix)\n '
path = install_bio2bel_module(module_name)
return np.loadtxt(fname=path, dtype=str, comments='@Comment@ Subject Predicate Object', delimiter='\t')
|
Load a Bio2BEL repository.
:param module_name: The name of the bio2bel repository (with no prefix)
|
src/biokeen/content.py
|
handle_bio2bel
|
SmartDataAnalytics/Bio-KEEN
| 38 |
python
|
def handle_bio2bel(module_name: str) -> np.ndarray:
'Load a Bio2BEL repository.\n\n :param module_name: The name of the bio2bel repository (with no prefix)\n '
path = install_bio2bel_module(module_name)
return np.loadtxt(fname=path, dtype=str, comments='@Comment@ Subject Predicate Object', delimiter='\t')
|
def handle_bio2bel(module_name: str) -> np.ndarray:
'Load a Bio2BEL repository.\n\n :param module_name: The name of the bio2bel repository (with no prefix)\n '
path = install_bio2bel_module(module_name)
return np.loadtxt(fname=path, dtype=str, comments='@Comment@ Subject Predicate Object', delimiter='\t')<|docstring|>Load a Bio2BEL repository.
:param module_name: The name of the bio2bel repository (with no prefix)<|endoftext|>
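Usage sketch (module name illustrative); the result is a NumPy array of subject/predicate/object strings::

    triples = handle_bio2bel('hgnc')
    print(triples.shape)   # (n_triples, 3)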
|
7b44092c7b373e79216e287301b8457da341832d7c3b7889f9691424f35e1c5c
|
def handle_bel_commons(network_id: Union[(int, str)], host: Optional[str]=None) -> np.ndarray:
'Load a BEL document from BEL Commons.\n\n :param network_id: The network identifier in BEL Commons\n :param host: The host for BEL Commons. Defaults to the Fraunhofer SCAI public instance.\n '
graph = from_web(int(network_id), host=host)
df = to_pykeen_df(graph)
return df.to_numpy()
|
Load a BEL document from BEL Commons.
:param network_id: The network identifier in BEL Commons
:param host: The host for BEL Commons. Defaults to the Fraunhofer SCAI public instance.
|
src/biokeen/content.py
|
handle_bel_commons
|
SmartDataAnalytics/Bio-KEEN
| 38 |
python
|
def handle_bel_commons(network_id: Union[(int, str)], host: Optional[str]=None) -> np.ndarray:
'Load a BEL document from BEL Commons.\n\n :param network_id: The network identifier in BEL Commons\n :param host: The host for BEL Commons. Defaults to the Fraunhofer SCAI public instance.\n '
graph = from_web(int(network_id), host=host)
df = to_pykeen_df(graph)
return df.to_numpy()
|
def handle_bel_commons(network_id: Union[(int, str)], host: Optional[str]=None) -> np.ndarray:
'Load a BEL document from BEL Commons.\n\n :param network_id: The network identifier in BEL Commons\n :param host: The host for BEL Commons. Defaults to the Fraunhofer SCAI public instance.\n '
graph = from_web(int(network_id), host=host)
df = to_pykeen_df(graph)
return df.to_numpy()<|docstring|>Load a BEL document from BEL Commons.
:param network_id: The network identifier in BEL Commons
:param host: The host for BEL Commons. Defaults to the Fraunhofer SCAI public instance.<|endoftext|>
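Usage sketch; the network id is illustrative and must exist on the targeted BEL Commons instance::

    triples = handle_bel_commons('1')                                            # default public host
    # triples = handle_bel_commons(1, host='https://bel-commons.example.org')    # explicit (placeholder) host
    print(triples[:3])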
|
8e1dcb11a3b214e4a6248ea3cd8e93377cd3fdf1a8d33fa141fb44f90e8e53ad
|
def build_optimizer(opt_method, lr, loss, max_gradient_norm=None, global_step=None, decay_steps=None):
" Build an optimizer and return a training op.\n\n Will also add checks for Nans in the gradient, and add some monitoring to\n tensorboard.\n\n Parameters\n ----------\n opt_method : str\n Either 'adam', 'sgd', or 'momentum'\n lr : float\n Learning rate for the optimizer\n loss : tf.Tensor\n Tensor containing the loss operation\n max_gradient_norm : float or None\n What the gradients should be clipped to. If None, no clipping used.\n global_step : tf.Variable or None\n Variable holding the global step\n decay_steps : int\n For sgd only. After how many steps to decay the learning rate.\n\n Returns\n -------\n train_op : tf op\n An op that can be run in a session to apply gradients to the trainable\n variables.\n "
with tf.variable_scope('optimizer'):
if (opt_method == 'adam'):
print('Optimizing with Adam')
opt = tf.train.AdamOptimizer(lr, epsilon=1e-06)
elif (opt_method == 'momentum'):
print('Optimizing with momentum')
opt = tf.train.MomentumOptimizer(lr, momentum=0.9, use_nesterov=True)
elif (opt_method == 'sgd'):
if (decay_steps is not None):
                lr = tf.train.exponential_decay(lr, global_step, decay_steps=decay_steps, decay_rate=0.1, staircase=True)
opt = tf.train.GradientDescentOptimizer(lr)
tf.summary.scalar('learning_rate', lr)
params = tf.trainable_variables()
gradients = tf.gradients(loss, params)
if (max_gradient_norm is not None):
(gradients, norm) = tf.clip_by_global_norm(gradients, max_gradient_norm)
else:
norm = tf.sqrt(tf.reduce_sum([tf.reduce_sum((g ** 2)) for g in gradients if (g is not None)]))
grad_check = [tf.check_numerics(g, '{} gradient nan'.format(p.name)) for (g, p) in zip(gradients, params) if (g is not None)]
grad_check.append(tf.check_numerics(norm, 'global clip val nan'))
with tf.control_dependencies(grad_check):
train_op = opt.apply_gradients(zip(gradients, params), global_step=global_step)
def strip_name(name):
name = name.split(':')[0].split('/')
if ('fwd' in name):
name.remove('fwd')
if ('batch_normalization' in name):
name.remove('batch_normalization')
return '/'.join(name)
with tf.variable_scope('grads'):
tf.summary.scalar('all', norm)
[tf.summary.scalar('{}'.format(strip_name(p.name)), tf.norm(g)) for (g, p) in zip(gradients, params) if (g is not None)]
[tf.summary.histogram('{}'.format(strip_name(p.name)), g) for (g, p) in zip(gradients, params) if (g is not None)]
return train_op
|
Build an optimizer and return a training op.
Will also add checks for Nans in the gradient, and add some monitoring to
tensorboard.
Parameters
----------
opt_method : str
Either 'adam', 'sgd', or 'momentum'
lr : float
Learning rate for the optimizer
loss : tf.Tensor
Tensor containing the loss operation
max_gradient_norm : float or None
What the gradients should be clipped to. If None, no clipping used.
global_step : tf.Variable or None
Variable holding the global step
decay_steps : int
For sgd only. After how many steps to decay the learning rate.
Returns
-------
train_op : tf op
An op that can be run in a session to apply gradients to the trainable
variables.
|
tf_ops/general.py
|
build_optimizer
|
fbcotter/tf_ops
| 0 |
python
|
def build_optimizer(opt_method, lr, loss, max_gradient_norm=None, global_step=None, decay_steps=None):
" Build an optimizer and return a training op.\n\n Will also add checks for Nans in the gradient, and add some monitoring to\n tensorboard.\n\n Parameters\n ----------\n opt_method : str\n Either 'adam', 'sgd', or 'momentum'\n lr : float\n Learning rate for the optimizer\n loss : tf.Tensor\n Tensor containing the loss operation\n max_gradient_norm : float or None\n What the gradients should be clipped to. If None, no clipping used.\n global_step : tf.Variable or None\n Variable holding the global step\n decay_steps : int\n For sgd only. After how many steps to decay the learning rate.\n\n Returns\n -------\n train_op : tf op\n An op that can be run in a session to apply gradients to the trainable\n variables.\n "
with tf.variable_scope('optimizer'):
if (opt_method == 'adam'):
print('Optimizing with Adam')
opt = tf.train.AdamOptimizer(lr, epsilon=1e-06)
elif (opt_method == 'momentum'):
print('Optimizing with momentum')
opt = tf.train.MomentumOptimizer(lr, momentum=0.9, use_nesterov=True)
elif (opt_method == 'sgd'):
if (decay_steps is not None):
                lr = tf.train.exponential_decay(lr, global_step, decay_steps=decay_steps, decay_rate=0.1, staircase=True)
opt = tf.train.GradientDescentOptimizer(lr)
tf.summary.scalar('learning_rate', lr)
params = tf.trainable_variables()
gradients = tf.gradients(loss, params)
if (max_gradient_norm is not None):
(gradients, norm) = tf.clip_by_global_norm(gradients, max_gradient_norm)
else:
norm = tf.sqrt(tf.reduce_sum([tf.reduce_sum((g ** 2)) for g in gradients if (g is not None)]))
grad_check = [tf.check_numerics(g, '{} gradient nan'.format(p.name)) for (g, p) in zip(gradients, params) if (g is not None)]
grad_check.append(tf.check_numerics(norm, 'global clip val nan'))
with tf.control_dependencies(grad_check):
train_op = opt.apply_gradients(zip(gradients, params), global_step=global_step)
def strip_name(name):
name = name.split(':')[0].split('/')
if ('fwd' in name):
name.remove('fwd')
if ('batch_normalization' in name):
name.remove('batch_normalization')
return '/'.join(name)
with tf.variable_scope('grads'):
tf.summary.scalar('all', norm)
[tf.summary.scalar('{}'.format(strip_name(p.name)), tf.norm(g)) for (g, p) in zip(gradients, params) if (g is not None)]
[tf.summary.histogram('{}'.format(strip_name(p.name)), g) for (g, p) in zip(gradients, params) if (g is not None)]
return train_op
|
def build_optimizer(opt_method, lr, loss, max_gradient_norm=None, global_step=None, decay_steps=None):
" Build an optimizer and return a training op.\n\n Will also add checks for Nans in the gradient, and add some monitoring to\n tensorboard.\n\n Parameters\n ----------\n opt_method : str\n Either 'adam', 'sgd', or 'momentum'\n lr : float\n Learning rate for the optimizer\n loss : tf.Tensor\n Tensor containing the loss operation\n max_gradient_norm : float or None\n What the gradients should be clipped to. If None, no clipping used.\n global_step : tf.Variable or None\n Variable holding the global step\n decay_steps : int\n For sgd only. After how many steps to decay the learning rate.\n\n Returns\n -------\n train_op : tf op\n An op that can be run in a session to apply gradients to the trainable\n variables.\n "
with tf.variable_scope('optimizer'):
if (opt_method == 'adam'):
print('Optimizing with Adam')
opt = tf.train.AdamOptimizer(lr, epsilon=1e-06)
elif (opt_method == 'momentum'):
print('Optimizing with momentum')
opt = tf.train.MomentumOptimizer(lr, momentum=0.9, use_nesterov=True)
elif (opt_method == 'sgd'):
if (decay_steps is not None):
                lr = tf.train.exponential_decay(lr, global_step, decay_steps=decay_steps, decay_rate=0.1, staircase=True)
opt = tf.train.GradientDescentOptimizer(lr)
tf.summary.scalar('learning_rate', lr)
params = tf.trainable_variables()
gradients = tf.gradients(loss, params)
if (max_gradient_norm is not None):
(gradients, norm) = tf.clip_by_global_norm(gradients, max_gradient_norm)
else:
norm = tf.sqrt(tf.reduce_sum([tf.reduce_sum((g ** 2)) for g in gradients if (g is not None)]))
grad_check = [tf.check_numerics(g, '{} gradient nan'.format(p.name)) for (g, p) in zip(gradients, params) if (g is not None)]
grad_check.append(tf.check_numerics(norm, 'global clip val nan'))
with tf.control_dependencies(grad_check):
train_op = opt.apply_gradients(zip(gradients, params), global_step=global_step)
def strip_name(name):
name = name.split(':')[0].split('/')
if ('fwd' in name):
name.remove('fwd')
if ('batch_normalization' in name):
name.remove('batch_normalization')
return '/'.join(name)
with tf.variable_scope('grads'):
tf.summary.scalar('all', norm)
[tf.summary.scalar('{}'.format(strip_name(p.name)), tf.norm(g)) for (g, p) in zip(gradients, params) if (g is not None)]
[tf.summary.histogram('{}'.format(strip_name(p.name)), g) for (g, p) in zip(gradients, params) if (g is not None)]
return train_op<|docstring|>Build an optimizer and return a training op.
Will also add checks for Nans in the gradient, and add some monitoring to
tensorboard.
Parameters
----------
opt_method : str
Either 'adam', 'sgd', or 'momentum'
lr : float
Learning rate for the optimizer
loss : tf.Tensor
Tensor containing the loss operation
max_gradient_norm : float or None
What the gradients should be clipped to. If None, no clipping used.
global_step : tf.Variable or None
Variable holding the global step
decay_steps : int
For sgd only. After how many steps to decay the learning rate.
Returns
-------
train_op : tf op
An op that can be run in a session to apply gradients to the trainable
variables.<|endoftext|>
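A minimal end-to-end sketch in the TensorFlow 1.x graph API that these helpers target; the toy two-class model exists only to give build_optimizer a loss to work on::

    import numpy as np
    import tensorflow as tf

    x = tf.placeholder(tf.float32, [None, 10])
    y = tf.placeholder(tf.float32, [None, 2])
    logits = tf.layers.dense(x, 2)
    loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))
    global_step = tf.train.get_or_create_global_step()
    train_op = build_optimizer('adam', lr=1e-3, loss=loss_op,
                               max_gradient_norm=5.0, global_step=global_step)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(train_op, {x: np.random.randn(4, 10), y: np.eye(2)[[0, 1, 0, 1]]})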
|
a4b56869d1ee62090e8a0b821d70f1843086e240156182cc8e40538e58a9c6b0
|
def variable_with_wd(name, shape, stddev=None, wd=None, norm=2):
' Helper to create an initialized variable with weight decay.\n\n Note that the variable is initialized with a truncated normal distribution.\n A weight decay is added only if one is specified. Also will add summaries\n for this variable.\n\n Internally, it calls tf.get_variable, so you can use this to re-get already\n defined variables (so long as the reuse scope is set to true). If it\n re-fetches an already existing variable, it will not add regularization\n again.\n\n Parameters\n ----------\n name: str\n name of the variable\n shape: list of ints\n shape of the variable you want to create\n stddev: positive float or None\n standard deviation of a truncated Gaussian\n wd: positive float or None\n add L2Loss weight decay multiplied by this float. If None, weight\n decay is not added for this variable.\n norm: positive float\n Which regularizer to apply. E.g. norm=2 uses L2 regularization, and\n norm=p adds :math:`wd \\times ||w||_{p}^{p}` to the\n REGULARIZATION_LOSSES. See :py:func:`real_reg`.\n\n Returns\n -------\n out : variable tensor\n '
if (stddev is None):
stddev = get_xavier_stddev(shape, uniform=False)
initializer = tf.truncated_normal_initializer(stddev=stddev)
var_before = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
var = tf.get_variable(name, shape, dtype=tf.float32, initializer=initializer)
var_after = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
if (len(var_before) != len(var_after)):
reg_loss = complex_reg(var, wd, norm)
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, reg_loss)
variable_summaries(var, name)
return var
|
Helper to create an initialized variable with weight decay.
Note that the variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified. Also will add summaries
for this variable.
Internally, it calls tf.get_variable, so you can use this to re-get already
defined variables (so long as the reuse scope is set to true). If it
re-fetches an already existing variable, it will not add regularization
again.
Parameters
----------
name: str
name of the variable
shape: list of ints
shape of the variable you want to create
stddev: positive float or None
standard deviation of a truncated Gaussian
wd: positive float or None
add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this variable.
norm: positive float
Which regularizer to apply. E.g. norm=2 uses L2 regularization, and
norm=p adds :math:`wd \times ||w||_{p}^{p}` to the
REGULARIZATION_LOSSES. See :py:func:`real_reg`.
Returns
-------
out : variable tensor
|
tf_ops/general.py
|
variable_with_wd
|
fbcotter/tf_ops
| 0 |
python
|
def variable_with_wd(name, shape, stddev=None, wd=None, norm=2):
' Helper to create an initialized variable with weight decay.\n\n Note that the variable is initialized with a truncated normal distribution.\n A weight decay is added only if one is specified. Also will add summaries\n for this variable.\n\n Internally, it calls tf.get_variable, so you can use this to re-get already\n defined variables (so long as the reuse scope is set to true). If it\n re-fetches an already existing variable, it will not add regularization\n again.\n\n Parameters\n ----------\n name: str\n name of the variable\n shape: list of ints\n shape of the variable you want to create\n stddev: positive float or None\n standard deviation of a truncated Gaussian\n wd: positive float or None\n add L2Loss weight decay multiplied by this float. If None, weight\n decay is not added for this variable.\n norm: positive float\n Which regularizer to apply. E.g. norm=2 uses L2 regularization, and\n norm=p adds :math:`wd \\times ||w||_{p}^{p}` to the\n REGULARIZATION_LOSSES. See :py:func:`real_reg`.\n\n Returns\n -------\n out : variable tensor\n '
if (stddev is None):
stddev = get_xavier_stddev(shape, uniform=False)
initializer = tf.truncated_normal_initializer(stddev=stddev)
var_before = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
var = tf.get_variable(name, shape, dtype=tf.float32, initializer=initializer)
var_after = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
if (len(var_before) != len(var_after)):
reg_loss = complex_reg(var, wd, norm)
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, reg_loss)
variable_summaries(var, name)
return var
|
def variable_with_wd(name, shape, stddev=None, wd=None, norm=2):
' Helper to create an initialized variable with weight decay.\n\n Note that the variable is initialized with a truncated normal distribution.\n A weight decay is added only if one is specified. Also will add summaries\n for this variable.\n\n Internally, it calls tf.get_variable, so you can use this to re-get already\n defined variables (so long as the reuse scope is set to true). If it\n re-fetches an already existing variable, it will not add regularization\n again.\n\n Parameters\n ----------\n name: str\n name of the variable\n shape: list of ints\n shape of the variable you want to create\n stddev: positive float or None\n standard deviation of a truncated Gaussian\n wd: positive float or None\n add L2Loss weight decay multiplied by this float. If None, weight\n decay is not added for this variable.\n norm: positive float\n Which regularizer to apply. E.g. norm=2 uses L2 regularization, and\n norm=p adds :math:`wd \\times ||w||_{p}^{p}` to the\n REGULARIZATION_LOSSES. See :py:func:`real_reg`.\n\n Returns\n -------\n out : variable tensor\n '
if (stddev is None):
stddev = get_xavier_stddev(shape, uniform=False)
initializer = tf.truncated_normal_initializer(stddev=stddev)
var_before = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
var = tf.get_variable(name, shape, dtype=tf.float32, initializer=initializer)
var_after = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
if (len(var_before) != len(var_after)):
reg_loss = complex_reg(var, wd, norm)
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, reg_loss)
variable_summaries(var, name)
return var<|docstring|>Helper to create an initialized variable with weight decay.
Note that the variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified. Also will add summaries
for this variable.
Internally, it calls tf.get_variable, so you can use this to re-get already
defined variables (so long as the reuse scope is set to true). If it
re-fetches an already existing variable, it will not add regularization
again.
Parameters
----------
name: str
name of the variable
shape: list of ints
shape of the variable you want to create
stddev: positive float or None
standard deviation of a truncated Gaussian
wd: positive float or None
add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this variable.
norm: positive float
Which regularizer to apply. E.g. norm=2 uses L2 regularization, and
norm=p adds :math:`wd \times ||w||_{p}^{p}` to the
REGULARIZATION_LOSSES. See :py:func:`real_reg`.
Returns
-------
out : variable tensor<|endoftext|>
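Usage sketch (TF 1.x); creates a 3x3, 16-to-32-channel convolution kernel whose L2 weight decay is collected into REGULARIZATION_LOSSES::

    with tf.variable_scope('conv1'):
        w = variable_with_wd('weights', shape=[3, 3, 16, 32], stddev=None, wd=1e-4, norm=2)
    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)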
|
e5c91efb75ac46e7d9935270d5071103e0dfc23881495b2030ca4fcfc2384eb4
|
def variable_summaries(var, name='summaries'):
'Attach a lot of summaries to a variable (for TensorBoard visualization).\n\n Parameters\n ----------\n var : :py:class:`tf.Tensor`\n variable for which you wish to create summaries\n name : str\n scope under which you want to add your summary ops\n '
with tf.name_scope((name + '_summaries')):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
stddev = tf.sqrt(tf.reduce_mean(tf.square((var - mean))), name='stddev')
tf.summary.scalar('standard_deviation', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
|
Attach a lot of summaries to a variable (for TensorBoard visualization).
Parameters
----------
var : :py:class:`tf.Tensor`
variable for which you wish to create summaries
name : str
scope under which you want to add your summary ops
|
tf_ops/general.py
|
variable_summaries
|
fbcotter/tf_ops
| 0 |
python
|
def variable_summaries(var, name='summaries'):
'Attach a lot of summaries to a variable (for TensorBoard visualization).\n\n Parameters\n ----------\n var : :py:class:`tf.Tensor`\n variable for which you wish to create summaries\n name : str\n scope under which you want to add your summary ops\n '
with tf.name_scope((name + '_summaries')):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
stddev = tf.sqrt(tf.reduce_mean(tf.square((var - mean))), name='stddev')
tf.summary.scalar('standard_deviation', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
|
def variable_summaries(var, name='summaries'):
'Attach a lot of summaries to a variable (for TensorBoard visualization).\n\n Parameters\n ----------\n var : :py:class:`tf.Tensor`\n variable for which you wish to create summaries\n name : str\n scope under which you want to add your summary ops\n '
with tf.name_scope((name + '_summaries')):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
stddev = tf.sqrt(tf.reduce_mean(tf.square((var - mean))), name='stddev')
tf.summary.scalar('standard_deviation', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)<|docstring|>Attach a lot of summaries to a variable (for TensorBoard visualization).
Parameters
----------
var : :py:class:`tf.Tensor`
variable for which you wish to create summaries
name : str
scope under which you want to add your summary ops<|endoftext|>
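Usage sketch (TF 1.x); attaches the summary ops to an existing variable and merges them for writing::

    w = tf.get_variable('fc_weights', shape=[128, 10])
    variable_summaries(w, name='fc_weights')
    merged = tf.summary.merge_all()   # evaluate and pass to a tf.summary.FileWriter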
|
861701efbee355b19db26947f23d1a201a542dd18bd88761370e2fa0a3977ceb
|
def loss(labels, logits, one_hot=True, num_classes=None, λ=1):
" Compute sum of data + regularization losses.\n\n loss = data_loss + λ * reg_losses\n\n The regularization loss will sum over all the variables that already\n exist in the GraphKeys.REGULARIZATION_LOSSES.\n\n Parameters\n ----------\n labels : ndarray(dtype=float, ndim=(N,C))\n The vector of labels.\n one_hot : bool\n True if the labels input is one_hot.\n num_classes : int\n Needed if the labels aren't one-hot already.\n logits : tf.Variable\n Logit outputs from the neural net.\n λ : float\n Multiplier to use on all regularization losses. Be careful not\n to apply things twice, as all the functions in this module typically set\n regularization losses at a block level (for more fine control).\n For this reason it defaults to 1, but can be useful to set to some other\n value to get quick scaling of loss terms.\n\n Returns\n -------\n losses : tuple of (loss, data_loss, reg_loss)\n For optimization, only need to use the first element in the tuple. I\n return the other two for displaying purposes.\n "
with tf.variable_scope('data_loss'):
tf.summary.histogram('logits', logits)
tf.summary.histogram('softmax', tf.nn.softmax(logits))
if (not one_hot):
labels = tf.one_hot(labels, depth=num_classes, axis=(- 1))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
data_loss = tf.reduce_mean(cross_entropy, name='cross_entropy')
with tf.variable_scope('reg_loss'):
reg_variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
reg_term = tf.reduce_sum(reg_variables)
with tf.variable_scope('loss'):
loss = (data_loss + (λ * reg_term))
return (loss, data_loss, reg_term)
|
Compute sum of data + regularization losses.
loss = data_loss + λ * reg_losses
The regularization loss will sum over all the variables that already
exist in the GraphKeys.REGULARIZATION_LOSSES.
Parameters
----------
labels : ndarray(dtype=float, ndim=(N,C))
The vector of labels.
one_hot : bool
True if the labels input is one_hot.
num_classes : int
Needed if the labels aren't one-hot already.
logits : tf.Variable
Logit outputs from the neural net.
λ : float
Multiplier to use on all regularization losses. Be careful not
to apply things twice, as all the functions in this module typically set
regularization losses at a block level (for more fine control).
For this reason it defaults to 1, but can be useful to set to some other
value to get quick scaling of loss terms.
Returns
-------
losses : tuple of (loss, data_loss, reg_loss)
For optimization, only need to use the first element in the tuple. I
return the other two for displaying purposes.
|
tf_ops/general.py
|
loss
|
fbcotter/tf_ops
| 0 |
python
|
def loss(labels, logits, one_hot=True, num_classes=None, λ=1):
" Compute sum of data + regularization losses.\n\n loss = data_loss + λ * reg_losses\n\n The regularization loss will sum over all the variables that already\n exist in the GraphKeys.REGULARIZATION_LOSSES.\n\n Parameters\n ----------\n labels : ndarray(dtype=float, ndim=(N,C))\n The vector of labels.\n one_hot : bool\n True if the labels input is one_hot.\n num_classes : int\n Needed if the labels aren't one-hot already.\n logits : tf.Variable\n Logit outputs from the neural net.\n λ : float\n Multiplier to use on all regularization losses. Be careful not\n to apply things twice, as all the functions in this module typically set\n regularization losses at a block level (for more fine control).\n For this reason it defaults to 1, but can be useful to set to some other\n value to get quick scaling of loss terms.\n\n Returns\n -------\n losses : tuple of (loss, data_loss, reg_loss)\n For optimization, only need to use the first element in the tuple. I\n return the other two for displaying purposes.\n "
with tf.variable_scope('data_loss'):
tf.summary.histogram('logits', logits)
tf.summary.histogram('softmax', tf.nn.softmax(logits))
if (not one_hot):
labels = tf.one_hot(labels, depth=num_classes, axis=(- 1))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
data_loss = tf.reduce_mean(cross_entropy, name='cross_entropy')
with tf.variable_scope('reg_loss'):
reg_variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
reg_term = tf.reduce_sum(reg_variables)
with tf.variable_scope('loss'):
loss = (data_loss + (λ * reg_term))
return (loss, data_loss, reg_term)
|
def loss(labels, logits, one_hot=True, num_classes=None, λ=1):
" Compute sum of data + regularization losses.\n\n loss = data_loss + λ * reg_losses\n\n The regularization loss will sum over all the variables that already\n exist in the GraphKeys.REGULARIZATION_LOSSES.\n\n Parameters\n ----------\n labels : ndarray(dtype=float, ndim=(N,C))\n The vector of labels.\n one_hot : bool\n True if the labels input is one_hot.\n num_classes : int\n Needed if the labels aren't one-hot already.\n logits : tf.Variable\n Logit outputs from the neural net.\n λ : float\n Multiplier to use on all regularization losses. Be careful not\n to apply things twice, as all the functions in this module typically set\n regularization losses at a block level (for more fine control).\n For this reason it defaults to 1, but can be useful to set to some other\n value to get quick scaling of loss terms.\n\n Returns\n -------\n losses : tuple of (loss, data_loss, reg_loss)\n For optimization, only need to use the first element in the tuple. I\n return the other two for displaying purposes.\n "
with tf.variable_scope('data_loss'):
tf.summary.histogram('logits', logits)
tf.summary.histogram('softmax', tf.nn.softmax(logits))
if (not one_hot):
labels = tf.one_hot(labels, depth=num_classes, axis=(- 1))
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
data_loss = tf.reduce_mean(cross_entropy, name='cross_entropy')
with tf.variable_scope('reg_loss'):
reg_variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
reg_term = tf.reduce_sum(reg_variables)
with tf.variable_scope('loss'):
loss = (data_loss + (λ * reg_term))
return (loss, data_loss, reg_term)<|docstring|>Compute sum of data + regularization losses.
loss = data_loss + λ * reg_losses
The regularization loss will sum over all the variables that already
exist in the GraphKeys.REGULARIZATION_LOSSES.
Parameters
----------
labels : ndarray(dtype=float, ndim=(N,C))
The vector of labels.
one_hot : bool
True if the labels input is one_hot.
num_classes : int
Needed if the labels aren't one-hot already.
logits : tf.Variable
Logit outputs from the neural net.
λ : float
Multiplier to use on all regularization losses. Be careful not
to apply things twice, as all the functions in this module typically set
regularization losses at a block level (for more fine control).
For this reason it defaults to 1, but can be useful to set to some other
value to get quick scaling of loss terms.
Returns
-------
losses : tuple of (loss, data_loss, reg_loss)
For optimization, only need to use the first element in the tuple. I
return the other two for displaying purposes.<|endoftext|>
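Usage sketch (TF 1.x) with integer class ids, so one_hot=False and num_classes are passed and the helper one-hot encodes internally::

    labels = tf.placeholder(tf.int64, [None])          # class ids in [0, 10)
    logits = tf.placeholder(tf.float32, [None, 10])    # raw network outputs
    total_loss, data_loss, reg_loss = loss(labels, logits, one_hot=False, num_classes=10, λ=1.0)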
|
e2093e28238cd529c14bcca76f5469b75d67ed05a599a5a716c7298a7db84799
|
def fixed_padding(inputs, kernel_size, data_format):
"Pads the input along the spatial dimensions independently of input size.\n\n Parameters\n ----------\n inputs: tf.Tensor\n A tensor of size [batch, channels, height_in, width_in] or\n [batch, height_in, width_in, channels] depending on data_format.\n kernel_size: int\n The kernel to be used in the conv2d or max_pool2d operation.\n Should be a positive integer.\n data_format: str\n The input format ('channels_last' or 'channels_first').\n\n Returns\n -------\n y : tf.Tensor\n A tensor with the same format as the input with the data either intact\n (if kernel_size == 1) or padded (if kernel_size > 1).\n "
pad_total = (kernel_size - 1)
pad_beg = (pad_total // 2)
pad_end = (pad_total - pad_beg)
if (data_format == 'channels_first'):
padded_inputs = tf.pad(inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return padded_inputs
|
Pads the input along the spatial dimensions independently of input size.
Parameters
----------
inputs: tf.Tensor
A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
kernel_size: int
The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
data_format: str
The input format ('channels_last' or 'channels_first').
Returns
-------
y : tf.Tensor
A tensor with the same format as the input with the data either intact
(if kernel_size == 1) or padded (if kernel_size > 1).
|
tf_ops/general.py
|
fixed_padding
|
fbcotter/tf_ops
| 0 |
python
|
def fixed_padding(inputs, kernel_size, data_format):
"Pads the input along the spatial dimensions independently of input size.\n\n Parameters\n ----------\n inputs: tf.Tensor\n A tensor of size [batch, channels, height_in, width_in] or\n [batch, height_in, width_in, channels] depending on data_format.\n kernel_size: int\n The kernel to be used in the conv2d or max_pool2d operation.\n Should be a positive integer.\n data_format: str\n The input format ('channels_last' or 'channels_first').\n\n Returns\n -------\n y : tf.Tensor\n A tensor with the same format as the input with the data either intact\n (if kernel_size == 1) or padded (if kernel_size > 1).\n "
pad_total = (kernel_size - 1)
pad_beg = (pad_total // 2)
pad_end = (pad_total - pad_beg)
if (data_format == 'channels_first'):
padded_inputs = tf.pad(inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return padded_inputs
|
def fixed_padding(inputs, kernel_size, data_format):
"Pads the input along the spatial dimensions independently of input size.\n\n Parameters\n ----------\n inputs: tf.Tensor\n A tensor of size [batch, channels, height_in, width_in] or\n [batch, height_in, width_in, channels] depending on data_format.\n kernel_size: int\n The kernel to be used in the conv2d or max_pool2d operation.\n Should be a positive integer.\n data_format: str\n The input format ('channels_last' or 'channels_first').\n\n Returns\n -------\n y : tf.Tensor\n A tensor with the same format as the input with the data either intact\n (if kernel_size == 1) or padded (if kernel_size > 1).\n "
pad_total = (kernel_size - 1)
pad_beg = (pad_total // 2)
pad_end = (pad_total - pad_beg)
if (data_format == 'channels_first'):
padded_inputs = tf.pad(inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
return padded_inputs<|docstring|>Pads the input along the spatial dimensions independently of input size.
Parameters
----------
inputs: tf.Tensor
A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
kernel_size: int
The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
data_format: str
The input format ('channels_last' or 'channels_first').
Returns
-------
y : tf.Tensor
A tensor with the same format as the input with the data either intact
(if kernel_size == 1) or padded (if kernel_size > 1).<|endoftext|>
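A quick shape check (TF 1.x): with kernel_size=3 one zero pixel is added on every spatial border, independent of the input size::

    x = tf.placeholder(tf.float32, [None, 32, 32, 3])
    y = fixed_padding(x, kernel_size=3, data_format='channels_last')
    print(y.shape)   # (?, 34, 34, 3)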
|
bf36cdd2f0e4cdcfff2d1b2e74c47704ed0c56b2c21f73e63fa37f7af674fe4f
|
def _residual_core(x, filters, kernel_size=3, stride=1, train=True, wd=0.0, bn_momentum=0.99, bn_epsilon=0.001):
' Core function of a residual unit.\n\n In -> conv -> bn -> relu -> conv\n\n Note that the normal residual layer has a batch norm and relu before the\n first conv. This is in the residual function which calls this.\n\n Parameters\n ----------\n x : tf tensor\n Input to be modified\n filters : int\n Number of output filters (will be used for all convolutions in the\n resnet core).\n kernel_size : int\n Size of the filter kernels\n stride : int\n Conv stride\n train : bool or tf boolean tensor\n Whether we are in the train phase or not. Can set to a tensorflow tensor\n so that it can be modified on the fly.\n wd : float\n Weight decay term for the convolutional weights\n bn_momentum : float\n The momentum for the batch normalization layers in the resnet\n bn_epsilon : float\n The epsilon for the batch normalization layers in the resnet\n '
init = init_ops.VarianceScaling(scale=1.0, mode='fan_out')
reg = (lambda w: real_reg(w, wd, norm=2))
bn_class = (lambda name: normalization.BatchNormalization(name=name, momentum=bn_momentum, epsilon=bn_epsilon))
    conv_class = (lambda name, stride: convolutional.Conv2D(filters, kernel_size, (stride, stride), use_bias=False, padding=('SAME' if (stride == 1) else 'VALID'), kernel_initializer=init, kernel_regularizer=reg, name=name))
with tf.variable_scope('sub1'):
if (stride > 1):
x = fixed_padding(x, kernel_size, 'channels_last')
conv = conv_class('conv1', stride)
x = conv.apply(x)
with tf.variable_scope('sub2'):
bn = bn_class('between_bn')
x = bn.apply(x, training=train)
x = tf.nn.relu(x)
conv = conv_class('conv2', 1)
x = conv.apply(x)
return x
|
Core function of a residual unit.
In -> conv -> bn -> relu -> conv
Note that the normal residual layer has a batch norm and relu before the
first conv. This is in the residual function which calls this.
Parameters
----------
x : tf tensor
Input to be modified
filters : int
Number of output filters (will be used for all convolutions in the
resnet core).
kernel_size : int
Size of the filter kernels
stride : int
Conv stride
train : bool or tf boolean tensor
Whether we are in the train phase or not. Can set to a tensorflow tensor
so that it can be modified on the fly.
wd : float
Weight decay term for the convolutional weights
bn_momentum : float
The momentum for the batch normalization layers in the resnet
bn_epsilon : float
The epsilon for the batch normalization layers in the resnet
|
tf_ops/general.py
|
_residual_core
|
fbcotter/tf_ops
| 0 |
python
|
def _residual_core(x, filters, kernel_size=3, stride=1, train=True, wd=0.0, bn_momentum=0.99, bn_epsilon=0.001):
' Core function of a residual unit.\n\n In -> conv -> bn -> relu -> conv\n\n Note that the normal residual layer has a batch norm and relu before the\n first conv. This is in the residual function which calls this.\n\n Parameters\n ----------\n x : tf tensor\n Input to be modified\n filters : int\n Number of output filters (will be used for all convolutions in the\n resnet core).\n kernel_size : int\n Size of the filter kernels\n stride : int\n Conv stride\n train : bool or tf boolean tensor\n Whether we are in the train phase or not. Can set to a tensorflow tensor\n so that it can be modified on the fly.\n wd : float\n Weight decay term for the convolutional weights\n bn_momentum : float\n The momentum for the batch normalization layers in the resnet\n bn_epsilon : float\n The epsilon for the batch normalization layers in the resnet\n '
init = init_ops.VarianceScaling(scale=1.0, mode='fan_out')
reg = (lambda w: real_reg(w, wd, norm=2))
bn_class = (lambda name: normalization.BatchNormalization(name=name, momentum=bn_momentum, epsilon=bn_epsilon))
    conv_class = (lambda name, stride: convolutional.Conv2D(filters, kernel_size, (stride, stride), use_bias=False, padding=('SAME' if (stride == 1) else 'VALID'), kernel_initializer=init, kernel_regularizer=reg, name=name))
with tf.variable_scope('sub1'):
if (stride > 1):
x = fixed_padding(x, kernel_size, 'channels_last')
conv = conv_class('conv1', stride)
x = conv.apply(x)
with tf.variable_scope('sub2'):
bn = bn_class('between_bn')
x = bn.apply(x, training=train)
x = tf.nn.relu(x)
conv = conv_class('conv2', 1)
x = conv.apply(x)
return x
|
def _residual_core(x, filters, kernel_size=3, stride=1, train=True, wd=0.0, bn_momentum=0.99, bn_epsilon=0.001):
' Core function of a residual unit.\n\n In -> conv -> bn -> relu -> conv\n\n Note that the normal residual layer has a batch norm and relu before the\n first conv. This is in the residual function which calls this.\n\n Parameters\n ----------\n x : tf tensor\n Input to be modified\n filters : int\n Number of output filters (will be used for all convolutions in the\n resnet core).\n kernel_size : int\n Size of the filter kernels\n stride : int\n Conv stride\n train : bool or tf boolean tensor\n Whether we are in the train phase or not. Can set to a tensorflow tensor\n so that it can be modified on the fly.\n wd : float\n Weight decay term for the convolutional weights\n bn_momentum : float\n The momentum for the batch normalization layers in the resnet\n bn_epsilon : float\n The epsilon for the batch normalization layers in the resnet\n '
init = init_ops.VarianceScaling(scale=1.0, mode='fan_out')
reg = (lambda w: real_reg(w, wd, norm=2))
bn_class = (lambda name: normalization.BatchNormalization(name=name, momentum=bn_momentum, epsilon=bn_epsilon))
    conv_class = (lambda name, stride: convolutional.Conv2D(filters, kernel_size, (stride, stride), use_bias=False, padding=('SAME' if (stride == 1) else 'VALID'), kernel_initializer=init, kernel_regularizer=reg, name=name))
with tf.variable_scope('sub1'):
if (stride > 1):
x = fixed_padding(x, kernel_size, 'channels_last')
conv = conv_class('conv1', stride)
x = conv.apply(x)
with tf.variable_scope('sub2'):
bn = bn_class('between_bn')
x = bn.apply(x, training=train)
x = tf.nn.relu(x)
conv = conv_class('conv2', 1)
x = conv.apply(x)
return x<|docstring|>Core function of a residual unit.
In -> conv -> bn -> relu -> conv
Note that the normal residual layer has a batch norm and relu before the
first conv. This is in the residual function which calls this.
Parameters
----------
x : tf tensor
Input to be modified
filters : int
Number of output filters (will be used for all convolutions in the
resnet core).
kernel_size : int
Size of the filter kernels
stride : int
Conv stride
train : bool or tf boolean tensor
Whether we are in the train phase or not. Can set to a tensorflow tensor
so that it can be modified on the fly.
wd : float
Weight decay term for the convolutional weights
bn_momentum : float
The momentum for the batch normalization layers in the resnet
bn_epsilon : float
The epsilon for the batch normalization layers in the resnet<|endoftext|>
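Usage sketch (TF 1.x), assuming the module-level imports this helper relies on (init_ops, normalization, convolutional, real_reg) are in place::

    x = tf.placeholder(tf.float32, [None, 32, 32, 64])
    is_training = tf.placeholder(tf.bool, [])
    with tf.variable_scope('block1'):
        # conv -> bn -> relu -> conv, halving the spatial size via stride 2
        f = _residual_core(x, filters=64, kernel_size=3, stride=2, train=is_training, wd=1e-4)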
|
23c9e1fc005d3fb41a88db4ccfc90cd6a8a3218b5bc69d54bfa236baf83b03c1
|
def residual(x, filters, kernel_size=3, stride=1, train=True, wd=0.0, bn_momentum=0.99, bn_epsilon=0.001, name='res'):
' Residual layer\n\n Uses the _residual_core function to create F(x), then adds x to it.\n\n Parameters\n ----------\n x : tf tensor\n Input to be modified\n filters : int\n Number of output filters (will be used for all convolutions in the\n resnet core).\n stride : int\n Conv stride\n train : bool or tf boolean tensor\n Whether we are in the train phase or not. Can set to a tensorflow tensor\n so that it can be modified on the fly.\n wd : float\n Weight decay term for the convolutional weights\n bn_momentum : float\n The momentum for the batch normalization layers in the resnet\n bn_epsilon : float\n The epsilon for the batch normalization layers in the resnet\n\n Notes\n -----\n When training, the moving_mean and moving_variance need to be updated. By\n default the update ops are placed in tf.GraphKeys.UPDATE_OPS, so they need\n to be added as a dependency to the train_op. For example::\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(loss)\n '
bn_class = (lambda name: normalization.BatchNormalization(name=name, momentum=bn_momentum, epsilon=bn_epsilon))
orig_x = x
with tf.variable_scope(name):
bn = bn_class('init_bn')
x = bn.apply(x, training=train)
x = tf.nn.relu(x)
if (stride > 1):
orig_x = tf.layers.conv2d(orig_x, filters=filters, strides=stride, kernel_size=1, padding='VALID', use_bias=False, kernel_initializer=tf.variance_scaling_initializer(), data_format='channels_last')
x = _residual_core(x, filters, kernel_size, stride, train, wd, bn_momentum, bn_epsilon)
y = tf.add(x, orig_x)
return y
|
Residual layer
Uses the _residual_core function to create F(x), then adds x to it.
Parameters
----------
x : tf tensor
Input to be modified
filters : int
Number of output filters (will be used for all convolutions in the
resnet core).
stride : int
Conv stride
train : bool or tf boolean tensor
Whether we are in the train phase or not. Can set to a tensorflow tensor
so that it can be modified on the fly.
wd : float
Weight decay term for the convolutional weights
bn_momentum : float
The momentum for the batch normalization layers in the resnet
bn_epsilon : float
The epsilon for the batch normalization layers in the resnet
Notes
-----
When training, the moving_mean and moving_variance need to be updated. By
default the update ops are placed in tf.GraphKeys.UPDATE_OPS, so they need
to be added as a dependency to the train_op. For example::
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss)
|
tf_ops/general.py
|
residual
|
fbcotter/tf_ops
| 0 |
python
|
def residual(x, filters, kernel_size=3, stride=1, train=True, wd=0.0, bn_momentum=0.99, bn_epsilon=0.001, name='res'):
' Residual layer\n\n Uses the _residual_core function to create F(x), then adds x to it.\n\n Parameters\n ----------\n x : tf tensor\n Input to be modified\n filters : int\n Number of output filters (will be used for all convolutions in the\n resnet core).\n stride : int\n Conv stride\n train : bool or tf boolean tensor\n Whether we are in the train phase or not. Can set to a tensorflow tensor\n so that it can be modified on the fly.\n wd : float\n Weight decay term for the convolutional weights\n bn_momentum : float\n The momentum for the batch normalization layers in the resnet\n bn_epsilon : float\n The epsilon for the batch normalization layers in the resnet\n\n Notes\n -----\n When training, the moving_mean and moving_variance need to be updated. By\n default the update ops are placed in tf.GraphKeys.UPDATE_OPS, so they need\n to be added as a dependency to the train_op. For example::\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(loss)\n '
bn_class = (lambda name: normalization.BatchNormalization(name=name, momentum=bn_momentum, epsilon=bn_epsilon))
orig_x = x
with tf.variable_scope(name):
bn = bn_class('init_bn')
x = bn.apply(x, training=train)
x = tf.nn.relu(x)
if (stride > 1):
orig_x = tf.layers.conv2d(orig_x, filters=filters, strides=stride, kernel_size=1, padding='VALID', use_bias=False, kernel_initializer=tf.variance_scaling_initializer(), data_format='channels_last')
x = _residual_core(x, filters, kernel_size, stride, train, wd, bn_momentum, bn_epsilon)
y = tf.add(x, orig_x)
return y
|
def residual(x, filters, kernel_size=3, stride=1, train=True, wd=0.0, bn_momentum=0.99, bn_epsilon=0.001, name='res'):
' Residual layer\n\n Uses the _residual_core function to create F(x), then adds x to it.\n\n Parameters\n ----------\n x : tf tensor\n Input to be modified\n filters : int\n Number of output filters (will be used for all convolutions in the\n resnet core).\n stride : int\n Conv stride\n train : bool or tf boolean tensor\n Whether we are in the train phase or not. Can set to a tensorflow tensor\n so that it can be modified on the fly.\n wd : float\n Weight decay term for the convolutional weights\n bn_momentum : float\n The momentum for the batch normalization layers in the resnet\n bn_epsilon : float\n The epsilon for the batch normalization layers in the resnet\n\n Notes\n -----\n When training, the moving_mean and moving_variance need to be updated. By\n default the update ops are placed in tf.GraphKeys.UPDATE_OPS, so they need\n to be added as a dependency to the train_op. For example::\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(loss)\n '
bn_class = (lambda name: normalization.BatchNormalization(name=name, momentum=bn_momentum, epsilon=bn_epsilon))
orig_x = x
with tf.variable_scope(name):
bn = bn_class('init_bn')
x = bn.apply(x, training=train)
x = tf.nn.relu(x)
if (stride > 1):
orig_x = tf.layers.conv2d(orig_x, filters=filters, strides=stride, kernel_size=1, padding='VALID', use_bias=False, kernel_initializer=tf.variance_scaling_initializer(), data_format='channels_last')
x = _residual_core(x, filters, kernel_size, stride, train, wd, bn_momentum, bn_epsilon)
y = tf.add(x, orig_x)
return y<|docstring|>Residual layer
Uses the _residual_core function to create F(x), then adds x to it.
Parameters
----------
x : tf tensor
Input to be modified
filters : int
Number of output filters (will be used for all convolutions in the
resnet core).
stride : int
Conv stride
train : bool or tf boolean tensor
Whether we are in the train phase or not. Can set to a tensorflow tensor
so that it can be modified on the fly.
wd : float
Weight decay term for the convolutional weights
bn_momentum : float
The momentum for the batch normalization layers in the resnet
bn_epsilon : float
The epsilon for the batch normalization layers in the resnet
Notes
-----
When training, the moving_mean and moving_variance need to be updated. By
default the update ops are placed in tf.GraphKeys.UPDATE_OPS, so they need
to be added as a dependency to the train_op. For example::
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss)<|endoftext|>
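Usage sketch (TF 1.x); note the UPDATE_OPS dependency from the Notes above so the batch-norm statistics are refreshed during training::

    x = tf.placeholder(tf.float32, [None, 32, 32, 16])
    is_training = tf.placeholder(tf.bool, [])
    y = residual(x, filters=16, stride=1, train=is_training, wd=1e-4, name='res1')

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tf.train.AdamOptimizer(1e-3).minimize(tf.reduce_mean(y))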
|
b26bf83b464dac74c30aee6b066861673f884b91ed0564791eeb6dc4126f1a1e
|
def lift_residual_resample(x1, x2, filters, train=True, downsize=True, wd=0.0001):
'Define a Lifting Layer with resizing\n\n The P and the U blocks for this lifting layer are non-linear functions.\n These are the same form as the F(x) in a residual layer (i.e. two\n convolutions). In block form, a lifting layer looks like this::\n\n _______________\n | |\n x1->|---(+)---------|->d\n | ^ | |\n | | | |\n | --- --- |\n | |-P | | U | |\n | --- --- |\n | | | |\n | | v |\n x2->|----------(+)--|->s\n |_______________|\n\n Parameters\n ----------\n x1 : tf tensor\n Input tensor 1\n x2 : tf tensor\n Input tensor 2\n filters : int\n Number of output channels for P*x2 and U*d\n train : bool or tf boolean tensor\n Whether we are in the train phase or not. Can set to a tensorflow tensor\n so that it can be modified on the fly.\n wd : float\n Weight decay term for the convolutional weights\n\n Returns\n -------\n d : tf tensor\n Detail coefficients\n s : tf tensor\n Scale coefficients\n '
assert (x1.get_shape().as_list() == x2.get_shape().as_list())
if downsize:
x1 = lazy_wavelet(x1)
x2 = lazy_wavelet(x2)
in_channels = x1.get_shape().as_list()[(- 1)]
if ((filters % in_channels) != 0):
raise ValueError(('Can only expand an image by an integer number ' + 'of its channels'))
if (filters != in_channels):
nstack = (filters // in_channels)
x1 = tf.concat(([x1] * nstack), axis=(- 1))
(d, s) = lift_residual(x1, x2, train, wd)
return (d, s)
|
Define a Lifting Layer with resizing
The P and the U blocks for this lifting layer are non-linear functions.
These are the same form as the F(x) in a residual layer (i.e. two
convolutions). In block form, a lifting layer looks like this::
_______________
| |
x1->|---(+)---------|->d
| ^ | |
| | | |
| --- --- |
| |-P | | U | |
| --- --- |
| | | |
| | v |
x2->|----------(+)--|->s
|_______________|
Parameters
----------
x1 : tf tensor
Input tensor 1
x2 : tf tensor
Input tensor 2
filters : int
Number of output channels for P*x2 and U*d
train : bool or tf boolean tensor
Whether we are in the train phase or not. Can set to a tensorflow tensor
so that it can be modified on the fly.
wd : float
Weight decay term for the convolutional weights
Returns
-------
d : tf tensor
Detail coefficients
s : tf tensor
Scale coefficients
|
tf_ops/general.py
|
lift_residual_resample
|
fbcotter/tf_ops
| 0 |
python
|
def lift_residual_resample(x1, x2, filters, train=True, downsize=True, wd=0.0001):
'Define a Lifting Layer with resizing\n\n The P and the U blocks for this lifting layer are non-linear functions.\n These are the same form as the F(x) in a residual layer (i.e. two\n convolutions). In block form, a lifting layer looks like this::\n\n _______________\n | |\n x1->|---(+)---------|->d\n | ^ | |\n | | | |\n | --- --- |\n | |-P | | U | |\n | --- --- |\n | | | |\n | | v |\n x2->|----------(+)--|->s\n |_______________|\n\n Parameters\n ----------\n x1 : tf tensor\n Input tensor 1\n x2 : tf tensor\n Input tensor 2\n filters : int\n Number of output channels for P*x2 and U*d\n train : bool or tf boolean tensor\n Whether we are in the train phase or not. Can set to a tensorflow tensor\n so that it can be modified on the fly.\n wd : float\n Weight decay term for the convolutional weights\n\n Returns\n -------\n d : tf tensor\n Detail coefficients\n s : tf tensor\n Scale coefficients\n '
assert (x1.get_shape().as_list() == x2.get_shape().as_list())
if downsize:
x1 = lazy_wavelet(x1)
x2 = lazy_wavelet(x2)
in_channels = x1.get_shape().as_list()[(- 1)]
if ((filters % in_channels) != 0):
raise ValueError(('Can only expand an image by an integer number ' + 'of its channels'))
if (filters != in_channels):
nstack = (filters // in_channels)
x1 = tf.concat(([x1] * nstack), axis=(- 1))
(d, s) = lift_residual(x1, x2, train, wd)
return (d, s)
|
def lift_residual_resample(x1, x2, filters, train=True, downsize=True, wd=0.0001):
'Define a Lifting Layer with resizing\n\n The P and the U blocks for this lifting layer are non-linear functions.\n These are the same form as the F(x) in a residual layer (i.e. two\n convolutions). In block form, a lifting layer looks like this::\n\n _______________\n | |\n x1->|---(+)---------|->d\n | ^ | |\n | | | |\n | --- --- |\n | |-P | | U | |\n | --- --- |\n | | | |\n | | v |\n x2->|----------(+)--|->s\n |_______________|\n\n Parameters\n ----------\n x1 : tf tensor\n Input tensor 1\n x2 : tf tensor\n Input tensor 2\n filters : int\n Number of output channels for P*x2 and U*d\n train : bool or tf boolean tensor\n Whether we are in the train phase or not. Can set to a tensorflow tensor\n so that it can be modified on the fly.\n wd : float\n Weight decay term for the convolutional weights\n\n Returns\n -------\n d : tf tensor\n Detail coefficients\n s : tf tensor\n Scale coefficients\n '
assert (x1.get_shape().as_list() == x2.get_shape().as_list())
if downsize:
x1 = lazy_wavelet(x1)
x2 = lazy_wavelet(x2)
in_channels = x1.get_shape().as_list()[(- 1)]
if ((filters % in_channels) != 0):
raise ValueError(('Can only expand an image by an integer number ' + 'of its channels'))
if (filters != in_channels):
nstack = (filters // in_channels)
x1 = tf.concat(([x1] * nstack), axis=(- 1))
(d, s) = lift_residual(x1, x2, train, wd)
return (d, s)<|docstring|>Define a Lifting Layer with resizing
The P and the U blocks for this lifting layer are non-linear functions.
These are the same form as the F(x) in a residual layer (i.e. two
convolutions). In block form, a lifting layer looks like this::
_______________
| |
x1->|---(+)---------|->d
| ^ | |
| | | |
| --- --- |
| |-P | | U | |
| --- --- |
| | | |
| | v |
x2->|----------(+)--|->s
|_______________|
Parameters
----------
x1 : tf tensor
Input tensor 1
x2 : tf tensor
Input tensor 2
filters : int
Number of output channels for P*x2 and U*d
train : bool or tf boolean tensor
Whether we are in the train phase or not. Can set to a tensorflow tensor
so that it can be modified on the fly.
wd : float
Weight decay term for the convolutional weights
Returns
-------
d : tf tensor
Detail coefficients
s : tf tensor
Scale coefficients<|endoftext|>
|
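The resizing in `lift_residual_resample` only changes the channel count by stacking copies of x1, so `filters` must be an integer multiple of the input depth. A NumPy sketch of just that check and expansion (the spatial `lazy_wavelet` step is omitted here)::

import numpy as np

def expand_channels(x1, filters):
    in_channels = x1.shape[-1]
    if filters % in_channels != 0:
        raise ValueError('Can only expand an image by an integer number '
                         'of its channels')
    nstack = filters // in_channels
    # Mirrors tf.concat([x1] * nstack, axis=-1)
    return np.concatenate([x1] * nstack, axis=-1)

x1 = np.zeros((8, 16, 16, 4))
print(expand_channels(x1, 12).shape)   # (8, 16, 16, 12)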
38aeb63e8f75508a108441ce4b8a3d664b2866df6920a412d5fc46254589a6f5
|
def lift_residual_resample_inv(d, s, out_size, train=True, wd=0.0001):
'Define a inverse Lifting Layer with resizing\n\n The P and the U blocks for this lifting layer are non-linear functions.\n These are the same form as the F(x) in a residual layer (i.e. two\n convolutions). In block form, a lifting layer looks like this::\n\n We share the variables with the forward lifting.\n\n In block form, the inverse lifting layer looks like this (note the sign swap\n and flow direction reversal compared to the forward case)::\n\n _______________\n | |\n x1<-|---(+)---------|<-d\n | ^ | |\n | | | |\n | --- --- |\n | | P | |-U | |\n | --- --- |\n | | | |\n | | v |\n x2<-|----------(+)--|<-s\n |_______________|\n\n Parameters\n ----------\n d : tf tensor\n Input tensor 1\n s : tf tensor\n Input tensor 2\n out_size : list of ints\n Size of the resulting x1 tensors.\n train : bool or tf boolean tensor\n Whether we are in the train phase or not. Can set to a tensorflow tensor\n so that it can be modified on the fly.\n wd : float\n Weight decay term for the convolutional weights\n\n Returns\n -------\n x1 : tf tensor\n Reconstructed x1\n x2 : tf tensor\n Reconstructed x2\n '
assert (d.get_shape().as_list() == s.get_shape().as_list())
(x1, x2) = lift_residual_inv(d, s, train, wd)
if (out_size[(- 1)] != x1.get_shape().as_list()[(- 1)]):
x1 = lazy_wavelet_inv(x1, out_size)
x2 = lazy_wavelet_inv(x2, out_size)
return (x1, x2)
|
Define an inverse Lifting Layer with resizing
The P and the U blocks for this lifting layer are non-linear functions.
These are the same form as the F(x) in a residual layer (i.e. two
convolutions). We share the variables with the forward lifting.
In block form, the inverse lifting layer looks like this (note the sign swap
and flow direction reversal compared to the forward case)::
_______________
| |
x1<-|---(+)---------|<-d
| ^ | |
| | | |
| --- --- |
| | P | |-U | |
| --- --- |
| | | |
| | v |
x2<-|----------(+)--|<-s
|_______________|
Parameters
----------
d : tf tensor
Input tensor 1
s : tf tensor
Input tensor 2
out_size : list of ints
Size of the resulting x1 tensors.
train : bool or tf boolean tensor
Whether we are in the train phase or not. Can set to a tensorflow tensor
so that it can be modified on the fly.
wd : float
Weight decay term for the convolutional weights
Returns
-------
x1 : tf tensor
Reconstructed x1
x2 : tf tensor
Reconstructed x2
|
tf_ops/general.py
|
lift_residual_resample_inv
|
fbcotter/tf_ops
| 0 |
python
|
def lift_residual_resample_inv(d, s, out_size, train=True, wd=0.0001):
'Define a inverse Lifting Layer with resizing\n\n The P and the U blocks for this lifting layer are non-linear functions.\n These are the same form as the F(x) in a residual layer (i.e. two\n convolutions). In block form, a lifting layer looks like this::\n\n We share the variables with the forward lifting.\n\n In block form, the inverse lifting layer looks like this (note the sign swap\n and flow direction reversal compared to the forward case)::\n\n _______________\n | |\n x1<-|---(+)---------|<-d\n | ^ | |\n | | | |\n | --- --- |\n | | P | |-U | |\n | --- --- |\n | | | |\n | | v |\n x2<-|----------(+)--|<-s\n |_______________|\n\n Parameters\n ----------\n d : tf tensor\n Input tensor 1\n s : tf tensor\n Input tensor 2\n out_size : list of ints\n Size of the resulting x1 tensors.\n train : bool or tf boolean tensor\n Whether we are in the train phase or not. Can set to a tensorflow tensor\n so that it can be modified on the fly.\n wd : float\n Weight decay term for the convolutional weights\n\n Returns\n -------\n x1 : tf tensor\n Reconstructed x1\n x2 : tf tensor\n Reconstructed x2\n '
assert (d.get_shape().as_list() == s.get_shape().as_list())
(x1, x2) = lift_residual_inv(d, s, train, wd)
if (out_size[(- 1)] != x1.get_shape().as_list()[(- 1)]):
x1 = lazy_wavelet_inv(x1, out_size)
x2 = lazy_wavelet_inv(x2, out_size)
return (x1, x2)
|
def lift_residual_resample_inv(d, s, out_size, train=True, wd=0.0001):
'Define a inverse Lifting Layer with resizing\n\n The P and the U blocks for this lifting layer are non-linear functions.\n These are the same form as the F(x) in a residual layer (i.e. two\n convolutions). In block form, a lifting layer looks like this::\n\n We share the variables with the forward lifting.\n\n In block form, the inverse lifting layer looks like this (note the sign swap\n and flow direction reversal compared to the forward case)::\n\n _______________\n | |\n x1<-|---(+)---------|<-d\n | ^ | |\n | | | |\n | --- --- |\n | | P | |-U | |\n | --- --- |\n | | | |\n | | v |\n x2<-|----------(+)--|<-s\n |_______________|\n\n Parameters\n ----------\n d : tf tensor\n Input tensor 1\n s : tf tensor\n Input tensor 2\n out_size : list of ints\n Size of the resulting x1 tensors.\n train : bool or tf boolean tensor\n Whether we are in the train phase or not. Can set to a tensorflow tensor\n so that it can be modified on the fly.\n wd : float\n Weight decay term for the convolutional weights\n\n Returns\n -------\n x1 : tf tensor\n Reconstructed x1\n x2 : tf tensor\n Reconstructed x2\n '
assert (d.get_shape().as_list() == s.get_shape().as_list())
(x1, x2) = lift_residual_inv(d, s, train, wd)
if (out_size[(- 1)] != x1.get_shape().as_list()[(- 1)]):
x1 = lazy_wavelet_inv(x1, out_size)
x2 = lazy_wavelet_inv(x2, out_size)
return (x1, x2)<|docstring|>Define an inverse Lifting Layer with resizing
The P and the U blocks for this lifting layer are non-linear functions.
These are the same form as the F(x) in a residual layer (i.e. two
convolutions). We share the variables with the forward lifting.
In block form, the inverse lifting layer looks like this (note the sign swap
and flow direction reversal compared to the forward case)::
_______________
| |
x1<-|---(+)---------|<-d
| ^ | |
| | | |
| --- --- |
| | P | |-U | |
| --- --- |
| | | |
| | v |
x2<-|----------(+)--|<-s
|_______________|
Parameters
----------
d : tf tensor
Input tensor 1
s : tf tensor
Input tensor 2
out_size : list of ints
Size of the resulting x1 tensors.
train : bool or tf boolean tensor
Whether we are in the train phase or not. Can set to a tensorflow tensor
so that it can be modified on the fly.
wd : float
Weight decay term for the convolutional weights
Returns
-------
x1 : tf tensor
Reconstructed x1
x2 : tf tensor
Reconstructed x2<|endoftext|>
|
9b5c6d835760c014a2fa68a1a1a1687c780bb85bb70720dd6f368365569c7412
|
def lift_residual(x1, x2, train=True, wd=0.0001):
'Define a Lifting Layer\n\n The P and the U blocks for this lifting layer are non-linear functions.\n These are the same form as the F(x) in a residual layer (i.e. two\n convolutions). In block form, a lifting layer looks like this::\n\n _______________\n | |\n x1->|---(+)---------|->d\n | ^ | |\n | | | |\n | --- --- |\n | |-P | | U | |\n | --- --- |\n | | | |\n | | v |\n x2->|----------(+)--|->s\n |_______________|\n\n Parameters\n ----------\n x1 : tf tensor\n Input tensor 1\n x2 : tf tensor\n Input tensor 2\n train : bool or tf boolean tensor\n Whether we are in the train phase or not. Can set to a tensorflow tensor\n so that it can be modified on the fly.\n wd : float\n Weight decay term for the convolutional weights\n\n Returns\n -------\n d : tf tensor\n Detail coefficients\n s : tf tensor\n Scale coefficients\n '
filters = x1.get_shape().as_list()[(- 1)]
assert (filters == x2.get_shape().as_list()[(- 1)])
with tf.variable_scope('P'):
d = (x1 - _residual_core(x2, filters, 1, train, wd))
with tf.variable_scope('U'):
s = (x2 + _residual_core(d, filters, 1, train, wd))
return (d, s)
|
Define a Lifting Layer
The P and the U blocks for this lifting layer are non-linear functions.
These are the same form as the F(x) in a residual layer (i.e. two
convolutions). In block form, a lifting layer looks like this::
_______________
| |
x1->|---(+)---------|->d
| ^ | |
| | | |
| --- --- |
| |-P | | U | |
| --- --- |
| | | |
| | v |
x2->|----------(+)--|->s
|_______________|
Parameters
----------
x1 : tf tensor
Input tensor 1
x2 : tf tensor
Input tensor 2
train : bool or tf boolean tensor
Whether we are in the train phase or not. Can set to a tensorflow tensor
so that it can be modified on the fly.
wd : float
Weight decay term for the convolutional weights
Returns
-------
d : tf tensor
Detail coefficients
s : tf tensor
Scale coefficients
|
tf_ops/general.py
|
lift_residual
|
fbcotter/tf_ops
| 0 |
python
|
def lift_residual(x1, x2, train=True, wd=0.0001):
'Define a Lifting Layer\n\n The P and the U blocks for this lifting layer are non-linear functions.\n These are the same form as the F(x) in a residual layer (i.e. two\n convolutions). In block form, a lifting layer looks like this::\n\n _______________\n | |\n x1->|---(+)---------|->d\n | ^ | |\n | | | |\n | --- --- |\n | |-P | | U | |\n | --- --- |\n | | | |\n | | v |\n x2->|----------(+)--|->s\n |_______________|\n\n Parameters\n ----------\n x1 : tf tensor\n Input tensor 1\n x2 : tf tensor\n Input tensor 2\n train : bool or tf boolean tensor\n Whether we are in the train phase or not. Can set to a tensorflow tensor\n so that it can be modified on the fly.\n wd : float\n Weight decay term for the convolutional weights\n\n Returns\n -------\n d : tf tensor\n Detail coefficients\n s : tf tensor\n Scale coefficients\n '
filters = x1.get_shape().as_list()[(- 1)]
assert (filters == x2.get_shape().as_list()[(- 1)])
with tf.variable_scope('P'):
d = (x1 - _residual_core(x2, filters, 1, train, wd))
with tf.variable_scope('U'):
s = (x2 + _residual_core(d, filters, 1, train, wd))
return (d, s)
|
def lift_residual(x1, x2, train=True, wd=0.0001):
'Define a Lifting Layer\n\n The P and the U blocks for this lifting layer are non-linear functions.\n These are the same form as the F(x) in a residual layer (i.e. two\n convolutions). In block form, a lifting layer looks like this::\n\n _______________\n | |\n x1->|---(+)---------|->d\n | ^ | |\n | | | |\n | --- --- |\n | |-P | | U | |\n | --- --- |\n | | | |\n | | v |\n x2->|----------(+)--|->s\n |_______________|\n\n Parameters\n ----------\n x1 : tf tensor\n Input tensor 1\n x2 : tf tensor\n Input tensor 2\n train : bool or tf boolean tensor\n Whether we are in the train phase or not. Can set to a tensorflow tensor\n so that it can be modified on the fly.\n wd : float\n Weight decay term for the convolutional weights\n\n Returns\n -------\n d : tf tensor\n Detail coefficients\n s : tf tensor\n Scale coefficients\n '
filters = x1.get_shape().as_list()[(- 1)]
assert (filters == x2.get_shape().as_list()[(- 1)])
with tf.variable_scope('P'):
d = (x1 - _residual_core(x2, filters, 1, train, wd))
with tf.variable_scope('U'):
s = (x2 + _residual_core(d, filters, 1, train, wd))
return (d, s)<|docstring|>Define a Lifting Layer
The P and the U blocks for this lifting layer are non-linear functions.
These are the same form as the F(x) in a residual layer (i.e. two
convolutions). In block form, a lifting layer looks like this::
_______________
| |
x1->|---(+)---------|->d
| ^ | |
| | | |
| --- --- |
| |-P | | U | |
| --- --- |
| | | |
| | v |
x2->|----------(+)--|->s
|_______________|
Parameters
----------
x1 : tf tensor
Input tensor 1
x2 : tf tensor
Input tensor 2
train : bool or tf boolean tensor
Whether we are in the train phase or not. Can set to a tensorflow tensor
so that it can be modified on the fly.
wd : float
Weight decay term for the convolutional weights
Returns
-------
d : tf tensor
Detail coefficients
s : tf tensor
Scale coefficients<|endoftext|>
|
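The lifting structure above is exactly invertible for any choice of P and U, because each branch only has a function of the other branch added to or subtracted from it. A NumPy sketch with arbitrary non-linear stand-ins for P and U (the real layers use `_residual_core`)::

import numpy as np

rng = np.random.default_rng(0)
W_p = rng.standard_normal((16, 16))
W_u = rng.standard_normal((16, 16))
P = lambda x: np.tanh(x @ W_p)   # stand-in for the predict block
U = lambda x: np.tanh(x @ W_u)   # stand-in for the update block

x1 = rng.standard_normal((4, 16))
x2 = rng.standard_normal((4, 16))

# Forward lifting: d = x1 - P(x2), s = x2 + U(d)
d = x1 - P(x2)
s = x2 + U(d)

# Inverse lifting: undo the steps in reverse order with the signs flipped
x2_rec = s - U(d)
x1_rec = d + P(x2_rec)

assert np.allclose(x1, x1_rec) and np.allclose(x2, x2_rec)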
7b197756414880fbfc03fd4284f84207dd18d5a3aac9f47c713d8dad7e2a99e6
|
def lift_residual_inv(d, s, train=True, wd=0.0001):
'Define the inverse of a lifting layer\n\n We share the variables with the forward lifting.\n\n In block form, the inverse lifting layer looks like this (note the sign swap\n and flow direction reversal compared to the forward case)::\n\n _______________\n | |\n x1<-|---(+)---------|<-d\n | ^ | |\n | | | |\n | --- --- |\n | | P | |-U | |\n | --- --- |\n | | | |\n | | v |\n x2<-|----------(+)--|<-s\n |_______________|\n\n\n Parameters\n ----------\n d : tf tensor\n Input tensor 1\n s : tf tensor\n Input tensor 2\n filters : int\n Number of output channels for Px2 and Ud\n train : bool or tf boolean tensor\n Whether we are in the train phase or not. Can set to a tensorflow tensor\n so that it can be modified on the fly.\n wd : float\n Weight decay term for the convolutional weights\n\n Returns\n -------\n x1 : tf tensor\n Reconstructed x1\n x2 : tf tensor\n Reconstructed x2\n '
filters = d.get_shape().as_list()[(- 1)]
assert (filters == s.get_shape().as_list()[(- 1)])
with tf.variable_scope('U') as scope:
scope.reuse_variables()
x2 = (s - _residual_core(d, filters, 1, train, wd))
with tf.variable_scope('P') as scope:
scope.reuse_variables()
x1 = (d + _residual_core(x2, filters, 1, train, wd))
return (x1, x2)
|
Define the inverse of a lifting layer
We share the variables with the forward lifting.
In block form, the inverse lifting layer looks like this (note the sign swap
and flow direction reversal compared to the forward case)::
_______________
| |
x1<-|---(+)---------|<-d
| ^ | |
| | | |
| --- --- |
| | P | |-U | |
| --- --- |
| | | |
| | v |
x2<-|----------(+)--|<-s
|_______________|
Parameters
----------
d : tf tensor
Input tensor 1
s : tf tensor
Input tensor 2
train : bool or tf boolean tensor
Whether we are in the train phase or not. Can set to a tensorflow tensor
so that it can be modified on the fly.
wd : float
Weight decay term for the convolutional weights
Returns
-------
x1 : tf tensor
Reconstructed x1
x2 : tf tensor
Reconstructed x2
|
tf_ops/general.py
|
lift_residual_inv
|
fbcotter/tf_ops
| 0 |
python
|
def lift_residual_inv(d, s, train=True, wd=0.0001):
'Define the inverse of a lifting layer\n\n We share the variables with the forward lifting.\n\n In block form, the inverse lifting layer looks like this (note the sign swap\n and flow direction reversal compared to the forward case)::\n\n _______________\n | |\n x1<-|---(+)---------|<-d\n | ^ | |\n | | | |\n | --- --- |\n | | P | |-U | |\n | --- --- |\n | | | |\n | | v |\n x2<-|----------(+)--|<-s\n |_______________|\n\n\n Parameters\n ----------\n d : tf tensor\n Input tensor 1\n s : tf tensor\n Input tensor 2\n filters : int\n Number of output channels for Px2 and Ud\n train : bool or tf boolean tensor\n Whether we are in the train phase or not. Can set to a tensorflow tensor\n so that it can be modified on the fly.\n wd : float\n Weight decay term for the convolutional weights\n\n Returns\n -------\n x1 : tf tensor\n Reconstructed x1\n x2 : tf tensor\n Reconstructed x2\n '
filters = d.get_shape().as_list()[(- 1)]
assert (filters == s.get_shape().as_list()[(- 1)])
with tf.variable_scope('U') as scope:
scope.reuse_variables()
x2 = (s - _residual_core(d, filters, 1, train, wd))
with tf.variable_scope('P') as scope:
scope.reuse_variables()
x1 = (d + _residual_core(x2, filters, 1, train, wd))
return (x1, x2)
|
def lift_residual_inv(d, s, train=True, wd=0.0001):
'Define the inverse of a lifting layer\n\n We share the variables with the forward lifting.\n\n In block form, the inverse lifting layer looks like this (note the sign swap\n and flow direction reversal compared to the forward case)::\n\n _______________\n | |\n x1<-|---(+)---------|<-d\n | ^ | |\n | | | |\n | --- --- |\n | | P | |-U | |\n | --- --- |\n | | | |\n | | v |\n x2<-|----------(+)--|<-s\n |_______________|\n\n\n Parameters\n ----------\n d : tf tensor\n Input tensor 1\n s : tf tensor\n Input tensor 2\n filters : int\n Number of output channels for Px2 and Ud\n train : bool or tf boolean tensor\n Whether we are in the train phase or not. Can set to a tensorflow tensor\n so that it can be modified on the fly.\n wd : float\n Weight decay term for the convolutional weights\n\n Returns\n -------\n x1 : tf tensor\n Reconstructed x1\n x2 : tf tensor\n Reconstructed x2\n '
filters = d.get_shape().as_list()[(- 1)]
assert (filters == s.get_shape().as_list()[(- 1)])
with tf.variable_scope('U') as scope:
scope.reuse_variables()
x2 = (s - _residual_core(d, filters, 1, train, wd))
with tf.variable_scope('P') as scope:
scope.reuse_variables()
x1 = (d + _residual_core(x2, filters, 1, train, wd))
return (x1, x2)<|docstring|>Define the inverse of a lifting layer
We share the variables with the forward lifting.
In block form, the inverse lifting layer looks like this (note the sign swap
and flow direction reversal compared to the forward case)::
_______________
| |
x1<-|---(+)---------|<-d
| ^ | |
| | | |
| --- --- |
| | P | |-U | |
| --- --- |
| | | |
| | v |
x2<-|----------(+)--|<-s
|_______________|
Parameters
----------
d : tf tensor
Input tensor 1
s : tf tensor
Input tensor 2
train : bool or tf boolean tensor
Whether we are in the train phase or not. Can set to a tensorflow tensor
so that it can be modified on the fly.
wd : float
Weight decay term for the convolutional weights
Returns
-------
x1 : tf tensor
Reconstructed x1
x2 : tf tensor
Reconstructed x2<|endoftext|>
|
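Since `lift_residual_inv` re-enters the 'P' and 'U' scopes and calls `scope.reuse_variables()`, one plausible way to wire a forward/inverse pair is to build both under the same enclosing variable scope. This is a hedged usage sketch (the module path `tf_ops.general` is assumed from the file listing), not taken from the library's own tests::

import tensorflow as tf
from tf_ops.general import lift_residual, lift_residual_inv

x1 = tf.placeholder(tf.float32, [None, 32, 32, 16])
x2 = tf.placeholder(tf.float32, [None, 32, 32, 16])

with tf.variable_scope('lift0'):
    d, s = lift_residual(x1, x2, train=False)
    # Re-uses the lift0/P and lift0/U variables created just above.
    x1_rec, x2_rec = lift_residual_inv(d, s, train=False)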
8461898ab77c9857b793f5dbba9f16e749bc4022e12fa092946dbfee28a1f5bf
|
def complex_convolution(x, output_dim, size=3, stride=1, stddev=None, wd=0.0, norm=1.0, name='conv2d', with_bias=False, bias_start=0.0):
'Function to do complex convolution\n\n In a similar way we have a convenience function, :py:func:`convolution` to\n wrap tf.nn.conv2d (create variables, add a relu, etc.), this function wraps\n :py:func:`cconv2d`. If you want more fine control over things, use\n cconv2d directly, but for most purposes, this function should do\n what you need. Adds the variables to tf.GraphKeys.REGULARIZATION_LOSSES\n if the wd parameter is positive.\n\n Parameters\n ----------\n x : :py:class:`tf.Tensor`\n The input variable\n output_dim : int\n number of filters to have\n size : int\n kernel spatial support\n stride : int\n what stride to use for convolution\n stddev : None or positive float\n Initialization stddev. If set to None, will use\n :py:func:`get_xavier_stddev`\n wd : None or positive float\n What weight decay to use\n norm : positive float\n Which regularizer to apply. E.g. norm=2 uses L2 regularization, and\n norm=p adds :math:`wd \\times ||w||_{p}^{p}` to the\n REGULARIZATION_LOSSES. See :py:func:`real_reg`.\n name : str\n The tensorflow variable scope to create the variables under\n with_bias : bool\n add a bias after convolution? (this will be ignored if batch norm is\n used)\n bias_start : complex float\n If a bias is used, what to initialize it to.\n\n Returns\n -------\n y : :py:class:`tf.Tensor`\n Result of applying complex convolution to x\n '
varlist = []
with tf.variable_scope(name):
w_shape = [size, size, x.get_shape().as_list()[(- 1)], output_dim]
w_r = variable_with_wd('w_real', w_shape, stddev, wd, norm)
w_i = variable_with_wd('w_imag', w_shape, stddev, wd, norm)
w = tf.complex(w_r, w_i)
varlist.append(w)
y = cconv2d(x, w, strides=[1, stride, stride, 1], name=name)
(y_r, y_i) = (tf.real(y), tf.imag(y))
if with_bias:
init = tf.constant_initializer(bias_start)
b_r = tf.get_variable('b_real', [output_dim], initializer=init)
b_i = tf.get_variable('b_imag', [output_dim], initializer=init)
varlist.append(tf.complex(b_r, b_i))
y_r = tf.add(y_r, b_r)
y_i = tf.add(y_i, b_i)
y = tf.complex(y_r, y_i)
return y
|
Function to do complex convolution
Just as :py:func:`convolution` is a convenience wrapper around tf.nn.conv2d
(creating variables, adding a relu, etc.), this function wraps
:py:func:`cconv2d`. If you want finer control, use cconv2d directly, but for
most purposes this function should do what you need. Adds the variables to
tf.GraphKeys.REGULARIZATION_LOSSES if the wd parameter is positive.
Parameters
----------
x : :py:class:`tf.Tensor`
The input variable
output_dim : int
number of filters to have
size : int
kernel spatial support
stride : int
what stride to use for convolution
stddev : None or positive float
Initialization stddev. If set to None, will use
:py:func:`get_xavier_stddev`
wd : None or positive float
What weight decay to use
norm : positive float
Which regularizer to apply. E.g. norm=2 uses L2 regularization, and
norm=p adds :math:`wd \times ||w||_{p}^{p}` to the
REGULARIZATION_LOSSES. See :py:func:`real_reg`.
name : str
The tensorflow variable scope to create the variables under
with_bias : bool
add a bias after convolution? (this will be ignored if batch norm is
used)
bias_start : complex float
If a bias is used, what to initialize it to.
Returns
-------
y : :py:class:`tf.Tensor`
Result of applying complex convolution to x
|
tf_ops/general.py
|
complex_convolution
|
fbcotter/tf_ops
| 0 |
python
|
def complex_convolution(x, output_dim, size=3, stride=1, stddev=None, wd=0.0, norm=1.0, name='conv2d', with_bias=False, bias_start=0.0):
'Function to do complex convolution\n\n In a similar way we have a convenience function, :py:func:`convolution` to\n wrap tf.nn.conv2d (create variables, add a relu, etc.), this function wraps\n :py:func:`cconv2d`. If you want more fine control over things, use\n cconv2d directly, but for most purposes, this function should do\n what you need. Adds the variables to tf.GraphKeys.REGULARIZATION_LOSSES\n if the wd parameter is positive.\n\n Parameters\n ----------\n x : :py:class:`tf.Tensor`\n The input variable\n output_dim : int\n number of filters to have\n size : int\n kernel spatial support\n stride : int\n what stride to use for convolution\n stddev : None or positive float\n Initialization stddev. If set to None, will use\n :py:func:`get_xavier_stddev`\n wd : None or positive float\n What weight decay to use\n norm : positive float\n Which regularizer to apply. E.g. norm=2 uses L2 regularization, and\n norm=p adds :math:`wd \\times ||w||_{p}^{p}` to the\n REGULARIZATION_LOSSES. See :py:func:`real_reg`.\n name : str\n The tensorflow variable scope to create the variables under\n with_bias : bool\n add a bias after convolution? (this will be ignored if batch norm is\n used)\n bias_start : complex float\n If a bias is used, what to initialize it to.\n\n Returns\n -------\n y : :py:class:`tf.Tensor`\n Result of applying complex convolution to x\n '
varlist = []
with tf.variable_scope(name):
w_shape = [size, size, x.get_shape().as_list()[(- 1)], output_dim]
w_r = variable_with_wd('w_real', w_shape, stddev, wd, norm)
w_i = variable_with_wd('w_imag', w_shape, stddev, wd, norm)
w = tf.complex(w_r, w_i)
varlist.append(w)
y = cconv2d(x, w, strides=[1, stride, stride, 1], name=name)
(y_r, y_i) = (tf.real(y), tf.imag(y))
if with_bias:
init = tf.constant_initializer(bias_start)
b_r = tf.get_variable('b_real', [output_dim], initializer=init)
b_i = tf.get_variable('b_imag', [output_dim], initializer=init)
varlist.append(tf.complex(b_r, b_i))
y_r = tf.add(y_r, b_r)
y_i = tf.add(y_i, b_i)
y = tf.complex(y_r, y_i)
return y
|
def complex_convolution(x, output_dim, size=3, stride=1, stddev=None, wd=0.0, norm=1.0, name='conv2d', with_bias=False, bias_start=0.0):
'Function to do complex convolution\n\n In a similar way we have a convenience function, :py:func:`convolution` to\n wrap tf.nn.conv2d (create variables, add a relu, etc.), this function wraps\n :py:func:`cconv2d`. If you want more fine control over things, use\n cconv2d directly, but for most purposes, this function should do\n what you need. Adds the variables to tf.GraphKeys.REGULARIZATION_LOSSES\n if the wd parameter is positive.\n\n Parameters\n ----------\n x : :py:class:`tf.Tensor`\n The input variable\n output_dim : int\n number of filters to have\n size : int\n kernel spatial support\n stride : int\n what stride to use for convolution\n stddev : None or positive float\n Initialization stddev. If set to None, will use\n :py:func:`get_xavier_stddev`\n wd : None or positive float\n What weight decay to use\n norm : positive float\n Which regularizer to apply. E.g. norm=2 uses L2 regularization, and\n norm=p adds :math:`wd \\times ||w||_{p}^{p}` to the\n REGULARIZATION_LOSSES. See :py:func:`real_reg`.\n name : str\n The tensorflow variable scope to create the variables under\n with_bias : bool\n add a bias after convolution? (this will be ignored if batch norm is\n used)\n bias_start : complex float\n If a bias is used, what to initialize it to.\n\n Returns\n -------\n y : :py:class:`tf.Tensor`\n Result of applying complex convolution to x\n '
varlist = []
with tf.variable_scope(name):
w_shape = [size, size, x.get_shape().as_list()[(- 1)], output_dim]
w_r = variable_with_wd('w_real', w_shape, stddev, wd, norm)
w_i = variable_with_wd('w_imag', w_shape, stddev, wd, norm)
w = tf.complex(w_r, w_i)
varlist.append(w)
y = cconv2d(x, w, strides=[1, stride, stride, 1], name=name)
(y_r, y_i) = (tf.real(y), tf.imag(y))
if with_bias:
init = tf.constant_initializer(bias_start)
b_r = tf.get_variable('b_real', [output_dim], initializer=init)
b_i = tf.get_variable('b_imag', [output_dim], initializer=init)
varlist.append(tf.complex(b_r, b_i))
y_r = tf.add(y_r, b_r)
y_i = tf.add(y_i, b_i)
y = tf.complex(y_r, y_i)
return y<|docstring|>Function to do complex convolution
Just as :py:func:`convolution` is a convenience wrapper around tf.nn.conv2d
(creating variables, adding a relu, etc.), this function wraps
:py:func:`cconv2d`. If you want finer control, use cconv2d directly, but for
most purposes this function should do what you need. Adds the variables to
tf.GraphKeys.REGULARIZATION_LOSSES if the wd parameter is positive.
Parameters
----------
x : :py:class:`tf.Tensor`
The input variable
output_dim : int
number of filters to have
size : int
kernel spatial support
stride : int
what stride to use for convolution
stddev : None or positive float
Initialization stddev. If set to None, will use
:py:func:`get_xavier_stddev`
wd : None or positive float
What weight decay to use
norm : positive float
Which regularizer to apply. E.g. norm=2 uses L2 regularization, and
norm=p adds :math:`wd \times ||w||_{p}^{p}` to the
REGULARIZATION_LOSSES. See :py:func:`real_reg`.
name : str
The tensorflow variable scope to create the variables under
with_bias : bool
add a bias after convolution? (this will be ignored if batch norm is
used)
bias_start : complex float
If a bias is used, what to initialize it to.
Returns
-------
y : :py:class:`tf.Tensor`
Result of applying complex convolution to x<|endoftext|>
|
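Because convolution is linear, a complex convolution always decomposes into four real convolutions: real*real - imag*imag for the real part and real*imag + imag*real for the imaginary part, which is presumably the algebra behind `cconv2d`. A 1-D NumPy check of that identity::

import numpy as np

rng = np.random.default_rng(1)
x = rng.standard_normal(16) + 1j * rng.standard_normal(16)
w = rng.standard_normal(3) + 1j * rng.standard_normal(3)

direct = np.convolve(x, w, mode='same')
real = np.convolve(x.real, w.real, 'same') - np.convolve(x.imag, w.imag, 'same')
imag = np.convolve(x.real, w.imag, 'same') + np.convolve(x.imag, w.real, 'same')

assert np.allclose(direct, real + 1j * imag)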
3be224fb5770069482cbd4f88102676e6248e44155d76e2bf7bfec267df811a6
|
def complex_convolution_transpose(x, output_dim, shape, size=3, stride=1, stddev=None, wd=0.0, norm=1, name='conv2d'):
'Function to do the conjugate transpose of complex convolution\n\n In a similar way we have a convenience function, :py:func:`convolution` to\n wrap tf.nn.conv2d (create variables, add a relu, etc.), this function wraps\n :py:func:`cconv2d_transpose`. If you want more fine control over things, use\n cconv2d_transpose directly, but for most purposes, this function should do\n what you need. Adds the variables to tf.GraphKeys.REGULARIZATION_LOSSES\n if the wd parameter is positive.\n\n We do not subtract the bias after doing the transpose convolution.\n\n Parameters\n ----------\n x : :py:class:`tf.Tensor`\n The input variable\n output_dim : int\n number of filters to have\n output_shape : list-like or 1-d Tensor\n list/tensor representing the output shape of the deconvolution op\n size : int\n kernel spatial support\n stride : int\n what stride to use for convolution\n stddev : None or positive float\n Initialization stddev. If set to None, will use\n :py:func:`get_xavier_stddev`\n wd : None or positive float\n What weight decay to use\n norm : positive float\n Which regularizer to apply. E.g. norm=2 uses L2 regularization, and\n norm=p adds :math:`wd \\times ||w||_{p}^{p}` to the\n REGULARIZATION_LOSSES. See :py:func:`real_reg`.\n name : str\n The tensorflow variable scope to create the variables under\n\n Returns\n -------\n y : :py:class:`tf.Tensor`\n Result of applying complex convolution transpose to x\n '
varlist = []
with tf.variable_scope(name):
w_shape = [size, size, x.get_shape().as_list()[(- 1)], output_dim]
w_r = variable_with_wd('w_real', w_shape, stddev, wd, norm)
w_i = variable_with_wd('w_imag', w_shape, stddev, wd, norm)
w = tf.complex(w_r, w_i)
varlist.append(w)
y = cconv2d_transpose(x, w, output_dim, strides=[1, stride, stride, 1], name=name)
(y_r, y_i) = (tf.real(y), tf.imag(y))
y = tf.complex(y_r, y_i)
return y
|
Function to do the conjugate transpose of complex convolution
Just as :py:func:`convolution` is a convenience wrapper around tf.nn.conv2d
(creating variables, adding a relu, etc.), this function wraps
:py:func:`cconv2d_transpose`. If you want finer control, use cconv2d_transpose
directly, but for most purposes this function should do what you need. Adds
the variables to tf.GraphKeys.REGULARIZATION_LOSSES if the wd parameter is
positive.
We do not subtract the bias after doing the transpose convolution.
Parameters
----------
x : :py:class:`tf.Tensor`
The input variable
output_dim : int
number of filters to have
shape : list-like or 1-d Tensor
list/tensor representing the output shape of the deconvolution op
size : int
kernel spatial support
stride : int
what stride to use for convolution
stddev : None or positive float
Initialization stddev. If set to None, will use
:py:func:`get_xavier_stddev`
wd : None or positive float
What weight decay to use
norm : positive float
Which regularizer to apply. E.g. norm=2 uses L2 regularization, and
norm=p adds :math:`wd \times ||w||_{p}^{p}` to the
REGULARIZATION_LOSSES. See :py:func:`real_reg`.
name : str
The tensorflow variable scope to create the variables under
Returns
-------
y : :py:class:`tf.Tensor`
Result of applying complex convolution transpose to x
|
tf_ops/general.py
|
complex_convolution_transpose
|
fbcotter/tf_ops
| 0 |
python
|
def complex_convolution_transpose(x, output_dim, shape, size=3, stride=1, stddev=None, wd=0.0, norm=1, name='conv2d'):
'Function to do the conjugate transpose of complex convolution\n\n In a similar way we have a convenience function, :py:func:`convolution` to\n wrap tf.nn.conv2d (create variables, add a relu, etc.), this function wraps\n :py:func:`cconv2d_transpose`. If you want more fine control over things, use\n cconv2d_transpose directly, but for most purposes, this function should do\n what you need. Adds the variables to tf.GraphKeys.REGULARIZATION_LOSSES\n if the wd parameter is positive.\n\n We do not subtract the bias after doing the transpose convolution.\n\n Parameters\n ----------\n x : :py:class:`tf.Tensor`\n The input variable\n output_dim : int\n number of filters to have\n output_shape : list-like or 1-d Tensor\n list/tensor representing the output shape of the deconvolution op\n size : int\n kernel spatial support\n stride : int\n what stride to use for convolution\n stddev : None or positive float\n Initialization stddev. If set to None, will use\n :py:func:`get_xavier_stddev`\n wd : None or positive float\n What weight decay to use\n norm : positive float\n Which regularizer to apply. E.g. norm=2 uses L2 regularization, and\n norm=p adds :math:`wd \\times ||w||_{p}^{p}` to the\n REGULARIZATION_LOSSES. See :py:func:`real_reg`.\n name : str\n The tensorflow variable scope to create the variables under\n\n Returns\n -------\n y : :py:class:`tf.Tensor`\n Result of applying complex convolution transpose to x\n '
varlist = []
with tf.variable_scope(name):
w_shape = [size, size, x.get_shape().as_list()[(- 1)], output_dim]
w_r = variable_with_wd('w_real', w_shape, stddev, wd, norm)
w_i = variable_with_wd('w_imag', w_shape, stddev, wd, norm)
w = tf.complex(w_r, w_i)
varlist.append(w)
y = cconv2d_transpose(x, w, output_dim, strides=[1, stride, stride, 1], name=name)
(y_r, y_i) = (tf.real(y), tf.imag(y))
y = tf.complex(y_r, y_i)
return y
|
def complex_convolution_transpose(x, output_dim, shape, size=3, stride=1, stddev=None, wd=0.0, norm=1, name='conv2d'):
'Function to do the conjugate transpose of complex convolution\n\n In a similar way we have a convenience function, :py:func:`convolution` to\n wrap tf.nn.conv2d (create variables, add a relu, etc.), this function wraps\n :py:func:`cconv2d_transpose`. If you want more fine control over things, use\n cconv2d_transpose directly, but for most purposes, this function should do\n what you need. Adds the variables to tf.GraphKeys.REGULARIZATION_LOSSES\n if the wd parameter is positive.\n\n We do not subtract the bias after doing the transpose convolution.\n\n Parameters\n ----------\n x : :py:class:`tf.Tensor`\n The input variable\n output_dim : int\n number of filters to have\n output_shape : list-like or 1-d Tensor\n list/tensor representing the output shape of the deconvolution op\n size : int\n kernel spatial support\n stride : int\n what stride to use for convolution\n stddev : None or positive float\n Initialization stddev. If set to None, will use\n :py:func:`get_xavier_stddev`\n wd : None or positive float\n What weight decay to use\n norm : positive float\n Which regularizer to apply. E.g. norm=2 uses L2 regularization, and\n norm=p adds :math:`wd \\times ||w||_{p}^{p}` to the\n REGULARIZATION_LOSSES. See :py:func:`real_reg`.\n name : str\n The tensorflow variable scope to create the variables under\n\n Returns\n -------\n y : :py:class:`tf.Tensor`\n Result of applying complex convolution transpose to x\n '
varlist = []
with tf.variable_scope(name):
w_shape = [size, size, x.get_shape().as_list()[(- 1)], output_dim]
w_r = variable_with_wd('w_real', w_shape, stddev, wd, norm)
w_i = variable_with_wd('w_imag', w_shape, stddev, wd, norm)
w = tf.complex(w_r, w_i)
varlist.append(w)
y = cconv2d_transpose(x, w, output_dim, strides=[1, stride, stride, 1], name=name)
(y_r, y_i) = (tf.real(y), tf.imag(y))
y = tf.complex(y_r, y_i)
return y<|docstring|>Function to do the conjugate transpose of complex convolution
Just as :py:func:`convolution` is a convenience wrapper around tf.nn.conv2d
(creating variables, adding a relu, etc.), this function wraps
:py:func:`cconv2d_transpose`. If you want finer control, use cconv2d_transpose
directly, but for most purposes this function should do what you need. Adds
the variables to tf.GraphKeys.REGULARIZATION_LOSSES if the wd parameter is
positive.
We do not subtract the bias after doing the transpose convolution.
Parameters
----------
x : :py:class:`tf.Tensor`
The input variable
output_dim : int
number of filters to have
shape : list-like or 1-d Tensor
list/tensor representing the output shape of the deconvolution op
size : int
kernel spatial support
stride : int
what stride to use for convolution
stddev : None or positive float
Initialization stddev. If set to None, will use
:py:func:`get_xavier_stddev`
wd : None or positive float
What weight decay to use
norm : positive float
Which regularizer to apply. E.g. norm=2 uses L2 regularization, and
norm=p adds :math:`wd \times ||w||_{p}^{p}` to the
REGULARIZATION_LOSSES. See :py:func:`real_reg`.
name : str
The tensorflow variable scope to create the variables under
Returns
-------
y : :py:class:`tf.Tensor`
Result of applying complex convolution transpose to x<|endoftext|>
|